From 3505281559513e2922484ebf0996a8846dcc0a34 Mon Sep 17 00:00:00 2001
From: "Paul J. Davis"
Date: Sun, 21 Jul 2019 14:40:52 -0500
Subject: Make sure that fsync errors are raised

This changes `couch_file` to ensure that errors are raised when a call
to `fsync` fails. It will also stop the couch_file process to ensure
that anything handling a failed `fsync` won't attempt to retry the
operation and experience the issues discovered by Postgres [1].

[1] http://danluu.com/fsyncgate/
---
 src/couch/src/couch_file.erl | 29 ++++++++++++++++++++++++++---
 src/couch/test/couch_file_tests.erl | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 59 insertions(+), 3 deletions(-)

diff --git a/src/couch/src/couch_file.erl b/src/couch/src/couch_file.erl
index d6e4066db..6db23eaa3 100644
--- a/src/couch/src/couch_file.erl
+++ b/src/couch/src/couch_file.erl
@@ -215,12 +215,26 @@ truncate(Fd, Pos) ->
 sync(Filepath) when is_list(Filepath) ->
     case file:open(Filepath, [append, raw]) of
         {ok, Fd} ->
-            try ok = file:sync(Fd) after ok = file:close(Fd) end;
+            try
+                case file:sync(Fd) of
+                    ok ->
+                        ok;
+                    {error, Reason} ->
+                        erlang:error({fsync_error, Reason})
+                end
+            after
+                ok = file:close(Fd)
+            end;
         {error, Error} ->
             erlang:error(Error)
     end;
 sync(Fd) ->
-    gen_server:call(Fd, sync, infinity).
+    case gen_server:call(Fd, sync, infinity) of
+        ok ->
+            ok;
+        {error, Reason} ->
+            erlang:error({fsync_error, Reason})
+    end.

 %%----------------------------------------------------------------------
 %% Purpose: Close the file.
@@ -462,7 +476,16 @@ handle_call({set_db_pid, Pid}, _From, #file{db_monitor=OldRef}=File) ->
     {reply, ok, File#file{db_monitor=Ref}};

 handle_call(sync, _From, #file{fd=Fd}=File) ->
-    {reply, file:sync(Fd), File};
+    case file:sync(Fd) of
+        ok ->
+            {reply, ok, File};
+        {error, _} = Error ->
+            % We're intentionally dropping all knowledge
+            % of this Fd so that we don't accidentally
+            % recover in some whacky edge case that I
+            % can't fathom.
+            {stop, Error, Error, #file{fd = nil}}
+    end;

 handle_call({truncate, Pos}, _From, #file{fd=Fd}=File) ->
     {ok, Pos} = file:position(Fd, Pos),

diff --git a/src/couch/test/couch_file_tests.erl b/src/couch/test/couch_file_tests.erl
index 34c1a1654..e9806c09a 100644
--- a/src/couch/test/couch_file_tests.erl
+++ b/src/couch/test/couch_file_tests.erl
@@ -498,3 +498,36 @@ make_delete_dir_test_case({RootDir, ViewDir}, DeleteAfterRename) ->
 remove_dir(Dir) ->
     [file:delete(File) || File <- filelib:wildcard(filename:join([Dir, "*"]))],
     file:del_dir(Dir).
+
+
+fsync_error_test_() ->
+    {
+        "Test fsync raises errors",
+        {
+            setup,
+            fun() ->
+                test_util:start(?MODULE, [ioq])
+            end,
+            fun(Ctx) ->
+                test_util:stop(Ctx)
+            end,
+            [
+                fun fsync_raises_errors/0
+            ]
+        }
+    }.
+
+
+fsync_raises_errors() ->
+    Fd = spawn(fun() -> fake_fsync_fd() end),
+    ?assertError({fsync_error, eio}, couch_file:sync(Fd)).
+
+
+fake_fsync_fd() ->
+    % Mocking gen_server did not go very
+    % well so faking the couch_file pid
+    % will have to do.
+    receive
+        {'$gen_call', From, sync} ->
+            gen:reply(From, {error, eio})
+    end.
-- cgit v1.2.1

From a6c0da1f8959763242153cf11282ee41e6d3f2f9 Mon Sep 17 00:00:00 2001
From: Adam Kocoloski
Date: Sat, 27 Jul 2019 21:38:33 -0400
Subject: Increase timeouts on two slow btree tests

These two tests are reliably timing out on ARM hardware in Jenkins.
They do a lot of individual btree operations, so this is not entirely
surprising. The appropriate course of action here is to raise the
timeout.
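For context: EUnit aborts any test that exceeds its timeout, and the default allowance is only five seconds, so a long-running test has to opt in to a larger value by being wrapped in a `{timeout, Seconds, Tests}` tuple. A minimal sketch of the pattern the diff below applies (the test name and helper here are hypothetical):

    slow_btree_test_() ->
        %% Allow up to 60 seconds instead of EUnit's 5-second default.
        {timeout, 60, ?_assert(run_many_btree_ops())}.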
---
 src/couch/test/couch_btree_tests.erl | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/src/couch/test/couch_btree_tests.erl b/src/couch/test/couch_btree_tests.erl
index 3c8840a60..c9b791d2c 100644
--- a/src/couch/test/couch_btree_tests.erl
+++ b/src/couch/test/couch_btree_tests.erl
@@ -16,6 +16,7 @@
 -include_lib("couch/include/couch_db.hrl").

 -define(ROWS, 1000).
+-define(TIMEOUT, 60). % seconds


 setup() ->
@@ -276,7 +277,9 @@ should_add_every_odd_key_remove_every_even(KeyValues, {_, Btree}) ->
             false -> {Count + 1, Left, [X | Right]}
         end
     end, {0, [], []}, KeyValues),
-    ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1)).
+    {timeout, ?TIMEOUT,
+        ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1))
+    }.

 should_add_every_even_key_remove_every_old(KeyValues, {_, Btree}) ->
     {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
@@ -286,7 +289,9 @@ should_add_every_even_key_remove_every_old(KeyValues, {_, Btree}) ->
             false -> {Count + 1, Left, [X | Right]}
         end
     end, {0, [], []}, KeyValues),
-    ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0)).
+    {timeout, ?TIMEOUT,
+        ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0))
+    }.

 should_reduce_without_specified_direction({_, Btree}) ->
-- cgit v1.2.1

From ec2a9637ec220823a4cab2d4b430231ca49ae082 Mon Sep 17 00:00:00 2001
From: Nick Vatamaniuc
Date: Sun, 28 Jul 2019 13:05:39 -0400
Subject: Fix flaky mem3_sync_event_listener EUnit test

Setting a config value was asynchronous, and the waiting function was
not waiting for the actual state value to change, only for the state
function to return. The fix is to wait for the config value to
propagate to the state.
---
 src/mem3/src/mem3_sync_event_listener.erl | 39 ++++++++++++++++++++++++-------
 1 file changed, 31 insertions(+), 8 deletions(-)

diff --git a/src/mem3/src/mem3_sync_event_listener.erl b/src/mem3/src/mem3_sync_event_listener.erl
index e3368e23f..d7f745137 100644
--- a/src/mem3/src/mem3_sync_event_listener.erl
+++ b/src/mem3/src/mem3_sync_event_listener.erl
@@ -258,14 +258,16 @@ subscribe_for_config_test_() ->

 should_set_sync_delay(Pid) ->
     ?_test(begin
         config:set("mem3", "sync_delay", "123", false),
-        ?assertMatch(#state{delay = 123}, capture(Pid)),
+        wait_state_delay(Pid, 123),
+        ?assertMatch(#state{delay = 123}, get_state(Pid)),
         ok
     end).

 should_set_sync_frequency(Pid) ->
     ?_test(begin
         config:set("mem3", "sync_frequency", "456", false),
-        ?assertMatch(#state{frequency = 456}, capture(Pid)),
+        wait_state_frequency(Pid, 456),
+        ?assertMatch(#state{frequency = 456}, get_state(Pid)),
         ok
     end).

@@ -293,17 +295,38 @@ should_terminate(Pid) ->
         ok
     end).

-capture(Pid) ->
+
+get_state(Pid) ->
     Ref = make_ref(),
+    Pid ! {get_state, Ref, self()},
+    receive
+        {Ref, State} -> State
+    after 10 ->
+        timeout
+    end.
+
+
+wait_state_frequency(Pid, Val) ->
     WaitFun = fun() ->
-        Pid ! {get_state, Ref, self()},
-        receive
-            {Ref, State} -> State
-        after 0 ->
-            wait
+        case get_state(Pid) of
+            timeout ->
+                wait;
+            #state{frequency = Val} ->
+                true
         end
     end,
     test_util:wait(WaitFun).

+
+wait_state_delay(Pid, Val) ->
+    WaitFun = fun() ->
+        case get_state(Pid) of
+            timeout ->
+                wait;
+            #state{delay = Val} ->
+                true
+        end
+    end,
+    test_util:wait(WaitFun).
+
 -endif.
-- cgit v1.2.1

From 29d484e45054c4b40f6b3a223298c8a31914f90d Mon Sep 17 00:00:00 2001
From: Adam Kocoloski
Date: Sun, 28 Jul 2019 23:50:50 -0400
Subject: Fix EUnit timeouts (#2087)

* Proactively increase timeout for PBKDF2 test

This test was taking 134s in a recent run, which is uncomfortably close
to the threshold.
* Extend timeouts for all reshard API tests

We're observing timeouts on various tests in this suite, so let's keep
it consistent and increase timeouts across the board.

* Bump default timeout for all mem3_reshard tests

A couple of these tests were exceeding the default timeout under normal
circumstances, but many of them do a significant amount of work, so for
simplicity we set a module-wide timeout and apply it consistently
throughout.

* Modernize the sync_security test setup/teardown

This test actually doesn't do much real work, but I think what was
happening is that the setup and teardown time was being charged to the
test itself. I've refactored it to use a more modern scaffolding
following some of our more recent additions to the test suite, but have
left the timeout at the default to test this hypothesis.

* Increase timeouts on more heavyweight mem3 tests

* Extend timeouts for replication tests
---
 src/couch/test/couch_passwords_tests.erl | 2 +-
 src/mem3/test/mem3_rep_test.erl | 17 ++--
 src/mem3/test/mem3_reshard_api_test.erl | 121 ++++++++++++-----------
 src/mem3/test/mem3_reshard_changes_feed_test.erl | 9 +-
 src/mem3/test/mem3_reshard_test.erl | 37 +++----
 src/mem3/test/mem3_sync_security_test.erl | 48 ++++++---
 6 files changed, 130 insertions(+), 104 deletions(-)

diff --git a/src/couch/test/couch_passwords_tests.erl b/src/couch/test/couch_passwords_tests.erl
index dea6d6b7b..88de8530f 100644
--- a/src/couch/test/couch_passwords_tests.erl
+++ b/src/couch/test/couch_passwords_tests.erl
@@ -46,7 +46,7 @@ pbkdf2_test_()->
                      <<"sa\0lt">>,
                      4096, 16))},
-    {timeout, 180, %% this may runs too long on slow hosts
+    {timeout, 600, %% this may run too long on slow hosts
     {"Iterations: 16777216 - this may take some time",
         ?_assertEqual(
             {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},

diff --git a/src/mem3/test/mem3_rep_test.erl b/src/mem3/test/mem3_rep_test.erl
index 7e8856f7c..4a46e7b93 100644
--- a/src/mem3/test/mem3_rep_test.erl
+++ b/src/mem3/test/mem3_rep_test.erl
@@ -20,6 +20,7 @@

 -define(ID, <<"_id">>).

+-define(TIMEOUT, 60). % seconds

 setup() ->
     {AllSrc, AllTgt} = {?tempdb(), ?tempdb()},
@@ -65,7 +66,7 @@ mem3_reshard_db_test_() ->


 replicate_basics(#{allsrc := AllSrc, alltgt := AllTgt}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         DocSpec = #{docs => 10, delete => [5, 9]},
         add_test_docs(AllSrc, DocSpec),
         SDocs = get_all_docs(AllSrc),
@@ -79,11 +80,11 @@ replicate_basics(#{allsrc := AllSrc, alltgt := AllTgt}) ->

         ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
         ?assertEqual(SDocs, get_all_docs(AllTgt))
-    end).
+    end)}.


 replicate_small_batches(#{allsrc := AllSrc, alltgt := AllTgt}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         DocSpec = #{docs => 10, delete => [5, 9]},
         add_test_docs(AllSrc, DocSpec),
         SDocs = get_all_docs(AllSrc),
@@ -97,11 +98,11 @@ replicate_small_batches(#{allsrc := AllSrc, alltgt := AllTgt}) ->

         ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
         ?assertEqual(SDocs, get_all_docs(AllTgt))
-    end).
+    end)}.


 replicate_low_batch_count(#{allsrc := AllSrc, alltgt := AllTgt}) ->
-    ?_test(begin
+    {timeout, ?TIMEOUT, ?_test(begin
         DocSpec = #{docs => 10, delete => [5, 9]},
         add_test_docs(AllSrc, DocSpec),
         SDocs = get_all_docs(AllSrc),
@@ -122,11 +123,11 @@ replicate_low_batch_count(#{allsrc := AllSrc, alltgt := AllTgt}) ->

         ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts3)),
         ?assertEqual(SDocs, get_all_docs(AllTgt))
-    end).
+    end)}.
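%% The change to every test in this file, and in the reshard suites that
%% follow, is the same mechanical wrap: the existing `?_test(begin ... end)`
%% generator becomes the third element of a `{timeout, ?TIMEOUT, ...}` tuple,
%% which is why each hunk touches only the head of the function and turns the
%% closing `end).` into `end)}.`. A minimal sketch of the resulting shape,
%% with a condensed, hypothetical test body:
%%
%%     replicate_something(#{allsrc := Src, alltgt := Tgt}) ->
%%         {timeout, ?TIMEOUT, ?_test(begin
%%             ?assertEqual(get_all_docs(Src), get_all_docs(Tgt))
%%         end)}.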
replicate_with_partitions(#{partsrc := PartSrc, parttgt := PartTgt}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin DocSpec = #{ pdocs => #{ <<"PX">> => 15, @@ -149,7 +150,7 @@ replicate_with_partitions(#{partsrc := PartSrc, parttgt := PartTgt}) -> ?assertEqual(PXSrc, get_partition_info(PartTgt, <<"PX">>)), ?assertEqual(PYSrc, get_partition_info(PartTgt, <<"PY">>)), ?assertEqual(SDocs, get_all_docs(PartTgt)) - end). + end)}. get_partition_info(DbName, Partition) -> diff --git a/src/mem3/test/mem3_reshard_api_test.erl b/src/mem3/test/mem3_reshard_api_test.erl index 982fed173..c4df24ad3 100644 --- a/src/mem3/test/mem3_reshard_api_test.erl +++ b/src/mem3/test/mem3_reshard_api_test.erl @@ -27,6 +27,7 @@ -define(STATE, "_reshard/state"). -define(ID, <<"id">>). -define(OK, <<"ok">>). +-define(TIMEOUT, 60). % seconds setup() -> @@ -110,7 +111,7 @@ mem3_reshard_api_test_() -> basics({Top, _}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin % GET /_reshard ?assertMatch({200, #{ <<"state">> := <<"running">>, @@ -137,11 +138,11 @@ basics({Top, _}) -> ?assertMatch({404, _}, req(get, Top ++ ?RESHARD ++ "/invalidpath")), ?assertMatch({405, _}, req(put, Top ++ ?RESHARD, #{dont => thinkso})), ?assertMatch({405, _}, req(post, Top ++ ?RESHARD, #{nope => nope})) - end). + end)}. create_job_basic({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin % POST /_reshard/jobs {C1, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}), ?assertEqual(201, C1), @@ -192,11 +193,11 @@ create_job_basic({Top, {Db1, _, _}}) -> % DELETE /_reshard/jobs/$jobid should be a 404 as well ?assertMatch({404, #{}}, req(delete, Top ++ ?JOBS ++ ?b2l(Id))) - end). + end)}. create_two_jobs({Top, {Db1, Db2, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Jobs = Top ++ ?JOBS, ?assertMatch({201, [#{?OK := true}]}, @@ -218,20 +219,20 @@ create_two_jobs({Top, {Db1, Db2, _}}) -> ?assertMatch({200, #{<<"total">> := 1}}, req(get, Top ++ ?RESHARD)), {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id2)), ?assertMatch({200, #{<<"total">> := 0}}, req(get, Top ++ ?RESHARD)) - end). + end)}. create_multiple_jobs_from_one_post({Top, {_, _, Db3}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Jobs = Top ++ ?JOBS, {C1, R1} = req(post, Jobs, #{type => split, db => Db3}), ?assertMatch({201, [#{?OK := true}, #{?OK := true}]}, {C1, R1}), ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD)) - end). + end)}. start_stop_cluster_basic({Top, _}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Url = Top ++ ?STATE, ?assertMatch({200, #{ @@ -261,11 +262,11 @@ start_stop_cluster_basic({Top, _}) -> }}, req(get, Top ++ ?RESHARD)), ?assertMatch({200, _}, req(put, Url, #{state => running})), ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, Url)) - end). + end)}. test_disabled({Top, _}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin application:set_env(mem3, reshard_disabled, true), ?assertMatch({501, _}, req(get, Top ++ ?RESHARD)), ?assertMatch({501, _}, req(put, Top ++ ?STATE, #{state => running})), @@ -273,11 +274,11 @@ test_disabled({Top, _}) -> application:unset_env(mem3, reshard_disabled), ?assertMatch({200, _}, req(get, Top ++ ?RESHARD)), ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})) - end). + end)}. 
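%% A note on test_disabled/1 above: the reshard_disabled flag lives in the
%% OTP application environment rather than in couch config, so the test flips
%% it with application:set_env/3 and application:unset_env/2. A slightly more
%% defensive sketch of the same toggle (the try/after cleanup is an assumed
%% refinement, not part of this patch):
%%
%%     application:set_env(mem3, reshard_disabled, true),
%%     try
%%         ?assertMatch({501, _}, req(get, Top ++ ?RESHARD))
%%     after
%%         application:unset_env(mem3, reshard_disabled)
%%     end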
start_stop_cluster_with_a_job({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Url = Top ++ ?STATE, ?assertMatch({200, _}, req(put, Url, #{state => stopped})), @@ -316,11 +317,11 @@ start_stop_cluster_with_a_job({Top, {Db1, _, _}}) -> ?assertMatch({200, _}, req(put, Url, #{state => running})), ?assertMatch({200, #{?ID := Id2, <<"job_state">> := JSt}} when JSt =/= <<"stopped">>, req(get, Top ++ ?JOBS ++ ?b2l(Id2))) - end). + end)}. individual_job_start_stop({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin intercept_state(topoff1), Body = #{type => split, db => Db1}, @@ -352,11 +353,11 @@ individual_job_start_stop({Top, {Db1, _, _}}) -> % Let it continue running and it should complete eventually JobPid2 ! continue, wait_state(StUrl, <<"completed">>) - end). + end)}. individual_job_stop_when_cluster_stopped({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin intercept_state(topoff1), Body = #{type => split, db => Db1}, @@ -397,11 +398,11 @@ individual_job_stop_when_cluster_stopped({Top, {Db1, _, _}}) -> % Let it continue running and it should complete eventually JobPid2 ! continue, wait_state(StUrl, <<"completed">>) - end). + end)}. create_job_with_invalid_arguments({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Jobs = Top ++ ?JOBS, % Nothing in the body @@ -434,11 +435,11 @@ create_job_with_invalid_arguments({Top, {Db1, _, _}}) -> % Can't have both db and shard ?assertMatch({400, _}, req(post, Jobs, #{type => split, db => Db1, shard => <<"blah">>})) - end). + end)}. create_job_with_db({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Jobs = Top ++ ?JOBS, Body1 = #{type => split, db => Db1}, @@ -465,11 +466,11 @@ create_job_with_db({Top, {Db1, _, _}}) -> [16#80000000, 16#bfffffff], [16#c0000000, 16#ffffffff] ], [mem3:range(S) || S <- lists:sort(mem3:shards(Db1))]) - end). + end)}. create_job_with_shard_name({Top, {_, _, Db3}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Jobs = Top ++ ?JOBS, [S1, S2] = [mem3:name(S) || S <- lists:sort(mem3:shards(Db3))], @@ -490,11 +491,11 @@ create_job_with_shard_name({Top, {_, _, Db3}}) -> [16#80000000, 16#bfffffff], [16#c0000000, 16#ffffffff] ], [mem3:range(S) || S <- lists:sort(mem3:shards(Db3))]) - end). + end)}. completed_job_handling({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Jobs = Top ++ ?JOBS, % Run job to completion @@ -542,109 +543,109 @@ completed_job_handling({Top, {Db1, _, _}}) -> ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)), ?assertMatch({200, #{?OK := true}}, req(delete, JobUrl)) - end). + end)}. handle_db_deletion_in_topoff1({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = delete_source_in_state(Top, Db1, topoff1), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) - end). + end)}. handle_db_deletion_in_initial_copy({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = delete_source_in_state(Top, Db1, initial_copy), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) - end). + end)}. handle_db_deletion_in_copy_local_docs({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = delete_source_in_state(Top, Db1, copy_local_docs), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) - end). + end)}. 
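%% The handle_db_deletion_* and recover_in_* cases here all rely on the
%% suite's wait_state/2 helper, which polls a job's state endpoint rather
%% than asserting on it once. The idiom is the same one used by the
%% mem3_sync_event_listener fix earlier in this series: the fun passed to
%% test_util:wait/1 returns the atom `wait` to request a retry and any other
%% value to finish. A minimal sketch of such a helper (hypothetical; the
%% suite's real implementation may differ):
%%
%%     wait_state(StUrl, Expect) ->
%%         test_util:wait(fun() ->
%%             case req(get, StUrl) of
%%                 {200, #{<<"state">> := Expect}} -> ok;
%%                 {_, _} -> wait
%%             end
%%         end).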
handle_db_deletion_in_build_indices({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = delete_source_in_state(Top, Db1, build_indices), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) - end). + end)}. handle_db_deletion_in_update_shard_map({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = delete_source_in_state(Top, Db1, update_shardmap), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) - end). + end)}. handle_db_deletion_in_wait_source_close({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = delete_source_in_state(Top, Db1, wait_source_close), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) - end). + end)}. recover_in_topoff1({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = recover_in_state(Top, Db1, topoff1), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end). + end)}. recover_in_initial_copy({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = recover_in_state(Top, Db1, initial_copy), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end). + end)}. recover_in_copy_local_docs({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = recover_in_state(Top, Db1, copy_local_docs), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end). + end)}. recover_in_build_indices({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = recover_in_state(Top, Db1, build_indices), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end). + end)}. recover_in_update_shard_map({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = recover_in_state(Top, Db1, update_shardmap), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end). + end)}. recover_in_wait_source_close({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = recover_in_state(Top, Db1, wait_source_close), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end). + end)}. recover_in_topoff3({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = recover_in_state(Top, Db1, topoff3), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end). + end)}. recover_in_source_delete({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin JobId = recover_in_state(Top, Db1, source_delete), wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end). + end)}. check_max_jobs({Top, {Db1, Db2, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Jobs = Top ++ ?JOBS, config:set("reshard", "max_jobs", "0", _Persist=false), @@ -678,11 +679,11 @@ check_max_jobs({Top, {Db1, Db2, _}}) -> % Jobs that have been created already are not removed if max jobs is lowered % so make sure the job completes wait_to_complete(Top, R4) - end). + end)}. check_node_and_range_required_params({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Jobs = Top ++ ?JOBS, Node = atom_to_binary(node(), utf8), @@ -704,18 +705,18 @@ check_node_and_range_required_params({Top, {Db1, _, _}}) -> {C3, R3} = req(post, Jobs, Body), ?assertMatch({201, [#{?OK := true}]}, {C3, R3}), wait_to_complete_then_cleanup(Top, R3) - end). + end)}. 
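%% check_max_jobs/1 above adjusts server behaviour with
%% config:set("reshard", "max_jobs", "0", _Persist = false). Passing false as
%% the persist flag keeps the override in memory only, so an aborted test run
%% cannot leave it behind in an .ini file. A minimal sketch of the usual
%% set/cleanup pairing in a fixture (the teardown is an assumed good practice,
%% not a quote from this patch):
%%
%%     setup() ->
%%         config:set("reshard", "max_jobs", "0", false).
%%
%%     teardown(_) ->
%%         config:delete("reshard", "max_jobs", false).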
cleanup_completed_jobs({Top, {Db1, _, _}}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Body = #{type => split, db => Db1}, {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body), JobUrl = Top ++ ?JOBS ++ ?b2l(Id), wait_state(JobUrl ++ "/state", <<"completed">>), delete_db(Top, Db1), wait_for_http_code(JobUrl, 404) - end). + end)}. % Test help functions diff --git a/src/mem3/test/mem3_reshard_changes_feed_test.erl b/src/mem3/test/mem3_reshard_changes_feed_test.erl index 52e18fb26..4b9e2a34a 100644 --- a/src/mem3/test/mem3_reshard_changes_feed_test.erl +++ b/src/mem3/test/mem3_reshard_changes_feed_test.erl @@ -17,6 +17,7 @@ -include_lib("couch/include/couch_db.hrl"). -include_lib("mem3/src/mem3_reshard.hrl"). +-define(TIMEOUT, 60). % seconds -define(assertChanges(Expected, Received), begin @@ -66,7 +67,7 @@ mem3_reshard_changes_feed_test_() -> normal_feed_should_work_after_split(#{db1 := Db}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin DocSpec = #{ docs => [1, 10], delete => [5, 6] @@ -141,11 +142,11 @@ normal_feed_should_work_after_split(#{db1 := Db}) -> ?assertEqual(4, length(Changes7)), [#{seq := Seq7} | _] = Changes7, ?assertEqual(EndSeq7, Seq7) - end). + end)}. continuous_feed_should_work_during_split(#{db1 := Db}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin {UpdaterPid, UpdaterRef} = spawn_monitor(fun() -> Updater = fun U({State, I}) -> receive @@ -233,7 +234,7 @@ continuous_feed_should_work_during_split(#{db1 := Db}) -> DocIDs = [Id || #{id := Id} <- StopChanges ++ AfterChanges], ExpectedDocIDs = [doc_id(<<>>, N) || N <- lists:seq(1, DocCount)], ?assertEqual(ExpectedDocIDs, lists:usort(DocIDs)) - end). + end)}. split_and_wait(Db) -> diff --git a/src/mem3/test/mem3_reshard_test.erl b/src/mem3/test/mem3_reshard_test.erl index 8c4479656..ab6202115 100644 --- a/src/mem3/test/mem3_reshard_test.erl +++ b/src/mem3/test/mem3_reshard_test.erl @@ -19,6 +19,7 @@ -include_lib("couch_mrview/include/couch_mrview.hrl"). % for all_docs function -define(ID, <<"_id">>). +-define(TIMEOUT, 60). setup() -> HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name}, @@ -81,7 +82,7 @@ mem3_reshard_db_test_() -> % This is a basic test to check that shard splitting preserves documents, and % db meta props like revs limits and security. split_one_shard(#{db1 := Db}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin DocSpec = #{docs => 10, delete => [5, 9], mrview => 1, local => 1}, add_test_docs(Db, DocSpec), @@ -135,13 +136,13 @@ split_one_shard(#{db1 := Db}) -> % Don't forget about the local but don't include internal checkpoints % as some of those are munged and transformed during the split ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1)) - end). + end)}. % This test checks that document added while the shard is being split are not % lost. Topoff1 state happens before indices are built update_docs_before_topoff1(#{db1 := Db}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin add_test_docs(Db, #{docs => 10}), intercept_state(topoff1), @@ -177,12 +178,12 @@ update_docs_before_topoff1(#{db1 := Db}) -> ?assertEqual(Docs0, Docs1), ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1)) - end). + end)}. % This test that indices are built during shard splitting. 
indices_are_built(#{db1 := Db}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name}, HaveHastings = code:lib_dir(hastings) /= {error, bad_name}, @@ -206,7 +207,7 @@ indices_are_built(#{db1 := Db}) -> % 4 because there are 2 indices and 2 target shards ?assertEqual(4, meck:num_calls(hastings_index, await, 2)) end - end). + end)}. mock_dreyfus_indices() -> @@ -238,7 +239,7 @@ mock_hastings_indices() -> % Split partitioned database split_partitioned_db(#{db2 := Db}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin DocSpec = #{ pdocs => #{ <<"PX">> => 5, @@ -304,14 +305,14 @@ split_partitioned_db(#{db2 := Db}) -> % Don't forget about the local but don't include internal checkpoints % as some of those are munged and transformed during the split ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1)) - end). + end)}. % Make sure a shard can be split again after it was split once. This checks that % too many got added to some range, such that on next split they'd fail to fit % in to any of the new target ranges. split_twice(#{db1 := Db}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin DocSpec = #{docs => 100, delete => [80, 99], mrview => 2, local => 100}, add_test_docs(Db, DocSpec), @@ -390,11 +391,11 @@ split_twice(#{db1 := Db}) -> ?assertEqual(trunc(UpdateSeq1 * 1.5), UpdateSeq2), ?assertEqual(Docs1, Docs2), ?assertEqual(without_meta_locals(Local1), without_meta_locals(Local2)) - end). + end)}. couch_events_are_emitted(#{db1 := Db}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin couch_event:register_all(self()), % Split the one shard @@ -425,11 +426,11 @@ couch_events_are_emitted(#{db1 := Db}) -> StartAtDeleted = lists:dropwhile(fun(E) -> E =/= deleted end, Events), ?assertMatch([deleted, deleted, updated, updated | _], StartAtDeleted), couch_event:unregister(self()) - end). + end)}. retries_work(#{db1 := Db}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin meck:expect(couch_db_split, split, fun(_, _, _) -> error(kapow) end), @@ -439,11 +440,11 @@ retries_work(#{db1 := Db}) -> wait_state(JobId, failed), ?assertEqual(3, meck:num_calls(couch_db_split, split, 3)) - end). + end)}. target_reset_in_initial_copy(#{db1 := Db}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [#shard{} = Src] = lists:sort(mem3:local_shards(Db)), Job = #job{ source = Src, @@ -465,17 +466,17 @@ target_reset_in_initial_copy(#{db1 := Db}) -> exit(JobPid, kill), exit(BogusParent, kill), ?assertEqual(2, meck:num_calls(couch_db_split, cleanup_target, 2)) - end). + end)}. split_an_incomplete_shard_map(#{db1 := Db}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [#shard{} = Src] = lists:sort(mem3:local_shards(Db)), [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)), meck:expect(mem3_util, calculate_max_n, 1, 0), ?assertMatch({error, {not_enough_shard_copies, _}}, mem3_reshard:start_split_job(Shard)) - end). + end)}. intercept_state(State) -> diff --git a/src/mem3/test/mem3_sync_security_test.erl b/src/mem3/test/mem3_sync_security_test.erl index 4e06dd807..e67a72017 100644 --- a/src/mem3/test/mem3_sync_security_test.erl +++ b/src/mem3/test/mem3_sync_security_test.erl @@ -17,16 +17,38 @@ -include("mem3.hrl"). -include_lib("eunit/include/eunit.hrl"). 
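%% The rewrite below replaces a monolithic go_test/0 with the eunit
%% scaffolding used elsewhere in this series: an outer {setup, Start, Stop,
%% Inner} term boots the couch applications once, and an inner {foreach,
%% Setup, Teardown, Tests} term re-mocks fabric around each test, so fixture
%% time is no longer charged against the test's own timeout. The nesting,
%% condensed from the diff that follows:
%%
%%     go_test_() ->
%%         {setup, fun start_couch/0, fun stop_couch/1,
%%             {foreach, fun setup/0, fun teardown/1,
%%                 [fun sync_security_ok/1]}}.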
-go_test() -> - Ctx = test_util:start_couch([fabric, mem3]), - try - ok = meck:new(fabric, [passthrough]), - meck:expect(fabric, all_dbs, fun() -> - {ok, [<<"NoExistDb1">>, <<"NoExistDb2">>]} - end), - Result = mem3_sync_security:go(), - ?assertEqual(ok, Result) - after - meck:unload(), - test_util:stop_couch(Ctx) - end. +-define(TIMEOUT, 5). % seconds + +go_test_() -> + { + "security property sync test", + { + setup, + fun start_couch/0, fun stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun sync_security_ok/1 + ] + } + } + }. + +start_couch() -> + test_util:start_couch([fabric, mem3]). + +stop_couch(Ctx) -> + test_util:stop_couch(Ctx). + +setup() -> + ok = meck:new(fabric, [passthrough]), + meck:expect(fabric, all_dbs, fun() -> + {ok, [<<"NoExistDb1">>, <<"NoExistDb2">>]} + end). + +teardown(_) -> + meck:unload(). + +sync_security_ok(_) -> + {timeout, ?TIMEOUT, ?_assertEqual(ok, mem3_sync_security:go())}. -- cgit v1.2.1 From f33378b38e45629c5934e739835571fbbd28bdda Mon Sep 17 00:00:00 2001 From: ILYA Khlopotov Date: Thu, 27 Jun 2019 10:35:05 +0000 Subject: Fix credo complains for dreyfus --- src/dreyfus/test/elixir/test/partition_search_test.exs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/dreyfus/test/elixir/test/partition_search_test.exs b/src/dreyfus/test/elixir/test/partition_search_test.exs index 052a41ad1..4400d7b7f 100644 --- a/src/dreyfus/test/elixir/test/partition_search_test.exs +++ b/src/dreyfus/test/elixir/test/partition_search_test.exs @@ -26,12 +26,12 @@ defmodule PartitionSearchTest do end def create_ddoc(db_name, opts \\ %{}) do - indexFn = "function(doc) {\n if (doc.some) {\n index('some', doc.some);\n }\n}" + index_fn = "function(doc) {\n if (doc.some) {\n index('some', doc.some);\n }\n}" default_ddoc = %{ indexes: %{ books: %{ analyzer: %{name: "standard"}, - index: indexFn + index: index_fn } } } -- cgit v1.2.1 From d5849629a99603904ce94ce561916ebb66d04600 Mon Sep 17 00:00:00 2001 From: ILYA Khlopotov Date: Thu, 9 May 2019 21:49:32 +0000 Subject: Minimal ExUnit setup --- .credo.exs | 164 ++++++++++++++++++++++++++++++++++ .formatter.exs | 9 ++ .gitignore | 7 ++ Makefile | 18 +++- Makefile.win | 14 +++ config/config.exs | 30 +++++++ config/dev.exs | 1 + config/prod.exs | 1 + config/test.exs | 12 +++ mix.exs | 47 ++++++++++ mix.lock | 9 ++ src/couch/test/exunit/test_helper.exs | 2 + test/elixir/.credo.exs | 156 -------------------------------- 13 files changed, 312 insertions(+), 158 deletions(-) create mode 100644 .credo.exs create mode 100644 .formatter.exs create mode 100644 config/config.exs create mode 100644 config/dev.exs create mode 100644 config/prod.exs create mode 100644 config/test.exs create mode 100644 mix.exs create mode 100644 mix.lock create mode 100644 src/couch/test/exunit/test_helper.exs delete mode 100644 test/elixir/.credo.exs diff --git a/.credo.exs b/.credo.exs new file mode 100644 index 000000000..db301e4d0 --- /dev/null +++ b/.credo.exs @@ -0,0 +1,164 @@ +# This file contains the configuration for Credo and you are probably reading +# this after creating it with `mix credo.gen.config`. +# +# If you find anything wrong or unclear in this file, please report an +# issue on GitHub: https://github.com/rrrene/credo/issues +# +%{ + # + # You can have as many configs as you like in the `configs:` field. + configs: [ + %{ + # + # Run any exec using `mix credo -C `. If no exec name is given + # "default" is used. 
+ # + name: "default", + # + # These are the files included in the analysis: + files: %{ + # + # You can give explicit globs or simply directories. + # In the latter case `**/*.{ex,exs}` will be used. + # + included: ["lib/", "src/", "test/", "web/", "apps/"], + excluded: [ + ~r"/_build/", + ~r"/node_modules/", + ~r"/src/jason", + ~r"/src/httpotion", + ~r"/src/credo", + ~r"/src/junit_formatter", + ~r"/src/bunt" + ] + }, + # + # If you create your own checks, you must specify the source files for + # them here, so they can be loaded by Credo before running the analysis. + # + requires: [], + # + # If you want to enforce a style guide and need a more traditional linting + # experience, you can change `strict` to `true` below: + # + strict: false, + # + # If you want to use uncolored output by default, you can change `color` + # to `false` below: + # + color: true, + # + # You can customize the parameters of any check by adding a second element + # to the tuple. + # + # To disable a check put `false` as second element: + # + # {Credo.Check.Design.DuplicatedCode, false} + # + checks: [ + # + ## Consistency Checks + # + {Credo.Check.Consistency.ExceptionNames, []}, + {Credo.Check.Consistency.LineEndings, []}, + {Credo.Check.Consistency.ParameterPatternMatching, false}, + {Credo.Check.Consistency.SpaceAroundOperators, []}, + {Credo.Check.Consistency.SpaceInParentheses, []}, + {Credo.Check.Consistency.TabsOrSpaces, []}, + + # + ## Design Checks + # + # You can customize the priority of any check + # Priority values are: `low, normal, high, higher` + # + {Credo.Check.Design.AliasUsage, + [priority: :low, if_nested_deeper_than: 2, if_called_more_often_than: 0]}, + # You can also customize the exit_status of each check. + # If you don't want TODO comments to cause `mix credo` to fail, just + # set this value to 0 (zero). 
+ # + {Credo.Check.Design.TagTODO, false}, + {Credo.Check.Design.TagFIXME, []}, + + # + ## Readability Checks + # + {Credo.Check.Readability.AliasOrder, []}, + {Credo.Check.Readability.FunctionNames, []}, + {Credo.Check.Readability.LargeNumbers, []}, + {Credo.Check.Readability.MaxLineLength, [priority: :normal, max_length: 90]}, + {Credo.Check.Readability.ModuleAttributeNames, []}, + {Credo.Check.Readability.ModuleDoc, []}, + {Credo.Check.Readability.ModuleNames, []}, + {Credo.Check.Readability.ParenthesesInCondition, []}, + {Credo.Check.Readability.ParenthesesOnZeroArityDefs, []}, + {Credo.Check.Readability.PredicateFunctionNames, []}, + {Credo.Check.Readability.PreferImplicitTry, []}, + {Credo.Check.Readability.RedundantBlankLines, []}, + {Credo.Check.Readability.Semicolons, []}, + {Credo.Check.Readability.SpaceAfterCommas, []}, + {Credo.Check.Readability.StringSigils, []}, + {Credo.Check.Readability.TrailingBlankLine, []}, + {Credo.Check.Readability.TrailingWhiteSpace, []}, + {Credo.Check.Readability.VariableNames, []}, + + # + ## Refactoring Opportunities + # + {Credo.Check.Refactor.CondStatements, []}, + {Credo.Check.Refactor.CyclomaticComplexity, false}, + {Credo.Check.Refactor.FunctionArity, []}, + {Credo.Check.Refactor.LongQuoteBlocks, false}, + {Credo.Check.Refactor.MapInto, []}, + {Credo.Check.Refactor.MatchInCondition, []}, + {Credo.Check.Refactor.NegatedConditionsInUnless, []}, + {Credo.Check.Refactor.NegatedConditionsWithElse, []}, + {Credo.Check.Refactor.Nesting, false}, + {Credo.Check.Refactor.PipeChainStart, + [ + excluded_argument_types: [:atom, :binary, :fn, :keyword], + excluded_functions: [] + ]}, + {Credo.Check.Refactor.UnlessWithElse, []}, + + # + ## Warnings + # + {Credo.Check.Warning.BoolOperationOnSameValues, []}, + {Credo.Check.Warning.ExpensiveEmptyEnumCheck, []}, + {Credo.Check.Warning.IExPry, []}, + {Credo.Check.Warning.IoInspect, []}, + {Credo.Check.Warning.LazyLogging, []}, + {Credo.Check.Warning.OperationOnSameValues, []}, + {Credo.Check.Warning.OperationWithConstantResult, []}, + {Credo.Check.Warning.RaiseInsideRescue, []}, + {Credo.Check.Warning.UnusedEnumOperation, []}, + {Credo.Check.Warning.UnusedFileOperation, []}, + {Credo.Check.Warning.UnusedKeywordOperation, []}, + {Credo.Check.Warning.UnusedListOperation, []}, + {Credo.Check.Warning.UnusedPathOperation, []}, + {Credo.Check.Warning.UnusedRegexOperation, []}, + {Credo.Check.Warning.UnusedStringOperation, []}, + {Credo.Check.Warning.UnusedTupleOperation, []}, + + # + # Controversial and experimental checks (opt-in, just remove `, false`) + # + {Credo.Check.Consistency.MultiAliasImportRequireUse, false}, + {Credo.Check.Design.DuplicatedCode, false}, + {Credo.Check.Readability.Specs, false}, + {Credo.Check.Refactor.ABCSize, false}, + {Credo.Check.Refactor.AppendSingleItem, false}, + {Credo.Check.Refactor.DoubleBooleanNegation, false}, + {Credo.Check.Refactor.VariableRebinding, false}, + {Credo.Check.Warning.MapGetUnsafePass, false}, + {Credo.Check.Warning.UnsafeToAtom, false} + + # + # Custom checks can be created using `mix credo.gen.check`. 
+ # + ] + } + ] +} diff --git a/.formatter.exs b/.formatter.exs new file mode 100644 index 000000000..28b883d54 --- /dev/null +++ b/.formatter.exs @@ -0,0 +1,9 @@ +# Used by "mix format" +[ + inputs: [ + "{mix,.formatter}.exs", + "{config,src}/*/test/exunit/*.{ex,exs}" + ], + line_length: 90, + rename_deprecated_at: "1.5.0" +] diff --git a/.gitignore b/.gitignore index 36bc13007..6b9198d42 100644 --- a/.gitignore +++ b/.gitignore @@ -104,3 +104,10 @@ src/global_changes/ebin/ src/mango/ebin/ src/mango/test/*.pyc src/mango/venv/ + +/_build/ +/src/bunt +/src/credo/ +/src/httpotion/ +/src/jason/ +/src/junit_formatter/ diff --git a/Makefile b/Makefile index 0acf8284d..3d1be5149 100644 --- a/Makefile +++ b/Makefile @@ -173,6 +173,20 @@ eunit: couch $(REBAR) -r eunit $(EUNIT_OPTS) apps=$$dir || exit 1; \ done +.PHONY: exunit +# target: exunit - Run ExUnit tests +exunit: export BUILDDIR = $(shell pwd) +exunit: export MIX_ENV=test +exunit: export ERL_LIBS = $(shell pwd)/src +exunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config +exunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell pwd)/bin/couchjs $(shell pwd)/share/server/main.js +exunit: couch elixir-check-formatted elixir-credo + @mix local.hex --force + @mix local.rebar rebar ${REBAR} --force + @mix deps.get + @$(REBAR) setup_eunit 2> /dev/null + @mix test --trace $(EXUNIT_OPTS) + setup-eunit: export BUILDDIR = $(shell pwd) setup-eunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config setup-eunit: @@ -233,13 +247,13 @@ elixir-cluster-with-quorum: elixir-check-formatted elixir-credo devclean .PHONY: elixir-check-formatted elixir-check-formatted: - @cd test/elixir/ && mix format --check-formatted + mix format --check-formatted # Credo is a static code analysis tool for Elixir. # We use it in our tests .PHONY: elixir-credo elixir-credo: - @cd test/elixir/ && mix credo + mix credo .PHONY: javascript # target: javascript - Run JavaScript test suites or specific ones defined by suites option diff --git a/Makefile.win b/Makefile.win index 99ec71278..2c4dc9821 100644 --- a/Makefile.win +++ b/Makefile.win @@ -144,6 +144,20 @@ eunit: couch @$(REBAR) setup_eunit 2> nul @$(REBAR) -r eunit $(EUNIT_OPTS) +.PHONY: exunit +# target: exunit - Run ExUnit tests +exunit: export BUILDDIR = $(shell echo %cd%) +exunit: export MIX_ENV=test +exunit: export ERL_LIBS = $(shell echo %cd%)\src +exunit: export ERL_AFLAGS = -config $(shell echo %cd%)/rel/files/eunit.config +exunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell echo %cd%)/bin/couchjs $(shell echo %cd%)/share/server/main.js +exunit: couch elixir-check-formatted elixir-credo + @mix local.hex --force + @mix local.rebar rebar ${REBAR} --force + @mix deps.get + @$(REBAR) setup_eunit 2> nul + @mix test --trace $(EXUNIT_OPTS) + setup-eunit: export BUILDDIR = $(shell pwd) setup-eunit: export ERL_AFLAGS = $(shell echo "-config rel/files/eunit.config") setup-eunit: diff --git a/config/config.exs b/config/config.exs new file mode 100644 index 000000000..8e52433cc --- /dev/null +++ b/config/config.exs @@ -0,0 +1,30 @@ +# This file is responsible for configuring your application +# and its dependencies with the aid of the Mix.Config module. +use Mix.Config + +# This configuration is loaded before any dependency and is restricted +# to this project. If another project depends on this project, this +# file won't be loaded nor affect the parent project. 
For this reason, +# if you want to provide default values for your application for +# 3rd-party users, it should be done in your "mix.exs" file. + +# You can configure your application as: +# +# config :couchdbtest, key: :value +# +# and access this configuration in your application as: +# +# Application.get_env(:couchdbtest, :key) +# +# You can also configure a 3rd-party app: +# +# config :logger, level: :info +# + +# It is also possible to import configuration files, relative to this +# directory. For example, you can emulate configuration per environment +# by uncommenting the line below and defining dev.exs, test.exs and such. +# Configuration from the imported file will override the ones defined +# here (which is why it is important to import them last). +# +import_config "#{Mix.env}.exs" \ No newline at end of file diff --git a/config/dev.exs b/config/dev.exs new file mode 100644 index 000000000..d2d855e6d --- /dev/null +++ b/config/dev.exs @@ -0,0 +1 @@ +use Mix.Config diff --git a/config/prod.exs b/config/prod.exs new file mode 100644 index 000000000..d2d855e6d --- /dev/null +++ b/config/prod.exs @@ -0,0 +1 @@ +use Mix.Config diff --git a/config/test.exs b/config/test.exs new file mode 100644 index 000000000..c5a5ed24a --- /dev/null +++ b/config/test.exs @@ -0,0 +1,12 @@ +use Mix.Config + +config :logger, + backends: [:console], + compile_time_purge_level: :debug, + level: :debug + +config :kernel, + error_logger: false + +config :sasl, + sasl_error_logger: false diff --git a/mix.exs b/mix.exs new file mode 100644 index 000000000..d9c8c2160 --- /dev/null +++ b/mix.exs @@ -0,0 +1,47 @@ +defmodule CouchDBTest.Mixfile do + use Mix.Project + + def project do + [ + app: :couchdbtest, + version: "0.1.0", + elixir: "~> 1.5", + lockfile: Path.expand("mix.lock", __DIR__), + deps_path: Path.expand("src", __DIR__), + build_path: Path.expand("_build", __DIR__), + compilers: [:elixir, :app], + start_permanent: Mix.env() == :prod, + build_embedded: Mix.env() == :prod, + deps: deps(), + consolidate_protocols: Mix.env() not in [:test, :dev], + test_paths: get_test_paths(), + elixirc_paths: elixirc_paths(Mix.env()) + ] + end + + # Run "mix help compile.app" to learn about applications. + def application do + [ + extra_applications: [:logger] + ] + end + + # Specifies which paths to compile per environment. + defp elixirc_paths(:test), do: ["test/elixir/lib", "test/elixir/test/support"] + defp elixirc_paths(_), do: ["test/elixir/lib"] + + # Run "mix help deps" to learn about dependencies. 
+ defp deps() do + [ + {:httpotion, "~> 3.0", only: [:dev, :test], runtime: false}, + {:jiffy, path: Path.expand("src/jiffy", __DIR__)}, + {:ibrowse, + path: Path.expand("src/ibrowse", __DIR__), override: true, compile: false}, + {:credo, "~> 1.0.0", only: [:dev, :test], runtime: false} + ] + end + + def get_test_paths do + Path.wildcard("src/*/test/exunit") |> Enum.filter(&File.dir?/1) + end +end diff --git a/mix.lock b/mix.lock new file mode 100644 index 000000000..34c0ea961 --- /dev/null +++ b/mix.lock @@ -0,0 +1,9 @@ +%{ + "bunt": {:hex, :bunt, "0.2.0", "951c6e801e8b1d2cbe58ebbd3e616a869061ddadcc4863d0a2182541acae9a38", [:mix], [], "hexpm"}, + "credo": {:hex, :credo, "1.0.5", "fdea745579f8845315fe6a3b43e2f9f8866839cfbc8562bb72778e9fdaa94214", [:mix], [{:bunt, "~> 0.2.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm"}, + "httpotion": {:hex, :httpotion, "3.1.2", "50e3e559c2ffe8c8908c97e4ffb01efc1c18e8547cc7ce5dd173c9cf0a573a3b", [:mix], [{:ibrowse, "== 4.4.0", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm"}, + "ibrowse": {:hex, :ibrowse, "4.4.0", "2d923325efe0d2cb09b9c6a047b2835a5eda69d8a47ed6ff8bc03628b764e991", [:rebar3], [], "hexpm"}, + "jason": {:hex, :jason, "1.1.2", "b03dedea67a99223a2eaf9f1264ce37154564de899fd3d8b9a21b1a6fd64afe7", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm"}, + "jiffy": {:hex, :jiffy, "0.15.2", "de266c390111fd4ea28b9302f0bc3d7472468f3b8e0aceabfbefa26d08cd73b7", [:rebar3], [], "hexpm"}, + "junit_formatter": {:hex, :junit_formatter, "3.0.0", "13950d944dbd295da7d8cc4798b8faee808a8bb9b637c88069954eac078ac9da", [:mix], [], "hexpm"}, +} diff --git a/src/couch/test/exunit/test_helper.exs b/src/couch/test/exunit/test_helper.exs new file mode 100644 index 000000000..314050085 --- /dev/null +++ b/src/couch/test/exunit/test_helper.exs @@ -0,0 +1,2 @@ +ExUnit.configure(formatters: [JUnitFormatter, ExUnit.CLIFormatter]) +ExUnit.start() diff --git a/test/elixir/.credo.exs b/test/elixir/.credo.exs deleted file mode 100644 index e24836c8f..000000000 --- a/test/elixir/.credo.exs +++ /dev/null @@ -1,156 +0,0 @@ -# This file contains the configuration for Credo and you are probably reading -# this after creating it with `mix credo.gen.config`. -# -# If you find anything wrong or unclear in this file, please report an -# issue on GitHub: https://github.com/rrrene/credo/issues -# -%{ - # - # You can have as many configs as you like in the `configs:` field. - configs: [ - %{ - # - # Run any exec using `mix credo -C `. If no exec name is given - # "default" is used. - # - name: "default", - # - # These are the files included in the analysis: - files: %{ - # - # You can give explicit globs or simply directories. - # In the latter case `**/*.{ex,exs}` will be used. - # - included: ["lib/", "src/", "test/", "web/", "apps/"], - excluded: [~r"/_build/", ~r"/deps/", ~r"/node_modules/"] - }, - # - # If you create your own checks, you must specify the source files for - # them here, so they can be loaded by Credo before running the analysis. - # - requires: [], - # - # If you want to enforce a style guide and need a more traditional linting - # experience, you can change `strict` to `true` below: - # - strict: false, - # - # If you want to use uncolored output by default, you can change `color` - # to `false` below: - # - color: true, - # - # You can customize the parameters of any check by adding a second element - # to the tuple. 
- # - # To disable a check put `false` as second element: - # - # {Credo.Check.Design.DuplicatedCode, false} - # - checks: [ - # - ## Consistency Checks - # - {Credo.Check.Consistency.ExceptionNames, []}, - {Credo.Check.Consistency.LineEndings, []}, - {Credo.Check.Consistency.ParameterPatternMatching, false}, - {Credo.Check.Consistency.SpaceAroundOperators, []}, - {Credo.Check.Consistency.SpaceInParentheses, []}, - {Credo.Check.Consistency.TabsOrSpaces, []}, - - # - ## Design Checks - # - # You can customize the priority of any check - # Priority values are: `low, normal, high, higher` - # - {Credo.Check.Design.AliasUsage, - [priority: :low, if_nested_deeper_than: 2, if_called_more_often_than: 0]}, - # You can also customize the exit_status of each check. - # If you don't want TODO comments to cause `mix credo` to fail, just - # set this value to 0 (zero). - # - {Credo.Check.Design.TagTODO, false}, - {Credo.Check.Design.TagFIXME, []}, - - # - ## Readability Checks - # - {Credo.Check.Readability.AliasOrder, []}, - {Credo.Check.Readability.FunctionNames, []}, - {Credo.Check.Readability.LargeNumbers, []}, - {Credo.Check.Readability.MaxLineLength, [priority: :normal, max_length: 90]}, - {Credo.Check.Readability.ModuleAttributeNames, []}, - {Credo.Check.Readability.ModuleDoc, []}, - {Credo.Check.Readability.ModuleNames, []}, - {Credo.Check.Readability.ParenthesesInCondition, []}, - {Credo.Check.Readability.ParenthesesOnZeroArityDefs, []}, - {Credo.Check.Readability.PredicateFunctionNames, []}, - {Credo.Check.Readability.PreferImplicitTry, []}, - {Credo.Check.Readability.RedundantBlankLines, []}, - {Credo.Check.Readability.Semicolons, []}, - {Credo.Check.Readability.SpaceAfterCommas, []}, - {Credo.Check.Readability.StringSigils, []}, - {Credo.Check.Readability.TrailingBlankLine, []}, - {Credo.Check.Readability.TrailingWhiteSpace, []}, - {Credo.Check.Readability.VariableNames, []}, - - # - ## Refactoring Opportunities - # - {Credo.Check.Refactor.CondStatements, []}, - {Credo.Check.Refactor.CyclomaticComplexity, false}, - {Credo.Check.Refactor.FunctionArity, []}, - {Credo.Check.Refactor.LongQuoteBlocks, false}, - {Credo.Check.Refactor.MapInto, []}, - {Credo.Check.Refactor.MatchInCondition, []}, - {Credo.Check.Refactor.NegatedConditionsInUnless, []}, - {Credo.Check.Refactor.NegatedConditionsWithElse, []}, - {Credo.Check.Refactor.Nesting, false}, - {Credo.Check.Refactor.PipeChainStart, - [ - excluded_argument_types: [:atom, :binary, :fn, :keyword], - excluded_functions: [] - ]}, - {Credo.Check.Refactor.UnlessWithElse, []}, - - # - ## Warnings - # - {Credo.Check.Warning.BoolOperationOnSameValues, []}, - {Credo.Check.Warning.ExpensiveEmptyEnumCheck, []}, - {Credo.Check.Warning.IExPry, []}, - {Credo.Check.Warning.IoInspect, []}, - {Credo.Check.Warning.LazyLogging, []}, - {Credo.Check.Warning.OperationOnSameValues, []}, - {Credo.Check.Warning.OperationWithConstantResult, []}, - {Credo.Check.Warning.RaiseInsideRescue, []}, - {Credo.Check.Warning.UnusedEnumOperation, []}, - {Credo.Check.Warning.UnusedFileOperation, []}, - {Credo.Check.Warning.UnusedKeywordOperation, []}, - {Credo.Check.Warning.UnusedListOperation, []}, - {Credo.Check.Warning.UnusedPathOperation, []}, - {Credo.Check.Warning.UnusedRegexOperation, []}, - {Credo.Check.Warning.UnusedStringOperation, []}, - {Credo.Check.Warning.UnusedTupleOperation, []}, - - # - # Controversial and experimental checks (opt-in, just remove `, false`) - # - {Credo.Check.Consistency.MultiAliasImportRequireUse, false}, - {Credo.Check.Design.DuplicatedCode, false}, 
- {Credo.Check.Readability.Specs, false}, - {Credo.Check.Refactor.ABCSize, false}, - {Credo.Check.Refactor.AppendSingleItem, false}, - {Credo.Check.Refactor.DoubleBooleanNegation, false}, - {Credo.Check.Refactor.VariableRebinding, false}, - {Credo.Check.Warning.MapGetUnsafePass, false}, - {Credo.Check.Warning.UnsafeToAtom, false} - - # - # Custom checks can be created using `mix credo.gen.check`. - # - ] - } - ] -} -- cgit v1.2.1 From cf60cffb44bf18889aa8362b616bafb7bed87445 Mon Sep 17 00:00:00 2001 From: ILYA Khlopotov Date: Fri, 10 May 2019 00:45:21 +0000 Subject: Move eunit tests into test/eunit directory --- src/chttpd/test/chttpd_cors_test.erl | 564 ----------- src/chttpd/test/chttpd_csp_tests.erl | 81 -- .../test/chttpd_db_attachment_size_tests.erl | 206 ---- .../test/chttpd_db_bulk_get_multipart_test.erl | 313 ------ src/chttpd/test/chttpd_db_bulk_get_test.erl | 341 ------- src/chttpd/test/chttpd_db_doc_size_tests.erl | 179 ---- src/chttpd/test/chttpd_db_test.erl | 437 --------- src/chttpd/test/chttpd_dbs_info_test.erl | 169 ---- src/chttpd/test/chttpd_error_info_tests.erl | 168 ---- src/chttpd/test/chttpd_handlers_tests.erl | 87 -- src/chttpd/test/chttpd_open_revs_error_test.erl | 112 --- src/chttpd/test/chttpd_plugin_tests.erl | 187 ---- src/chttpd/test/chttpd_prefer_header_test.erl | 112 --- src/chttpd/test/chttpd_purge_tests.erl | 406 -------- src/chttpd/test/chttpd_security_tests.erl | 384 -------- src/chttpd/test/chttpd_socket_buffer_size_test.erl | 127 --- src/chttpd/test/chttpd_view_test.erl | 123 --- src/chttpd/test/chttpd_welcome_test.erl | 104 -- src/chttpd/test/chttpd_xframe_test.erl | 84 -- src/chttpd/test/eunit/chttpd_cors_test.erl | 564 +++++++++++ src/chttpd/test/eunit/chttpd_csp_tests.erl | 81 ++ .../test/eunit/chttpd_db_attachment_size_tests.erl | 206 ++++ .../eunit/chttpd_db_bulk_get_multipart_test.erl | 313 ++++++ src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl | 341 +++++++ src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl | 179 ++++ src/chttpd/test/eunit/chttpd_db_test.erl | 437 +++++++++ src/chttpd/test/eunit/chttpd_dbs_info_test.erl | 169 ++++ src/chttpd/test/eunit/chttpd_error_info_tests.erl | 168 ++++ src/chttpd/test/eunit/chttpd_handlers_tests.erl | 87 ++ .../test/eunit/chttpd_open_revs_error_test.erl | 112 +++ src/chttpd/test/eunit/chttpd_plugin_tests.erl | 187 ++++ .../test/eunit/chttpd_prefer_header_test.erl | 112 +++ src/chttpd/test/eunit/chttpd_purge_tests.erl | 406 ++++++++ src/chttpd/test/eunit/chttpd_security_tests.erl | 384 ++++++++ .../test/eunit/chttpd_socket_buffer_size_test.erl | 127 +++ src/chttpd/test/eunit/chttpd_view_test.erl | 123 +++ src/chttpd/test/eunit/chttpd_welcome_test.erl | 104 ++ src/chttpd/test/eunit/chttpd_xframe_test.erl | 84 ++ src/couch/include/couch_eunit.hrl | 2 +- src/couch/test/chttpd_endpoints_tests.erl | 184 ---- src/couch/test/couch_auth_cache_tests.erl | 384 -------- src/couch/test/couch_base32_tests.erl | 28 - src/couch/test/couch_bt_engine_compactor_tests.erl | 130 --- src/couch/test/couch_bt_engine_tests.erl | 20 - src/couch/test/couch_bt_engine_upgrade_tests.erl | 243 ----- src/couch/test/couch_btree_tests.erl | 572 ----------- src/couch/test/couch_changes_tests.erl | 1001 -------------------- src/couch/test/couch_compress_tests.erl | 85 -- src/couch/test/couch_db_doc_tests.erl | 113 --- src/couch/test/couch_db_mpr_tests.erl | 137 --- src/couch/test/couch_db_plugin_tests.erl | 205 ---- src/couch/test/couch_db_props_upgrade_tests.erl | 83 -- src/couch/test/couch_db_split_tests.erl | 302 ------ 
src/couch/test/couch_db_tests.erl | 171 ----
src/couch/test/couch_doc_json_tests.erl | 493 ----
src/couch/test/couch_doc_tests.erl | 145 ---
src/couch/test/couch_ejson_size_tests.erl | 72 --
src/couch/test/couch_etag_tests.erl | 30 -
src/couch/test/couch_file_tests.erl | 533 -----
src/couch/test/couch_flags_config_tests.erl | 116 ---
src/couch/test/couch_flags_tests.erl | 150 ---
src/couch/test/couch_hotp_tests.erl | 28 -
src/couch/test/couch_index_tests.erl | 234 -----
src/couch/test/couch_key_tree_prop_tests.erl | 530 -----
src/couch/test/couch_key_tree_tests.erl | 413 --------
src/couch/test/couch_passwords_tests.erl | 54 --
src/couch/test/couch_query_servers_tests.erl | 95 --
src/couch/test/couch_server_tests.erl | 310 ------
src/couch/test/couch_stream_tests.erl | 124 ---
src/couch/test/couch_task_status_tests.erl | 233 -----
src/couch/test/couch_totp_tests.erl | 55 --
src/couch/test/couch_util_tests.erl | 170 ----
src/couch/test/couch_uuids_tests.erl | 155 ---
src/couch/test/couch_work_queue_tests.erl | 402 --------
src/couch/test/couchdb_attachments_tests.erl | 765 ---------
src/couch/test/couchdb_auth_tests.erl | 96 --
src/couch/test/couchdb_cookie_domain_tests.erl | 69 --
src/couch/test/couchdb_cors_tests.erl | 344 -------
src/couch/test/couchdb_db_tests.erl | 91 --
src/couch/test/couchdb_design_doc_tests.erl | 88 --
src/couch/test/couchdb_file_compression_tests.erl | 260 -----
src/couch/test/couchdb_location_header_tests.erl | 78 --
src/couch/test/couchdb_mrview_cors_tests.erl | 140 ---
src/couch/test/couchdb_mrview_tests.erl | 253 -----
src/couch/test/couchdb_os_proc_pool.erl | 306 ------
src/couch/test/couchdb_update_conflicts_tests.erl | 281 ------
src/couch/test/couchdb_vhosts_tests.erl | 272 ------
src/couch/test/couchdb_views_tests.erl | 669 -------------
src/couch/test/eunit/chttpd_endpoints_tests.erl | 184 ++++
src/couch/test/eunit/couch_auth_cache_tests.erl | 384 ++++++++
src/couch/test/eunit/couch_base32_tests.erl | 28 +
.../test/eunit/couch_bt_engine_compactor_tests.erl | 130 +++
src/couch/test/eunit/couch_bt_engine_tests.erl | 20 +
.../test/eunit/couch_bt_engine_upgrade_tests.erl | 243 +++
src/couch/test/eunit/couch_btree_tests.erl | 572 +++++++++
src/couch/test/eunit/couch_changes_tests.erl | 1001 ++++++++++++++++
src/couch/test/eunit/couch_compress_tests.erl | 85 ++
src/couch/test/eunit/couch_db_doc_tests.erl | 113 +++
src/couch/test/eunit/couch_db_mpr_tests.erl | 137 +++
src/couch/test/eunit/couch_db_plugin_tests.erl | 205 ++++
.../test/eunit/couch_db_props_upgrade_tests.erl | 83 ++
src/couch/test/eunit/couch_db_split_tests.erl | 302 ++++++
src/couch/test/eunit/couch_db_tests.erl | 171 ++++
src/couch/test/eunit/couch_doc_json_tests.erl | 493 ++++++++
src/couch/test/eunit/couch_doc_tests.erl | 145 +++
src/couch/test/eunit/couch_ejson_size_tests.erl | 72 ++
src/couch/test/eunit/couch_etag_tests.erl | 30 +
src/couch/test/eunit/couch_file_tests.erl | 533 +++++++++
src/couch/test/eunit/couch_flags_config_tests.erl | 116 +++
src/couch/test/eunit/couch_flags_tests.erl | 150 +++
src/couch/test/eunit/couch_hotp_tests.erl | 28 +
src/couch/test/eunit/couch_index_tests.erl | 234 +++
src/couch/test/eunit/couch_key_tree_prop_tests.erl | 530 +++++++
src/couch/test/eunit/couch_key_tree_tests.erl | 413 ++++++
src/couch/test/eunit/couch_passwords_tests.erl | 54 ++
src/couch/test/eunit/couch_query_servers_tests.erl | 95 ++
src/couch/test/eunit/couch_server_tests.erl | 310 ++++++
src/couch/test/eunit/couch_stream_tests.erl | 124 +++
src/couch/test/eunit/couch_task_status_tests.erl | 233 +++
src/couch/test/eunit/couch_totp_tests.erl | 55 ++
src/couch/test/eunit/couch_util_tests.erl | 170 ++++
src/couch/test/eunit/couch_uuids_tests.erl | 155 +++
src/couch/test/eunit/couch_work_queue_tests.erl | 402 ++++++++
src/couch/test/eunit/couchdb_attachments_tests.erl | 765 +++++++++++++++
src/couch/test/eunit/couchdb_auth_tests.erl | 96 ++
.../test/eunit/couchdb_cookie_domain_tests.erl | 69 ++
src/couch/test/eunit/couchdb_cors_tests.erl | 344 +++++++
src/couch/test/eunit/couchdb_db_tests.erl | 91 ++
src/couch/test/eunit/couchdb_design_doc_tests.erl | 88 ++
.../test/eunit/couchdb_file_compression_tests.erl | 260 +++++
.../test/eunit/couchdb_location_header_tests.erl | 78 ++
src/couch/test/eunit/couchdb_mrview_cors_tests.erl | 140 +++
src/couch/test/eunit/couchdb_mrview_tests.erl | 253 +++++
src/couch/test/eunit/couchdb_os_proc_pool.erl | 306 ++++++
.../test/eunit/couchdb_update_conflicts_tests.erl | 281 ++++++
src/couch/test/eunit/couchdb_vhosts_tests.erl | 272 ++++++
src/couch/test/eunit/couchdb_views_tests.erl | 669 +++++++++++++
.../fixtures/3b835456c235b1827e012e25666152f3.view | Bin 0 -> 4192 bytes
.../test/eunit/fixtures/couch_stats_aggregates.cfg | 19 +
.../test/eunit/fixtures/couch_stats_aggregates.ini | 20 +
.../test/eunit/fixtures/db_non_partitioned.couch | Bin 0 -> 12479 bytes
.../eunit/fixtures/db_v6_with_1_purge_req.couch | Bin 0 -> 12470 bytes
.../db_v6_with_1_purge_req_for_2_docs.couch | Bin 0 -> 16557 bytes
.../eunit/fixtures/db_v6_with_2_purge_req.couch | Bin 0 -> 16566 bytes
.../eunit/fixtures/db_v6_without_purge_req.couch | Bin 0 -> 61644 bytes
.../eunit/fixtures/db_v7_with_1_purge_req.couch | Bin 0 -> 16617 bytes
.../db_v7_with_1_purge_req_for_2_docs.couch | Bin 0 -> 20705 bytes
.../eunit/fixtures/db_v7_with_2_purge_req.couch | Bin 0 -> 20713 bytes
.../eunit/fixtures/db_v7_without_purge_req.couch | Bin 0 -> 65781 bytes
src/couch/test/eunit/fixtures/logo.png | Bin 0 -> 3010 bytes
src/couch/test/eunit/fixtures/multipart.http | 13 +
.../test/eunit/fixtures/os_daemon_bad_perm.sh | 17 +
.../test/eunit/fixtures/os_daemon_can_reboot.sh | 15 +
.../test/eunit/fixtures/os_daemon_configer.escript | 97 ++
.../test/eunit/fixtures/os_daemon_die_on_boot.sh | 15 +
.../test/eunit/fixtures/os_daemon_die_quickly.sh | 15 +
.../test/eunit/fixtures/os_daemon_looper.escript | 26 +
src/couch/test/eunit/fixtures/test.couch | Bin 0 -> 16482 bytes
src/couch/test/eunit/global_changes_tests.erl | 159 ++++
src/couch/test/eunit/json_stream_parse_tests.erl | 151 +++
src/couch/test/eunit/test_web.erl | 114 +++
.../fixtures/3b835456c235b1827e012e25666152f3.view | Bin 4192 -> 0 bytes
src/couch/test/fixtures/couch_stats_aggregates.cfg | 19 -
src/couch/test/fixtures/couch_stats_aggregates.ini | 20 -
src/couch/test/fixtures/db_non_partitioned.couch | Bin 12479 -> 0 bytes
.../test/fixtures/db_v6_with_1_purge_req.couch | Bin 12470 -> 0 bytes
.../db_v6_with_1_purge_req_for_2_docs.couch | Bin 16557 -> 0 bytes
.../test/fixtures/db_v6_with_2_purge_req.couch | Bin 16566 -> 0 bytes
.../test/fixtures/db_v6_without_purge_req.couch | Bin 61644 -> 0 bytes
.../test/fixtures/db_v7_with_1_purge_req.couch | Bin 16617 -> 0 bytes
.../db_v7_with_1_purge_req_for_2_docs.couch | Bin 20705 -> 0 bytes
.../test/fixtures/db_v7_with_2_purge_req.couch | Bin 20713 -> 0 bytes
.../test/fixtures/db_v7_without_purge_req.couch | Bin 65781 -> 0 bytes
src/couch/test/fixtures/logo.png | Bin 3010 -> 0 bytes
src/couch/test/fixtures/multipart.http | 13 -
src/couch/test/fixtures/os_daemon_bad_perm.sh | 17 -
src/couch/test/fixtures/os_daemon_can_reboot.sh | 15 -
src/couch/test/fixtures/os_daemon_configer.escript | 97 --
src/couch/test/fixtures/os_daemon_die_on_boot.sh | 15 -
src/couch/test/fixtures/os_daemon_die_quickly.sh | 15 -
src/couch/test/fixtures/os_daemon_looper.escript | 26 -
src/couch/test/fixtures/test.couch | Bin 16482 -> 0 bytes
src/couch/test/global_changes_tests.erl | 159 ----
src/couch/test/json_stream_parse_tests.erl | 151 ---
src/couch/test/test_web.erl | 114 ---
src/couch_epi/test/couch_epi_basic_test.erl | 137 ---
src/couch_epi/test/couch_epi_tests.erl | 690 --------------
src/couch_epi/test/eunit/couch_epi_basic_test.erl | 137 +++
src/couch_epi/test/eunit/couch_epi_tests.erl | 690 ++++++++++++++
src/couch_epi/test/eunit/fixtures/app_data1.cfg | 4 +
src/couch_epi/test/eunit/fixtures/app_data2.cfg | 8 +
src/couch_epi/test/fixtures/app_data1.cfg | 4 -
src/couch_epi/test/fixtures/app_data2.cfg | 8 -
.../test/couch_index_compaction_tests.erl | 106 ---
.../test/couch_index_ddoc_updated_tests.erl | 145 ---
.../test/eunit/couch_index_compaction_tests.erl | 106 +++
.../test/eunit/couch_index_ddoc_updated_tests.erl | 145 +++
.../test/couch_log_config_listener_test.erl | 82 --
src/couch_log/test/couch_log_config_test.erl | 110 ---
.../test/couch_log_error_logger_h_test.erl | 45 -
src/couch_log/test/couch_log_formatter_test.erl | 875 -----------------
src/couch_log/test/couch_log_monitor_test.erl | 67 --
src/couch_log/test/couch_log_server_test.erl | 118 ---
src/couch_log/test/couch_log_test.erl | 84 --
src/couch_log/test/couch_log_test_util.erl | 168 ----
src/couch_log/test/couch_log_trunc_io_fmt_test.erl | 92 --
src/couch_log/test/couch_log_util_test.erl | 55 --
src/couch_log/test/couch_log_writer_ets.erl | 49 -
src/couch_log/test/couch_log_writer_file_test.erl | 169 ----
.../test/couch_log_writer_stderr_test.erl | 58 --
.../test/couch_log_writer_syslog_test.erl | 122 ---
src/couch_log/test/couch_log_writer_test.erl | 54 --
.../test/eunit/couch_log_config_listener_test.erl | 82 ++
src/couch_log/test/eunit/couch_log_config_test.erl | 110 +++
.../test/eunit/couch_log_error_logger_h_test.erl | 45 +
.../test/eunit/couch_log_formatter_test.erl | 875 +++++++++++++++++
.../test/eunit/couch_log_monitor_test.erl | 67 ++
src/couch_log/test/eunit/couch_log_server_test.erl | 118 +++
src/couch_log/test/eunit/couch_log_test.erl | 84 ++
src/couch_log/test/eunit/couch_log_test_util.erl | 168 ++++
.../test/eunit/couch_log_trunc_io_fmt_test.erl | 92 ++
src/couch_log/test/eunit/couch_log_util_test.erl | 55 ++
src/couch_log/test/eunit/couch_log_writer_ets.erl | 49 +
.../test/eunit/couch_log_writer_file_test.erl | 169 ++++
.../test/eunit/couch_log_writer_stderr_test.erl | 58 ++
.../test/eunit/couch_log_writer_syslog_test.erl | 122 +++
src/couch_log/test/eunit/couch_log_writer_test.erl | 54 ++
.../test/couch_mrview_all_docs_tests.erl | 140 ---
.../test/couch_mrview_changes_since_tests.erl | 210 ----
.../test/couch_mrview_collation_tests.erl | 207 ----
.../test/couch_mrview_compact_tests.erl | 115 ---
.../test/couch_mrview_ddoc_updated_tests.erl | 145 ---
.../test/couch_mrview_ddoc_validation_tests.erl | 422 ---------
.../test/couch_mrview_design_docs_tests.erl | 136 ---
src/couch_mrview/test/couch_mrview_http_tests.erl | 28 -
.../test/couch_mrview_index_changes_tests.erl | 224 -----
.../test/couch_mrview_index_info_tests.erl | 121 ---
.../test/couch_mrview_local_docs_tests.erl | 148 ---
.../test/couch_mrview_map_views_tests.erl | 144 ---
.../test/couch_mrview_purge_docs_fabric_tests.erl | 276 ------
.../test/couch_mrview_purge_docs_tests.erl | 575 -----------
.../test/couch_mrview_red_views_tests.erl | 95 --
src/couch_mrview/test/couch_mrview_util_tests.erl | 39 -
.../test/eunit/couch_mrview_all_docs_tests.erl | 140 +++
.../eunit/couch_mrview_changes_since_tests.erl | 210 ++++
.../test/eunit/couch_mrview_collation_tests.erl | 207 ++++
.../test/eunit/couch_mrview_compact_tests.erl | 115 +++
.../test/eunit/couch_mrview_ddoc_updated_tests.erl | 145 +++
.../eunit/couch_mrview_ddoc_validation_tests.erl | 422 +++++++++
.../test/eunit/couch_mrview_design_docs_tests.erl | 136 +++
.../test/eunit/couch_mrview_http_tests.erl | 28 +
.../eunit/couch_mrview_index_changes_tests.erl | 224 +++++
.../test/eunit/couch_mrview_index_info_tests.erl | 121 +++
.../test/eunit/couch_mrview_local_docs_tests.erl | 148 +++
.../test/eunit/couch_mrview_map_views_tests.erl | 144 +++
.../eunit/couch_mrview_purge_docs_fabric_tests.erl | 276 ++++++
.../test/eunit/couch_mrview_purge_docs_tests.erl | 575 +++++++++++
.../test/eunit/couch_mrview_red_views_tests.erl | 95 ++
.../test/eunit/couch_mrview_util_tests.erl | 39 +
src/couch_peruser/test/couch_peruser_test.erl | 519 ----------
.../test/eunit/couch_peruser_test.erl | 519 ++++++++++
.../couch_replicator_attachments_too_large.erl | 104 --
.../test/couch_replicator_compact_tests.erl | 455 ---------
.../test/couch_replicator_connection_tests.erl | 241 -----
...replicator_create_target_with_options_tests.erl | 143 ---
.../test/couch_replicator_filtered_tests.erl | 244 -----
.../test/couch_replicator_httpc_pool_tests.erl | 174 ----
.../test/couch_replicator_id_too_long_tests.erl | 94 --
.../test/couch_replicator_large_atts_tests.erl | 124 ---
.../test/couch_replicator_many_leaves_tests.erl | 206 ----
.../test/couch_replicator_missing_stubs_tests.erl | 157 ---
.../test/couch_replicator_proxy_tests.erl | 69 --
.../test/couch_replicator_rate_limiter_tests.erl | 89 --
...ch_replicator_retain_stats_between_job_runs.erl | 160 ----
.../test/couch_replicator_selector_tests.erl | 121 ---
...ch_replicator_small_max_request_size_target.erl | 188 ----
.../test/couch_replicator_test_helper.erl | 135 ---
.../couch_replicator_use_checkpoints_tests.erl | 181 ----
.../couch_replicator_attachments_too_large.erl | 104 ++
.../test/eunit/couch_replicator_compact_tests.erl | 455 +++++++++
.../eunit/couch_replicator_connection_tests.erl | 241 +++++
...replicator_create_target_with_options_tests.erl | 143 +++
.../test/eunit/couch_replicator_filtered_tests.erl | 244 +++++
.../eunit/couch_replicator_httpc_pool_tests.erl | 174 ++++
.../eunit/couch_replicator_id_too_long_tests.erl | 94 ++
.../eunit/couch_replicator_large_atts_tests.erl | 124 +++
.../eunit/couch_replicator_many_leaves_tests.erl | 206 ++++
.../eunit/couch_replicator_missing_stubs_tests.erl | 157 +++
.../test/eunit/couch_replicator_proxy_tests.erl | 69 ++
.../eunit/couch_replicator_rate_limiter_tests.erl | 89 ++
...ch_replicator_retain_stats_between_job_runs.erl | 160 ++++
.../test/eunit/couch_replicator_selector_tests.erl | 121 +++
...ch_replicator_small_max_request_size_target.erl | 188 ++++
.../test/eunit/couch_replicator_test_helper.erl | 135 +++
.../couch_replicator_use_checkpoints_tests.erl | 181 ++++
src/ddoc_cache/test/ddoc_cache_basic_test.erl | 175 ----
src/ddoc_cache/test/ddoc_cache_coverage_test.erl | 77 --
src/ddoc_cache/test/ddoc_cache_disabled_test.erl | 62 --
src/ddoc_cache/test/ddoc_cache_entry_test.erl | 159 ----
src/ddoc_cache/test/ddoc_cache_ev.erl | 21 -
src/ddoc_cache/test/ddoc_cache_eviction_test.erl | 96 --
src/ddoc_cache/test/ddoc_cache_lru_test.erl | 216 -----
src/ddoc_cache/test/ddoc_cache_no_cache_test.erl | 79 --
src/ddoc_cache/test/ddoc_cache_open_error_test.erl | 46 -
src/ddoc_cache/test/ddoc_cache_open_test.erl | 107 ---
src/ddoc_cache/test/ddoc_cache_opener_test.erl | 33 -
src/ddoc_cache/test/ddoc_cache_refresh_test.erl | 174 ----
src/ddoc_cache/test/ddoc_cache_remove_test.erl | 224 -----
src/ddoc_cache/test/ddoc_cache_test.hrl | 26 -
src/ddoc_cache/test/ddoc_cache_tutil.erl | 111 ---
.../test/eunit/ddoc_cache_basic_test.erl | 175 ++++
.../test/eunit/ddoc_cache_coverage_test.erl | 77 ++
.../test/eunit/ddoc_cache_disabled_test.erl | 62 ++
.../test/eunit/ddoc_cache_entry_test.erl | 159 ++++
src/ddoc_cache/test/eunit/ddoc_cache_ev.erl | 21 +
.../test/eunit/ddoc_cache_eviction_test.erl | 96 ++
src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl | 216 +++++
.../test/eunit/ddoc_cache_no_cache_test.erl | 79 ++
.../test/eunit/ddoc_cache_open_error_test.erl | 46 +
src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl | 107 +++
.../test/eunit/ddoc_cache_opener_test.erl | 33 +
.../test/eunit/ddoc_cache_refresh_test.erl | 174 ++++
.../test/eunit/ddoc_cache_remove_test.erl | 224 +++
src/ddoc_cache/test/eunit/ddoc_cache_test.hrl | 26 +
src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl | 111 +++
src/fabric/test/eunit/fabric_rpc_purge_tests.erl | 307 ++++
src/fabric/test/fabric_rpc_purge_tests.erl | 307 ------
.../test/eunit/global_changes_hooks_tests.erl | 156 +++
.../test/global_changes_hooks_tests.erl | 156 ---
src/mem3/test/eunit/mem3_cluster_test.erl | 133 +++
src/mem3/test/eunit/mem3_hash_test.erl | 23 +
src/mem3/test/eunit/mem3_rep_test.erl | 321 +++++++
src/mem3/test/eunit/mem3_reshard_api_test.erl | 847 +++++++++++++++++
.../test/eunit/mem3_reshard_changes_feed_test.erl | 389 ++++++++
src/mem3/test/eunit/mem3_reshard_test.erl | 805 ++++++++++++++++
src/mem3/test/eunit/mem3_ring_prop_tests.erl | 144 +++
src/mem3/test/eunit/mem3_seeds_test.erl | 69 ++
src/mem3/test/eunit/mem3_sync_security_test.erl | 54 ++
src/mem3/test/eunit/mem3_util_test.erl | 130 +++
src/mem3/test/mem3_cluster_test.erl | 133 ---
src/mem3/test/mem3_hash_test.erl | 23 -
src/mem3/test/mem3_rep_test.erl | 321 -------
src/mem3/test/mem3_reshard_api_test.erl | 847 -----------------
src/mem3/test/mem3_reshard_changes_feed_test.erl | 389 --------
src/mem3/test/mem3_reshard_test.erl | 805 ----------------
src/mem3/test/mem3_ring_prop_tests.erl | 144 ---
src/mem3/test/mem3_seeds_test.erl | 69 --
src/mem3/test/mem3_sync_security_test.erl | 54 --
src/mem3/test/mem3_util_test.erl | 130 ---
349 files changed, 31209 insertions(+), 31209 deletions(-)
delete mode 100644 src/chttpd/test/chttpd_cors_test.erl
delete mode 100644 src/chttpd/test/chttpd_csp_tests.erl
delete mode 100644 src/chttpd/test/chttpd_db_attachment_size_tests.erl
delete mode 100644 src/chttpd/test/chttpd_db_bulk_get_multipart_test.erl
delete mode 100644 src/chttpd/test/chttpd_db_bulk_get_test.erl
delete mode 100644 src/chttpd/test/chttpd_db_doc_size_tests.erl
delete mode 100644 src/chttpd/test/chttpd_db_test.erl
delete mode 100644 src/chttpd/test/chttpd_dbs_info_test.erl
delete mode 100644 src/chttpd/test/chttpd_error_info_tests.erl
delete mode 100644 src/chttpd/test/chttpd_handlers_tests.erl
delete mode 100644 src/chttpd/test/chttpd_open_revs_error_test.erl
delete mode 100644 src/chttpd/test/chttpd_plugin_tests.erl
delete mode 100644 src/chttpd/test/chttpd_prefer_header_test.erl
delete mode 100644
src/chttpd/test/chttpd_purge_tests.erl delete mode 100644 src/chttpd/test/chttpd_security_tests.erl delete mode 100644 src/chttpd/test/chttpd_socket_buffer_size_test.erl delete mode 100644 src/chttpd/test/chttpd_view_test.erl delete mode 100644 src/chttpd/test/chttpd_welcome_test.erl delete mode 100644 src/chttpd/test/chttpd_xframe_test.erl create mode 100644 src/chttpd/test/eunit/chttpd_cors_test.erl create mode 100644 src/chttpd/test/eunit/chttpd_csp_tests.erl create mode 100644 src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl create mode 100644 src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl create mode 100644 src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl create mode 100644 src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl create mode 100644 src/chttpd/test/eunit/chttpd_db_test.erl create mode 100644 src/chttpd/test/eunit/chttpd_dbs_info_test.erl create mode 100644 src/chttpd/test/eunit/chttpd_error_info_tests.erl create mode 100644 src/chttpd/test/eunit/chttpd_handlers_tests.erl create mode 100644 src/chttpd/test/eunit/chttpd_open_revs_error_test.erl create mode 100644 src/chttpd/test/eunit/chttpd_plugin_tests.erl create mode 100644 src/chttpd/test/eunit/chttpd_prefer_header_test.erl create mode 100644 src/chttpd/test/eunit/chttpd_purge_tests.erl create mode 100644 src/chttpd/test/eunit/chttpd_security_tests.erl create mode 100644 src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl create mode 100644 src/chttpd/test/eunit/chttpd_view_test.erl create mode 100644 src/chttpd/test/eunit/chttpd_welcome_test.erl create mode 100644 src/chttpd/test/eunit/chttpd_xframe_test.erl delete mode 100644 src/couch/test/chttpd_endpoints_tests.erl delete mode 100644 src/couch/test/couch_auth_cache_tests.erl delete mode 100644 src/couch/test/couch_base32_tests.erl delete mode 100644 src/couch/test/couch_bt_engine_compactor_tests.erl delete mode 100644 src/couch/test/couch_bt_engine_tests.erl delete mode 100644 src/couch/test/couch_bt_engine_upgrade_tests.erl delete mode 100644 src/couch/test/couch_btree_tests.erl delete mode 100644 src/couch/test/couch_changes_tests.erl delete mode 100644 src/couch/test/couch_compress_tests.erl delete mode 100644 src/couch/test/couch_db_doc_tests.erl delete mode 100644 src/couch/test/couch_db_mpr_tests.erl delete mode 100644 src/couch/test/couch_db_plugin_tests.erl delete mode 100644 src/couch/test/couch_db_props_upgrade_tests.erl delete mode 100644 src/couch/test/couch_db_split_tests.erl delete mode 100644 src/couch/test/couch_db_tests.erl delete mode 100644 src/couch/test/couch_doc_json_tests.erl delete mode 100644 src/couch/test/couch_doc_tests.erl delete mode 100644 src/couch/test/couch_ejson_size_tests.erl delete mode 100644 src/couch/test/couch_etag_tests.erl delete mode 100644 src/couch/test/couch_file_tests.erl delete mode 100644 src/couch/test/couch_flags_config_tests.erl delete mode 100644 src/couch/test/couch_flags_tests.erl delete mode 100644 src/couch/test/couch_hotp_tests.erl delete mode 100644 src/couch/test/couch_index_tests.erl delete mode 100644 src/couch/test/couch_key_tree_prop_tests.erl delete mode 100644 src/couch/test/couch_key_tree_tests.erl delete mode 100644 src/couch/test/couch_passwords_tests.erl delete mode 100644 src/couch/test/couch_query_servers_tests.erl delete mode 100644 src/couch/test/couch_server_tests.erl delete mode 100644 src/couch/test/couch_stream_tests.erl delete mode 100644 src/couch/test/couch_task_status_tests.erl delete mode 100644 src/couch/test/couch_totp_tests.erl delete mode 100644 
src/couch/test/couch_util_tests.erl delete mode 100644 src/couch/test/couch_uuids_tests.erl delete mode 100644 src/couch/test/couch_work_queue_tests.erl delete mode 100644 src/couch/test/couchdb_attachments_tests.erl delete mode 100644 src/couch/test/couchdb_auth_tests.erl delete mode 100755 src/couch/test/couchdb_cookie_domain_tests.erl delete mode 100644 src/couch/test/couchdb_cors_tests.erl delete mode 100644 src/couch/test/couchdb_db_tests.erl delete mode 100644 src/couch/test/couchdb_design_doc_tests.erl delete mode 100644 src/couch/test/couchdb_file_compression_tests.erl delete mode 100644 src/couch/test/couchdb_location_header_tests.erl delete mode 100644 src/couch/test/couchdb_mrview_cors_tests.erl delete mode 100644 src/couch/test/couchdb_mrview_tests.erl delete mode 100644 src/couch/test/couchdb_os_proc_pool.erl delete mode 100644 src/couch/test/couchdb_update_conflicts_tests.erl delete mode 100644 src/couch/test/couchdb_vhosts_tests.erl delete mode 100644 src/couch/test/couchdb_views_tests.erl create mode 100644 src/couch/test/eunit/chttpd_endpoints_tests.erl create mode 100644 src/couch/test/eunit/couch_auth_cache_tests.erl create mode 100644 src/couch/test/eunit/couch_base32_tests.erl create mode 100644 src/couch/test/eunit/couch_bt_engine_compactor_tests.erl create mode 100644 src/couch/test/eunit/couch_bt_engine_tests.erl create mode 100644 src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl create mode 100644 src/couch/test/eunit/couch_btree_tests.erl create mode 100644 src/couch/test/eunit/couch_changes_tests.erl create mode 100644 src/couch/test/eunit/couch_compress_tests.erl create mode 100644 src/couch/test/eunit/couch_db_doc_tests.erl create mode 100644 src/couch/test/eunit/couch_db_mpr_tests.erl create mode 100644 src/couch/test/eunit/couch_db_plugin_tests.erl create mode 100644 src/couch/test/eunit/couch_db_props_upgrade_tests.erl create mode 100644 src/couch/test/eunit/couch_db_split_tests.erl create mode 100644 src/couch/test/eunit/couch_db_tests.erl create mode 100644 src/couch/test/eunit/couch_doc_json_tests.erl create mode 100644 src/couch/test/eunit/couch_doc_tests.erl create mode 100644 src/couch/test/eunit/couch_ejson_size_tests.erl create mode 100644 src/couch/test/eunit/couch_etag_tests.erl create mode 100644 src/couch/test/eunit/couch_file_tests.erl create mode 100644 src/couch/test/eunit/couch_flags_config_tests.erl create mode 100644 src/couch/test/eunit/couch_flags_tests.erl create mode 100644 src/couch/test/eunit/couch_hotp_tests.erl create mode 100644 src/couch/test/eunit/couch_index_tests.erl create mode 100644 src/couch/test/eunit/couch_key_tree_prop_tests.erl create mode 100644 src/couch/test/eunit/couch_key_tree_tests.erl create mode 100644 src/couch/test/eunit/couch_passwords_tests.erl create mode 100644 src/couch/test/eunit/couch_query_servers_tests.erl create mode 100644 src/couch/test/eunit/couch_server_tests.erl create mode 100644 src/couch/test/eunit/couch_stream_tests.erl create mode 100644 src/couch/test/eunit/couch_task_status_tests.erl create mode 100644 src/couch/test/eunit/couch_totp_tests.erl create mode 100644 src/couch/test/eunit/couch_util_tests.erl create mode 100644 src/couch/test/eunit/couch_uuids_tests.erl create mode 100644 src/couch/test/eunit/couch_work_queue_tests.erl create mode 100644 src/couch/test/eunit/couchdb_attachments_tests.erl create mode 100644 src/couch/test/eunit/couchdb_auth_tests.erl create mode 100755 src/couch/test/eunit/couchdb_cookie_domain_tests.erl create mode 100644 
src/couch/test/eunit/couchdb_cors_tests.erl create mode 100644 src/couch/test/eunit/couchdb_db_tests.erl create mode 100644 src/couch/test/eunit/couchdb_design_doc_tests.erl create mode 100644 src/couch/test/eunit/couchdb_file_compression_tests.erl create mode 100644 src/couch/test/eunit/couchdb_location_header_tests.erl create mode 100644 src/couch/test/eunit/couchdb_mrview_cors_tests.erl create mode 100644 src/couch/test/eunit/couchdb_mrview_tests.erl create mode 100644 src/couch/test/eunit/couchdb_os_proc_pool.erl create mode 100644 src/couch/test/eunit/couchdb_update_conflicts_tests.erl create mode 100644 src/couch/test/eunit/couchdb_vhosts_tests.erl create mode 100644 src/couch/test/eunit/couchdb_views_tests.erl create mode 100644 src/couch/test/eunit/fixtures/3b835456c235b1827e012e25666152f3.view create mode 100644 src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg create mode 100644 src/couch/test/eunit/fixtures/couch_stats_aggregates.ini create mode 100644 src/couch/test/eunit/fixtures/db_non_partitioned.couch create mode 100644 src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch create mode 100644 src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch create mode 100644 src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch create mode 100644 src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch create mode 100644 src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch create mode 100644 src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch create mode 100644 src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch create mode 100644 src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch create mode 100644 src/couch/test/eunit/fixtures/logo.png create mode 100644 src/couch/test/eunit/fixtures/multipart.http create mode 100644 src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh create mode 100755 src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh create mode 100755 src/couch/test/eunit/fixtures/os_daemon_configer.escript create mode 100755 src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh create mode 100755 src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh create mode 100755 src/couch/test/eunit/fixtures/os_daemon_looper.escript create mode 100644 src/couch/test/eunit/fixtures/test.couch create mode 100644 src/couch/test/eunit/global_changes_tests.erl create mode 100644 src/couch/test/eunit/json_stream_parse_tests.erl create mode 100644 src/couch/test/eunit/test_web.erl delete mode 100644 src/couch/test/fixtures/3b835456c235b1827e012e25666152f3.view delete mode 100644 src/couch/test/fixtures/couch_stats_aggregates.cfg delete mode 100644 src/couch/test/fixtures/couch_stats_aggregates.ini delete mode 100644 src/couch/test/fixtures/db_non_partitioned.couch delete mode 100644 src/couch/test/fixtures/db_v6_with_1_purge_req.couch delete mode 100644 src/couch/test/fixtures/db_v6_with_1_purge_req_for_2_docs.couch delete mode 100644 src/couch/test/fixtures/db_v6_with_2_purge_req.couch delete mode 100644 src/couch/test/fixtures/db_v6_without_purge_req.couch delete mode 100644 src/couch/test/fixtures/db_v7_with_1_purge_req.couch delete mode 100644 src/couch/test/fixtures/db_v7_with_1_purge_req_for_2_docs.couch delete mode 100644 src/couch/test/fixtures/db_v7_with_2_purge_req.couch delete mode 100644 src/couch/test/fixtures/db_v7_without_purge_req.couch delete mode 100644 src/couch/test/fixtures/logo.png delete mode 100644 src/couch/test/fixtures/multipart.http delete mode 100644 
src/couch/test/fixtures/os_daemon_bad_perm.sh delete mode 100755 src/couch/test/fixtures/os_daemon_can_reboot.sh delete mode 100755 src/couch/test/fixtures/os_daemon_configer.escript delete mode 100755 src/couch/test/fixtures/os_daemon_die_on_boot.sh delete mode 100755 src/couch/test/fixtures/os_daemon_die_quickly.sh delete mode 100755 src/couch/test/fixtures/os_daemon_looper.escript delete mode 100644 src/couch/test/fixtures/test.couch delete mode 100644 src/couch/test/global_changes_tests.erl delete mode 100644 src/couch/test/json_stream_parse_tests.erl delete mode 100644 src/couch/test/test_web.erl delete mode 100644 src/couch_epi/test/couch_epi_basic_test.erl delete mode 100644 src/couch_epi/test/couch_epi_tests.erl create mode 100644 src/couch_epi/test/eunit/couch_epi_basic_test.erl create mode 100644 src/couch_epi/test/eunit/couch_epi_tests.erl create mode 100644 src/couch_epi/test/eunit/fixtures/app_data1.cfg create mode 100644 src/couch_epi/test/eunit/fixtures/app_data2.cfg delete mode 100644 src/couch_epi/test/fixtures/app_data1.cfg delete mode 100644 src/couch_epi/test/fixtures/app_data2.cfg delete mode 100644 src/couch_index/test/couch_index_compaction_tests.erl delete mode 100644 src/couch_index/test/couch_index_ddoc_updated_tests.erl create mode 100644 src/couch_index/test/eunit/couch_index_compaction_tests.erl create mode 100644 src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl delete mode 100644 src/couch_log/test/couch_log_config_listener_test.erl delete mode 100644 src/couch_log/test/couch_log_config_test.erl delete mode 100644 src/couch_log/test/couch_log_error_logger_h_test.erl delete mode 100644 src/couch_log/test/couch_log_formatter_test.erl delete mode 100644 src/couch_log/test/couch_log_monitor_test.erl delete mode 100644 src/couch_log/test/couch_log_server_test.erl delete mode 100644 src/couch_log/test/couch_log_test.erl delete mode 100644 src/couch_log/test/couch_log_test_util.erl delete mode 100644 src/couch_log/test/couch_log_trunc_io_fmt_test.erl delete mode 100644 src/couch_log/test/couch_log_util_test.erl delete mode 100644 src/couch_log/test/couch_log_writer_ets.erl delete mode 100644 src/couch_log/test/couch_log_writer_file_test.erl delete mode 100644 src/couch_log/test/couch_log_writer_stderr_test.erl delete mode 100644 src/couch_log/test/couch_log_writer_syslog_test.erl delete mode 100644 src/couch_log/test/couch_log_writer_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_config_listener_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_config_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_error_logger_h_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_formatter_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_monitor_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_server_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_test_util.erl create mode 100644 src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_util_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_writer_ets.erl create mode 100644 src/couch_log/test/eunit/couch_log_writer_file_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_writer_stderr_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_writer_syslog_test.erl create mode 100644 src/couch_log/test/eunit/couch_log_writer_test.erl delete mode 100644 
src/couch_mrview/test/couch_mrview_all_docs_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_changes_since_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_collation_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_compact_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_ddoc_updated_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_ddoc_validation_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_design_docs_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_http_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_index_changes_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_index_info_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_local_docs_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_map_views_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_purge_docs_fabric_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_purge_docs_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_red_views_tests.erl delete mode 100644 src/couch_mrview/test/couch_mrview_util_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_changes_since_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_http_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_index_changes_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl create mode 100644 src/couch_mrview/test/eunit/couch_mrview_util_tests.erl delete mode 100644 src/couch_peruser/test/couch_peruser_test.erl create mode 100644 src/couch_peruser/test/eunit/couch_peruser_test.erl delete mode 100644 src/couch_replicator/test/couch_replicator_attachments_too_large.erl delete mode 100644 src/couch_replicator/test/couch_replicator_compact_tests.erl delete mode 100644 src/couch_replicator/test/couch_replicator_connection_tests.erl delete mode 100644 src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl delete mode 100644 src/couch_replicator/test/couch_replicator_filtered_tests.erl delete mode 100644 src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl delete mode 100644 src/couch_replicator/test/couch_replicator_id_too_long_tests.erl delete mode 100644 src/couch_replicator/test/couch_replicator_large_atts_tests.erl delete mode 100644 src/couch_replicator/test/couch_replicator_many_leaves_tests.erl delete mode 100644 src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl delete mode 100644 src/couch_replicator/test/couch_replicator_proxy_tests.erl delete 
mode 100644 src/couch_replicator/test/couch_replicator_rate_limiter_tests.erl delete mode 100644 src/couch_replicator/test/couch_replicator_retain_stats_between_job_runs.erl delete mode 100644 src/couch_replicator/test/couch_replicator_selector_tests.erl delete mode 100644 src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl delete mode 100644 src/couch_replicator/test/couch_replicator_test_helper.erl delete mode 100644 src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_test_helper.erl create mode 100644 src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_basic_test.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_coverage_test.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_disabled_test.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_entry_test.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_ev.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_eviction_test.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_lru_test.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_no_cache_test.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_open_error_test.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_open_test.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_opener_test.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_refresh_test.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_remove_test.erl delete mode 100644 src/ddoc_cache/test/ddoc_cache_test.hrl delete mode 100644 src/ddoc_cache/test/ddoc_cache_tutil.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_ev.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl create mode 
100644 src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_test.hrl create mode 100644 src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl create mode 100644 src/fabric/test/eunit/fabric_rpc_purge_tests.erl delete mode 100644 src/fabric/test/fabric_rpc_purge_tests.erl create mode 100644 src/global_changes/test/eunit/global_changes_hooks_tests.erl delete mode 100644 src/global_changes/test/global_changes_hooks_tests.erl create mode 100644 src/mem3/test/eunit/mem3_cluster_test.erl create mode 100644 src/mem3/test/eunit/mem3_hash_test.erl create mode 100644 src/mem3/test/eunit/mem3_rep_test.erl create mode 100644 src/mem3/test/eunit/mem3_reshard_api_test.erl create mode 100644 src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl create mode 100644 src/mem3/test/eunit/mem3_reshard_test.erl create mode 100644 src/mem3/test/eunit/mem3_ring_prop_tests.erl create mode 100644 src/mem3/test/eunit/mem3_seeds_test.erl create mode 100644 src/mem3/test/eunit/mem3_sync_security_test.erl create mode 100644 src/mem3/test/eunit/mem3_util_test.erl delete mode 100644 src/mem3/test/mem3_cluster_test.erl delete mode 100644 src/mem3/test/mem3_hash_test.erl delete mode 100644 src/mem3/test/mem3_rep_test.erl delete mode 100644 src/mem3/test/mem3_reshard_api_test.erl delete mode 100644 src/mem3/test/mem3_reshard_changes_feed_test.erl delete mode 100644 src/mem3/test/mem3_reshard_test.erl delete mode 100644 src/mem3/test/mem3_ring_prop_tests.erl delete mode 100644 src/mem3/test/mem3_seeds_test.erl delete mode 100644 src/mem3/test/mem3_sync_security_test.erl delete mode 100644 src/mem3/test/mem3_util_test.erl diff --git a/src/chttpd/test/chttpd_cors_test.erl b/src/chttpd/test/chttpd_cors_test.erl deleted file mode 100644 index 19e851561..000000000 --- a/src/chttpd/test/chttpd_cors_test.erl +++ /dev/null @@ -1,564 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_cors_test). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). --include_lib("chttpd/include/chttpd_cors.hrl"). - - --define(DEFAULT_ORIGIN, "http://example.com"). --define(DEFAULT_ORIGIN_HTTPS, "https://example.com"). --define(EXPOSED_HEADERS, - "content-type, accept-ranges, etag, server, x-couch-request-id, " ++ - "x-couch-update-newrev, x-couchdb-body-time"). - --define(CUSTOM_SUPPORTED_METHODS, ?SUPPORTED_METHODS -- ["CONNECT"]). --define(CUSTOM_SUPPORTED_HEADERS, ["extra" | ?SUPPORTED_HEADERS -- ["pragma"]]). --define(CUSTOM_EXPOSED_HEADERS, ["expose" | ?COUCH_HEADERS]). - --define(CUSTOM_MAX_AGE, round(?CORS_DEFAULT_MAX_AGE / 2)). - -%% Test helpers - - -empty_cors_config() -> - []. 
- - -minimal_cors_config() -> - [ - {<<"enable_cors">>, true}, - {<<"origins">>, {[]}} - ]. - - -simple_cors_config() -> - [ - {<<"enable_cors">>, true}, - {<<"origins">>, {[ - {list_to_binary(?DEFAULT_ORIGIN), {[]}} - ]}} - ]. - - -wildcard_cors_config() -> - [ - {<<"enable_cors">>, true}, - {<<"origins">>, {[ - {<<"*">>, {[]}} - ]}} - ]. - -custom_cors_config() -> - [ - {<<"enable_cors">>, true}, - {<<"allow_methods">>, ?CUSTOM_SUPPORTED_METHODS}, - {<<"allow_headers">>, ?CUSTOM_SUPPORTED_HEADERS}, - {<<"exposed_headers">>, ?CUSTOM_EXPOSED_HEADERS}, - {<<"max_age">>, ?CUSTOM_MAX_AGE}, - {<<"origins">>, {[ - {<<"*">>, {[]}} - ]}} - ]. - -access_control_cors_config(AllowCredentials) -> - [ - {<<"enable_cors">>, true}, - {<<"allow_credentials">>, AllowCredentials}, - {<<"origins">>, {[ - {list_to_binary(?DEFAULT_ORIGIN), {[]}} - ]}}]. - - -multiple_cors_config() -> - [ - {<<"enable_cors">>, true}, - {<<"origins">>, {[ - {list_to_binary(?DEFAULT_ORIGIN), {[]}}, - {<<"https://example.com">>, {[]}}, - {<<"http://example.com:5984">>, {[]}}, - {<<"https://example.com:5984">>, {[]}} - ]}} - ]. - - -mock_request(Method, Path, Headers0) -> - HeaderKey = "Access-Control-Request-Method", - Headers = case proplists:get_value(HeaderKey, Headers0, undefined) of - nil -> - proplists:delete(HeaderKey, Headers0); - undefined -> - case Method of - 'OPTIONS' -> - [{HeaderKey, atom_to_list(Method)} | Headers0]; - _ -> - Headers0 - end; - _ -> - Headers0 - end, - Headers1 = mochiweb_headers:make(Headers), - MochiReq = mochiweb_request:new(nil, Method, Path, {1, 1}, Headers1), - PathParts = [list_to_binary(chttpd:unquote(Part)) - || Part <- string:tokens(Path, "/")], - #httpd{method=Method, mochi_req=MochiReq, path_parts=PathParts}. - - -header(#httpd{}=Req, Key) -> - chttpd:header_value(Req, Key); -header({mochiweb_response, [_, _, Headers]}, Key) -> - %% header(Headers, Key); - mochiweb_headers:get_value(Key, Headers); -header(Headers, Key) -> - couch_util:get_value(Key, Headers, undefined). - - -string_headers(H) -> - string:join(H, ", "). - - -assert_not_preflight_(Val) -> - ?_assertEqual(not_preflight, Val). - - -%% CORS disabled tests - - -cors_disabled_test_() -> - {"CORS disabled tests", - [ - {"Empty user", - {foreach, - fun empty_cors_config/0, - [ - fun test_no_access_control_method_preflight_request_/1, - fun test_no_headers_/1, - fun test_no_headers_server_/1, - fun test_no_headers_db_/1 - ]}}]}. - - -%% CORS enabled tests - - -cors_enabled_minimal_config_test_() -> - {"Minimal CORS enabled, no Origins", - {foreach, - fun minimal_cors_config/0, - [ - fun test_no_access_control_method_preflight_request_/1, - fun test_incorrect_origin_simple_request_/1, - fun test_incorrect_origin_preflight_request_/1 - ]}}. - - -cors_enabled_simple_config_test_() -> - {"Simple CORS config", - {foreach, - fun simple_cors_config/0, - [ - fun test_no_access_control_method_preflight_request_/1, - fun test_preflight_request_/1, - fun test_bad_headers_preflight_request_/1, - fun test_good_headers_preflight_request_/1, - fun test_db_request_/1, - fun test_db_preflight_request_/1, - fun test_db_host_origin_request_/1, - fun test_preflight_with_port_no_origin_/1, - fun test_preflight_with_scheme_no_origin_/1, - fun test_preflight_with_scheme_port_no_origin_/1, - fun test_case_sensitive_mismatch_of_allowed_origins_/1 - ]}}. 
- -cors_enabled_custom_config_test_() -> - {"Simple CORS config with custom allow_methods/allow_headers/exposed_headers", - {foreach, - fun custom_cors_config/0, - [ - fun test_good_headers_preflight_request_with_custom_config_/1, - fun test_db_request_with_custom_config_/1 - ]}}. - - -cors_enabled_multiple_config_test_() -> - {"Multiple options CORS config", - {foreach, - fun multiple_cors_config/0, - [ - fun test_no_access_control_method_preflight_request_/1, - fun test_preflight_request_/1, - fun test_db_request_/1, - fun test_db_preflight_request_/1, - fun test_db_host_origin_request_/1, - fun test_preflight_with_port_with_origin_/1, - fun test_preflight_with_scheme_with_origin_/1, - fun test_preflight_with_scheme_port_with_origin_/1 - ]}}. - - -%% Access-Control-Allow-Credentials tests - - -%% http://www.w3.org/TR/cors/#supports-credentials -%% 6.1.3 -%% If the resource supports credentials add a single -%% Access-Control-Allow-Origin header, with the value -%% of the Origin header as value, and add a single -%% Access-Control-Allow-Credentials header with the -%% case-sensitive string "true" as value. -%% Otherwise, add a single Access-Control-Allow-Origin -%% header, with either the value of the Origin header -%% or the string "*" as value. -%% Note: The string "*" cannot be used for a resource -%% that supports credentials. - -db_request_credentials_header_off_test_() -> - {"Allow credentials disabled", - {setup, - fun() -> - access_control_cors_config(false) - end, - fun test_db_request_credentials_header_off_/1 - } - }. - - -db_request_credentials_header_on_test_() -> - {"Allow credentials enabled", - {setup, - fun() -> - access_control_cors_config(true) - end, - fun test_db_request_credentials_header_on_/1 - } - }. - - -%% CORS wildcard tests - - -cors_enabled_wildcard_test_() -> - {"Wildcard CORS config", - {foreach, - fun wildcard_cors_config/0, - [ - fun test_no_access_control_method_preflight_request_/1, - fun test_preflight_request_/1, - fun test_preflight_request_no_allow_credentials_/1, - fun test_preflight_request_empty_request_headers_/1, - fun test_db_request_/1, - fun test_db_preflight_request_/1, - fun test_db_host_origin_request_/1, - fun test_preflight_with_port_with_origin_/1, - fun test_preflight_with_scheme_with_origin_/1, - fun test_preflight_with_scheme_port_with_origin_/1, - fun test_case_sensitive_mismatch_of_allowed_origins_/1 - ]}}. - - -%% Test generators - - -test_no_headers_(OwnerConfig) -> - Req = mock_request('GET', "/", []), - assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)). - - -test_no_headers_server_(OwnerConfig) -> - Req = mock_request('GET', "/", [{"Origin", "http://127.0.0.1"}]), - assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)). - - -test_no_headers_db_(OwnerConfig) -> - Headers = [{"Origin", "http://127.0.0.1"}], - Req = mock_request('GET', "/my_db", Headers), - assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)). - - -test_incorrect_origin_simple_request_(OwnerConfig) -> - Req = mock_request('GET', "/", [{"Origin", "http://127.0.0.1"}]), - [ - ?_assert(chttpd_cors:is_cors_enabled(OwnerConfig)), - assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)) - ]. 
- - -test_incorrect_origin_preflight_request_(OwnerConfig) -> - Headers = [ - {"Origin", "http://127.0.0.1"}, - {"Access-Control-Request-Method", "GET"} - ], - Req = mock_request('GET', "/", Headers), - [ - ?_assert(chttpd_cors:is_cors_enabled(OwnerConfig)), - assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)) - ]. - - -test_bad_headers_preflight_request_(OwnerConfig) -> - Headers = [ - {"Origin", ?DEFAULT_ORIGIN}, - {"Access-Control-Request-Method", "GET"}, - {"Access-Control-Request-Headers", "X-Not-An-Allowed-Headers"} - ], - Req = mock_request('OPTIONS', "/", Headers), - [ - ?_assert(chttpd_cors:is_cors_enabled(OwnerConfig)), - assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)) - ]. - - -test_good_headers_preflight_request_(OwnerConfig) -> - Headers = [ - {"Origin", ?DEFAULT_ORIGIN}, - {"Access-Control-Request-Method", "GET"}, - {"Access-Control-Request-Headers", "accept-language"} - ], - Req = mock_request('OPTIONS', "/", Headers), - ?assert(chttpd_cors:is_cors_enabled(OwnerConfig)), - {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig), - [ - ?_assertEqual(?DEFAULT_ORIGIN, - header(Headers1, "Access-Control-Allow-Origin")), - ?_assertEqual(string_headers(?SUPPORTED_METHODS), - header(Headers1, "Access-Control-Allow-Methods")), - ?_assertEqual(string_headers(["accept-language"]), - header(Headers1, "Access-Control-Allow-Headers")) - ]. - -test_good_headers_preflight_request_with_custom_config_(OwnerConfig) -> - Headers = [ - {"Origin", ?DEFAULT_ORIGIN}, - {"Access-Control-Request-Method", "GET"}, - {"Access-Control-Request-Headers", "accept-language, extra"}, - {"Access-Control-Max-Age", ?CORS_DEFAULT_MAX_AGE} - ], - Req = mock_request('OPTIONS', "/", Headers), - ?assert(chttpd_cors:is_cors_enabled(OwnerConfig)), - AllowMethods = couch_util:get_value( - <<"allow_methods">>, OwnerConfig, ?SUPPORTED_METHODS), - MaxAge = couch_util:get_value( - <<"max_age">>, OwnerConfig, ?CORS_DEFAULT_MAX_AGE), - {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig), - [ - ?_assertEqual(?DEFAULT_ORIGIN, - header(Headers1, "Access-Control-Allow-Origin")), - ?_assertEqual(string_headers(AllowMethods), - header(Headers1, "Access-Control-Allow-Methods")), - ?_assertEqual(string_headers(["accept-language", "extra"]), - header(Headers1, "Access-Control-Allow-Headers")), - ?_assertEqual(MaxAge, - header(Headers1, "Access-Control-Max-Age")) - ]. - - -test_preflight_request_(OwnerConfig) -> - Headers = [ - {"Origin", ?DEFAULT_ORIGIN}, - {"Access-Control-Request-Method", "GET"} - ], - Req = mock_request('OPTIONS', "/", Headers), - {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig), - [ - ?_assertEqual(?DEFAULT_ORIGIN, - header(Headers1, "Access-Control-Allow-Origin")), - ?_assertEqual(string_headers(?SUPPORTED_METHODS), - header(Headers1, "Access-Control-Allow-Methods")) - ]. - - -test_no_access_control_method_preflight_request_(OwnerConfig) -> - Headers = [ - {"Origin", ?DEFAULT_ORIGIN}, - {"Access-Control-Request-Method", notnil} - ], - Req = mock_request('OPTIONS', "/", Headers), - assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)). 
- - -test_preflight_request_no_allow_credentials_(OwnerConfig) -> - Headers = [ - {"Origin", ?DEFAULT_ORIGIN}, - {"Access-Control-Request-Method", "GET"} - ], - Req = mock_request('OPTIONS', "/", Headers), - {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig), - [ - ?_assertEqual(?DEFAULT_ORIGIN, - header(Headers1, "Access-Control-Allow-Origin")), - ?_assertEqual(string_headers(?SUPPORTED_METHODS), - header(Headers1, "Access-Control-Allow-Methods")), - ?_assertEqual(undefined, - header(Headers1, "Access-Control-Allow-Credentials")) - ]. - - -test_preflight_request_empty_request_headers_(OwnerConfig) -> - Headers = [ - {"Origin", ?DEFAULT_ORIGIN}, - {"Access-Control-Request-Method", "POST"}, - {"Access-Control-Request-Headers", ""} - ], - Req = mock_request('OPTIONS', "/", Headers), - {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig), - [ - ?_assertEqual(?DEFAULT_ORIGIN, - header(Headers1, "Access-Control-Allow-Origin")), - ?_assertEqual(string_headers(?SUPPORTED_METHODS), - header(Headers1, "Access-Control-Allow-Methods")), - ?_assertEqual("", - header(Headers1, "Access-Control-Allow-Headers")) - ]. - - -test_db_request_(OwnerConfig) -> - Origin = ?DEFAULT_ORIGIN, - Headers = [{"Origin", Origin}], - Req = mock_request('GET', "/my_db", Headers), - Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), - [ - ?_assertEqual(?DEFAULT_ORIGIN, - header(Headers1, "Access-Control-Allow-Origin")), - ?_assertEqual(?EXPOSED_HEADERS, - header(Headers1, "Access-Control-Expose-Headers")) - ]. - -test_db_request_with_custom_config_(OwnerConfig) -> - Origin = ?DEFAULT_ORIGIN, - Headers = [{"Origin", Origin}, {"extra", "EXTRA"}], - Req = mock_request('GET', "/my_db", Headers), - Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), - ExposedHeaders = couch_util:get_value( - <<"exposed_headers">>, OwnerConfig, ?COUCH_HEADERS), - [ - ?_assertEqual(?DEFAULT_ORIGIN, - header(Headers1, "Access-Control-Allow-Origin")), - ?_assertEqual(lists:sort(["content-type" | ExposedHeaders]), - lists:sort( - split_list(header(Headers1, "Access-Control-Expose-Headers")))) - ]. - - -test_db_preflight_request_(OwnerConfig) -> - Headers = [ - {"Origin", ?DEFAULT_ORIGIN} - ], - Req = mock_request('OPTIONS', "/my_db", Headers), - {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig), - [ - ?_assertEqual(?DEFAULT_ORIGIN, - header(Headers1, "Access-Control-Allow-Origin")), - ?_assertEqual(string_headers(?SUPPORTED_METHODS), - header(Headers1, "Access-Control-Allow-Methods")) - ]. - - -test_db_host_origin_request_(OwnerConfig) -> - Origin = ?DEFAULT_ORIGIN, - Headers = [ - {"Origin", Origin}, - {"Host", "example.com"} - ], - Req = mock_request('GET', "/my_db", Headers), - Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), - [ - ?_assertEqual(?DEFAULT_ORIGIN, - header(Headers1, "Access-Control-Allow-Origin")), - ?_assertEqual(?EXPOSED_HEADERS, - header(Headers1, "Access-Control-Expose-Headers")) - ]. - - -test_preflight_origin_helper_(OwnerConfig, Origin, ExpectedOrigin) -> - Headers = [ - {"Origin", Origin}, - {"Access-Control-Request-Method", "GET"} - ], - Req = mock_request('OPTIONS', "/", Headers), - Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), - [?_assertEqual(ExpectedOrigin, - header(Headers1, "Access-Control-Allow-Origin")) - ]. - - -test_preflight_with_port_no_origin_(OwnerConfig) -> - Origin = ?DEFAULT_ORIGIN ++ ":5984", - test_preflight_origin_helper_(OwnerConfig, Origin, undefined). 
- - -test_preflight_with_port_with_origin_(OwnerConfig) -> - Origin = ?DEFAULT_ORIGIN ++ ":5984", - test_preflight_origin_helper_(OwnerConfig, Origin, Origin). - - -test_preflight_with_scheme_no_origin_(OwnerConfig) -> - test_preflight_origin_helper_(OwnerConfig, ?DEFAULT_ORIGIN_HTTPS, undefined). - - -test_preflight_with_scheme_with_origin_(OwnerConfig) -> - Origin = ?DEFAULT_ORIGIN_HTTPS, - test_preflight_origin_helper_(OwnerConfig, Origin, Origin). - - -test_preflight_with_scheme_port_no_origin_(OwnerConfig) -> - Origin = ?DEFAULT_ORIGIN_HTTPS ++ ":5984", - test_preflight_origin_helper_(OwnerConfig, Origin, undefined). - - -test_preflight_with_scheme_port_with_origin_(OwnerConfig) -> - Origin = ?DEFAULT_ORIGIN_HTTPS ++ ":5984", - test_preflight_origin_helper_(OwnerConfig, Origin, Origin). - - -test_case_sensitive_mismatch_of_allowed_origins_(OwnerConfig) -> - Origin = "http://EXAMPLE.COM", - Headers = [{"Origin", Origin}], - Req = mock_request('GET', "/", Headers), - Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), - [ - ?_assertEqual(?DEFAULT_ORIGIN, - header(Headers1, "Access-Control-Allow-Origin")), - ?_assertEqual(?EXPOSED_HEADERS, - header(Headers1, "Access-Control-Expose-Headers")) - ]. - - -test_db_request_credentials_header_off_(OwnerConfig) -> - Origin = ?DEFAULT_ORIGIN, - Headers = [{"Origin", Origin}], - Req = mock_request('GET', "/", Headers), - Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), - [ - ?_assertEqual(?DEFAULT_ORIGIN, - header(Headers1, "Access-Control-Allow-Origin")), - ?_assertEqual(undefined, - header(Headers1, "Access-Control-Allow-Credentials")) - ]. - - -test_db_request_credentials_header_on_(OwnerConfig) -> - Origin = ?DEFAULT_ORIGIN, - Headers = [{"Origin", Origin}], - Req = mock_request('GET', "/", Headers), - Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), - [ - ?_assertEqual(?DEFAULT_ORIGIN, - header(Headers1, "Access-Control-Allow-Origin")), - ?_assertEqual("true", - header(Headers1, "Access-Control-Allow-Credentials")) - ]. - -split_list(S) -> - re:split(S, "\\s*,\\s*", [trim, {return, list}]). diff --git a/src/chttpd/test/chttpd_csp_tests.erl b/src/chttpd/test/chttpd_csp_tests.erl deleted file mode 100644 index e86436254..000000000 --- a/src/chttpd/test/chttpd_csp_tests.erl +++ /dev/null @@ -1,81 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_csp_tests). - --include_lib("couch/include/couch_eunit.hrl"). - - -setup() -> - ok = config:set("csp", "enable", "true", false), - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - lists:concat(["http://", Addr, ":", Port, "/_utils/"]). - -teardown(_) -> - ok. 
- - - -csp_test_() -> - { - "Content Security Policy tests", - { - setup, - fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_not_return_any_csp_headers_when_disabled/1, - fun should_apply_default_policy/1, - fun should_return_custom_policy/1, - fun should_only_enable_csp_when_true/1 - ] - } - } - }. - - -should_not_return_any_csp_headers_when_disabled(Url) -> - ?_assertEqual(undefined, - begin - ok = config:set("csp", "enable", "false", false), - {ok, _, Headers, _} = test_request:get(Url), - proplists:get_value("Content-Security-Policy", Headers) - end). - -should_apply_default_policy(Url) -> - ?_assertEqual( - "default-src 'self'; img-src 'self' data:; font-src 'self'; " - "script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';", - begin - {ok, _, Headers, _} = test_request:get(Url), - proplists:get_value("Content-Security-Policy", Headers) - end). - -should_return_custom_policy(Url) -> - ?_assertEqual("default-src 'http://example.com';", - begin - ok = config:set("csp", "header_value", - "default-src 'http://example.com';", false), - {ok, _, Headers, _} = test_request:get(Url), - proplists:get_value("Content-Security-Policy", Headers) - end). - -should_only_enable_csp_when_true(Url) -> - ?_assertEqual(undefined, - begin - ok = config:set("csp", "enable", "tru", false), - {ok, _, Headers, _} = test_request:get(Url), - proplists:get_value("Content-Security-Policy", Headers) - end). diff --git a/src/chttpd/test/chttpd_db_attachment_size_tests.erl b/src/chttpd/test/chttpd_db_attachment_size_tests.erl deleted file mode 100644 index 0ab08dd80..000000000 --- a/src/chttpd/test/chttpd_db_attachment_size_tests.erl +++ /dev/null @@ -1,206 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_db_attachment_size_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(USER, "chttpd_db_att_test_admin"). --define(PASS, "pass"). --define(AUTH, {basic_auth, {?USER, ?PASS}}). --define(CONTENT_JSON, {"Content-Type", "application/json"}). --define(CONTENT_MULTI_RELATED, {"Content-Type", - "multipart/related;boundary=\"bound\""}). - - -setup() -> - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - ok = config:set("couchdb", "max_attachment_size", "50", _Persist=false), - TmpDb = ?tempdb(), - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)), - Url = "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(TmpDb), - create_db(Url), - add_doc(Url, "doc1"), - Url. - - -teardown(Url) -> - delete_db(Url), - ok = config:delete("admins", ?USER, _Persist=false), - ok = config:delete("couchdb", "max_attachment_size"). 
- - -attachment_size_test_() -> - { - "chttpd max_attachment_size tests", - { - setup, - fun chttpd_test_util:start_couch/0, - fun chttpd_test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun put_inline/1, - fun put_simple/1, - fun put_simple_chunked/1, - fun put_mp_related/1 - ] - } - } - }. - - -put_inline(Url) -> - ?_test(begin - Status = put_inline(Url, "doc2", 50), - ?assert(Status =:= 201 orelse Status =:= 202), - ?assertEqual(413, put_inline(Url, "doc3", 51)) - end). - - -put_simple(Url) -> - ?_test(begin - Headers = [{"Content-Type", "app/binary"}], - Rev1 = doc_rev(Url, "doc1"), - Data1 = data(50), - Status1 = put_req(Url ++ "/doc1/att2?rev=" ++ Rev1, Headers, Data1), - ?assert(Status1 =:= 201 orelse Status1 =:= 202), - Data2 = data(51), - Rev2 = doc_rev(Url, "doc1"), - Status2 = put_req(Url ++ "/doc1/att3?rev=" ++ Rev2, Headers, Data2), - ?assertEqual(413, Status2) - end). - - -put_simple_chunked(Url) -> - ?_test(begin - Headers = [{"Content-Type", "app/binary"}], - Rev1 = doc_rev(Url, "doc1"), - DataFun1 = data_stream_fun(50), - Status1 = put_req_chunked(Url ++ "/doc1/att2?rev=" ++ Rev1, Headers, DataFun1), - ?assert(Status1 =:= 201 orelse Status1 =:= 202), - DataFun2 = data_stream_fun(51), - Rev2 = doc_rev(Url, "doc1"), - Status2 = put_req_chunked(Url ++ "/doc1/att3?rev=" ++ Rev2, Headers, DataFun2), - ?assertEqual(413, Status2) - end). - - -put_mp_related(Url) -> - ?_test(begin - Headers = [?CONTENT_MULTI_RELATED], - Body1 = mp_body(50), - Status1 = put_req(Url ++ "/doc2", Headers, Body1), - ?assert(Status1 =:= 201 orelse Status1 =:= 202), - Body2 = mp_body(51), - Status2 = put_req(Url ++ "/doc3", Headers, Body2), - ?assertEqual(413, Status2) - end). - - -% Helper functions - -create_db(Url) -> - Status = put_req(Url, "{}"), - ?assert(Status =:= 201 orelse Status =:= 202). - - -add_doc(Url, DocId) -> - Status = put_req(Url ++ "/" ++ DocId, "{}"), - ?assert(Status =:= 201 orelse Status =:= 202). - - -delete_db(Url) -> - {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). - - -put_inline(Url, DocId, Size) -> - Doc = "{\"_attachments\": {\"att1\":{" - "\"content_type\": \"app/binary\", " - "\"data\": \"" ++ data_b64(Size) ++ "\"" - "}}}", - put_req(Url ++ "/" ++ DocId, Doc). - - -mp_body(AttSize) -> - AttData = data(AttSize), - SizeStr = integer_to_list(AttSize), - string:join([ - "--bound", - - "Content-Type: application/json", - - "", - - "{\"_id\":\"doc2\", \"_attachments\":{\"att\":" - "{\"content_type\":\"app/binary\", \"length\":" ++ SizeStr ++ "," - "\"follows\":true}}}", - - "--bound", - - "Content-Disposition: attachment; filename=\"att\"", - - "Content-Type: app/binary", - - "", - - AttData, - - "--bound--" - ], "\r\n"). - - -doc_rev(Url, DocId) -> - {200, ResultProps} = get_req(Url ++ "/" ++ DocId), - {<<"_rev">>, BinRev} = lists:keyfind(<<"_rev">>, 1, ResultProps), - binary_to_list(BinRev). - - -put_req(Url, Body) -> - put_req(Url, [], Body). - - -put_req(Url, Headers, Body) -> - {ok, Status, _, _} = test_request:put(Url, Headers ++ [?AUTH], Body), - Status. - - -put_req_chunked(Url, Headers, Body) -> - Opts = [{transfer_encoding, {chunked, 1}}], - {ok, Status, _, _} = test_request:put(Url, Headers ++ [?AUTH], Body, Opts), - Status. - - -get_req(Url) -> - {ok, Status, _, ResultBody} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]), - {[_ | _] = ResultProps} = ?JSON_DECODE(ResultBody), - {Status, ResultProps}. - -% Data streaming generator for ibrowse client. 
ibrowse will repeatedly call the -% function with State and it should return {ok, Data, NewState} or eof at end. -data_stream_fun(Size) -> - Fun = fun(0) -> eof; (BytesLeft) -> - {ok, <<"x">>, BytesLeft - 1} - end, - {Fun, Size}. - - -data(Size) -> - string:copies("x", Size). - - -data_b64(Size) -> - base64:encode_to_string(data(Size)). diff --git a/src/chttpd/test/chttpd_db_bulk_get_multipart_test.erl b/src/chttpd/test/chttpd_db_bulk_get_multipart_test.erl deleted file mode 100644 index 8a95c92ac..000000000 --- a/src/chttpd/test/chttpd_db_bulk_get_multipart_test.erl +++ /dev/null @@ -1,313 +0,0 @@ -%% Licensed under the Apache License, Version 2.0 (the "License"); you may not -%% use this file except in compliance with the License. You may obtain a copy of -%% the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -%% License for the specific language governing permissions and limitations under -%% the License. - --module(chttpd_db_bulk_get_multipart_test). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 3000). - - -setup() -> - mock(config), - mock(chttpd), - mock(couch_epi), - mock(couch_httpd), - mock(couch_stats), - mock(fabric), - mock(mochireq), - Pid = spawn_accumulator(), - Pid. - - -teardown(Pid) -> - ok = stop_accumulator(Pid), - meck:unload(). - - -bulk_get_test_() -> - { - "/db/_bulk_get tests", - { - foreach, fun setup/0, fun teardown/1, - [ - fun should_require_docs_field/1, - fun should_not_accept_specific_query_params/1, - fun should_return_empty_results_on_no_docs/1, - fun should_get_doc_with_all_revs/1, - fun should_validate_doc_with_bad_id/1, - fun should_validate_doc_with_bad_rev/1, - fun should_validate_missing_doc/1, - fun should_validate_bad_atts_since/1, - fun should_include_attachments_when_atts_since_specified/1 - ] - } - }. - - -should_require_docs_field(_) -> - Req = fake_request({[{}]}), - Db = test_util:fake_db([{name, <<"foo">>}]), - ?_assertThrow({bad_request, _}, chttpd_db:db_req(Req, Db)). - - -should_not_accept_specific_query_params(_) -> - Req = fake_request({[{<<"docs">>, []}]}), - Db = test_util:fake_db([{name, <<"foo">>}]), - lists:map(fun (Param) -> - {Param, ?_assertThrow({bad_request, _}, - begin - ok = meck:expect(chttpd, qs, - fun(_) -> [{Param, ""}] end), - chttpd_db:db_req(Req, Db) - end)} - end, ["rev", "open_revs", "atts_since", "w", "new_edits"]). - - -should_return_empty_results_on_no_docs(Pid) -> - Req = fake_request({[{<<"docs">>, []}]}), - Db = test_util:fake_db([{name, <<"foo">>}]), - chttpd_db:db_req(Req, Db), - Results = get_results_from_response(Pid), - ?_assertEqual([], Results). - - -should_get_doc_with_all_revs(Pid) -> - DocId = <<"docudoc">>, - Req = fake_request(DocId), - Db = test_util:fake_db([{name, <<"foo">>}]), - - DocRevA = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-ABC">>}]}}, - DocRevB = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-CDE">>}]}}, - - mock_open_revs(all, {ok, [{ok, DocRevA}, {ok, DocRevB}]}), - chttpd_db:db_req(Req, Db), - - Result = get_results_from_response(Pid), - ?_assertEqual(DocId, couch_util:get_value(<<"_id">>, Result)). 
- - -should_validate_doc_with_bad_id(Pid) -> - DocId = <<"_docudoc">>, - - Req = fake_request(DocId), - Db = test_util:fake_db([{name, <<"foo">>}]), - chttpd_db:db_req(Req, Db), - - Result = get_results_from_response(Pid), - ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), - - ?_assertMatch([{<<"id">>, DocId}, - {<<"rev">>, null}, - {<<"error">>, <<"illegal_docid">>}, - {<<"reason">>, _}], Result). - - -should_validate_doc_with_bad_rev(Pid) -> - DocId = <<"docudoc">>, - Rev = <<"revorev">>, - - Req = fake_request(DocId, Rev), - Db = test_util:fake_db([{name, <<"foo">>}]), - chttpd_db:db_req(Req, Db), - - Result = get_results_from_response(Pid), - ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), - - ?_assertMatch([{<<"id">>, DocId}, - {<<"rev">>, Rev}, - {<<"error">>, <<"bad_request">>}, - {<<"reason">>, _}], Result). - - -should_validate_missing_doc(Pid) -> - DocId = <<"docudoc">>, - Rev = <<"1-revorev">>, - - Req = fake_request(DocId, Rev), - Db = test_util:fake_db([{name, <<"foo">>}]), - mock_open_revs([{1,<<"revorev">>}], {ok, []}), - chttpd_db:db_req(Req, Db), - - Result = get_results_from_response(Pid), - ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), - - ?_assertMatch([{<<"id">>, DocId}, - {<<"rev">>, Rev}, - {<<"error">>, <<"not_found">>}, - {<<"reason">>, _}], Result). - - -should_validate_bad_atts_since(Pid) -> - DocId = <<"docudoc">>, - Rev = <<"1-revorev">>, - - Req = fake_request(DocId, Rev, <<"badattsince">>), - Db = test_util:fake_db([{name, <<"foo">>}]), - mock_open_revs([{1,<<"revorev">>}], {ok, []}), - chttpd_db:db_req(Req, Db), - - Result = get_results_from_response(Pid), - ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), - - ?_assertMatch([{<<"id">>, DocId}, - {<<"rev">>, <<"badattsince">>}, - {<<"error">>, <<"bad_request">>}, - {<<"reason">>, _}], Result). - - -should_include_attachments_when_atts_since_specified(_) -> - DocId = <<"docudoc">>, - Rev = <<"1-revorev">>, - - Req = fake_request(DocId, Rev, [<<"1-abc">>]), - Db = test_util:fake_db([{name, <<"foo">>}]), - mock_open_revs([{1,<<"revorev">>}], {ok, []}), - chttpd_db:db_req(Req, Db), - - ?_assert(meck:called(fabric, open_revs, - ['_', DocId, [{1, <<"revorev">>}], - [{atts_since, [{1, <<"abc">>}]}, attachments, - {user_ctx, undefined}]])). - -%% helpers - -fake_request(Payload) when is_tuple(Payload) -> - #httpd{method='POST', path_parts=[<<"db">>, <<"_bulk_get">>], - mochi_req=mochireq, req_body=Payload}; -fake_request(DocId) when is_binary(DocId) -> - fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}]}]}]}). - -fake_request(DocId, Rev) -> - fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}, {<<"rev">>, Rev}]}]}]}). - -fake_request(DocId, Rev, AttsSince) -> - fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}, - {<<"rev">>, Rev}, - {<<"atts_since">>, AttsSince}]}]}]}). - - -mock_open_revs(RevsReq0, RevsResp) -> - ok = meck:expect(fabric, open_revs, - fun(_, _, RevsReq1, _) -> - ?assertEqual(RevsReq0, RevsReq1), - RevsResp - end). 
- - -mock(mochireq) -> - ok = meck:new(mochireq, [non_strict]), - ok = meck:expect(mochireq, parse_qs, fun() -> [] end), - ok = meck:expect(mochireq, accepts_content_type, fun("multipart/mixed") -> true; - ("multipart/related") -> true; - (_) -> false end), - ok; -mock(couch_httpd) -> - ok = meck:new(couch_httpd, [passthrough]), - ok = meck:expect(couch_httpd, validate_ctype, fun(_, _) -> ok end), - ok = meck:expect(couch_httpd, last_chunk, fun(_) -> {ok, nil} end), - ok = meck:expect(couch_httpd, send_chunk, fun send_chunk/2), - ok; -mock(chttpd) -> - ok = meck:new(chttpd, [passthrough]), - ok = meck:expect(chttpd, start_json_response, fun(_, _) -> {ok, nil} end), - ok = meck:expect(chttpd, start_chunked_response, fun(_, _, _) -> {ok, nil} end), - ok = meck:expect(chttpd, end_json_response, fun(_) -> ok end), - ok = meck:expect(chttpd, send_chunk, fun send_chunk/2), - ok = meck:expect(chttpd, json_body_obj, fun (#httpd{req_body=Body}) -> Body end), - ok; -mock(couch_epi) -> - ok = meck:new(couch_epi, [passthrough]), - ok = meck:expect(couch_epi, any, fun(_, _, _, _, _) -> false end), - ok; -mock(couch_stats) -> - ok = meck:new(couch_stats, [passthrough]), - ok = meck:expect(couch_stats, increment_counter, fun(_) -> ok end), - ok = meck:expect(couch_stats, increment_counter, fun(_, _) -> ok end), - ok = meck:expect(couch_stats, decrement_counter, fun(_) -> ok end), - ok = meck:expect(couch_stats, decrement_counter, fun(_, _) -> ok end), - ok = meck:expect(couch_stats, update_histogram, fun(_, _) -> ok end), - ok = meck:expect(couch_stats, update_gauge, fun(_, _) -> ok end), - ok; -mock(fabric) -> - ok = meck:new(fabric, [passthrough]), - ok; -mock(config) -> - ok = meck:new(config, [passthrough]), - ok = meck:expect(config, get, fun(_, _, Default) -> Default end), - ok. - - -spawn_accumulator() -> - Parent = self(), - Pid = spawn(fun() -> accumulator_loop(Parent, []) end), - erlang:put(chunks_gather, Pid), - Pid. - -accumulator_loop(Parent, Acc) -> - receive - {stop, Ref} -> - Parent ! {ok, Ref}; - {get, Ref} -> - Parent ! {ok, Ref, Acc}, - accumulator_loop(Parent, Acc); - {put, Ref, Chunk} -> - Parent ! {ok, Ref}, - accumulator_loop(Parent, [Chunk|Acc]) - end. - -stop_accumulator(Pid) -> - Ref = make_ref(), - Pid ! {stop, Ref}, - receive - {ok, Ref} -> - ok - after ?TIMEOUT -> - throw({timeout, <<"process stop timeout">>}) - end. - - -send_chunk(_, []) -> - {ok, nil}; -send_chunk(_Req, [H|T]=Chunk) when is_list(Chunk) -> - send_chunk(_Req, H), - send_chunk(_Req, T); -send_chunk(_, Chunk) -> - Worker = erlang:get(chunks_gather), - Ref = make_ref(), - Worker ! {put, Ref, Chunk}, - receive - {ok, Ref} -> {ok, nil} - after ?TIMEOUT -> - throw({timeout, <<"send chunk timeout">>}) - end. - - -get_response(Pid) -> - Ref = make_ref(), - Pid ! {get, Ref}, - receive - {ok, Ref, Acc} -> - Acc - after ?TIMEOUT -> - throw({timeout, <<"get response timeout">>}) - end. - -get_results_from_response(Pid) -> - case get_response(Pid) of - [] -> - []; - Result -> - {Result1} = ?JSON_DECODE(lists:nth(2, Result)), - Result1 - end. diff --git a/src/chttpd/test/chttpd_db_bulk_get_test.erl b/src/chttpd/test/chttpd_db_bulk_get_test.erl deleted file mode 100644 index 864e7079a..000000000 --- a/src/chttpd/test/chttpd_db_bulk_get_test.erl +++ /dev/null @@ -1,341 +0,0 @@ -%% Licensed under the Apache License, Version 2.0 (the "License"); you may not -%% use this file except in compliance with the License. 
You may obtain a copy of -%% the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -%% License for the specific language governing permissions and limitations under -%% the License. - --module(chttpd_db_bulk_get_test). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 3000). - - -setup() -> - mock(config), - mock(chttpd), - mock(couch_epi), - mock(couch_httpd), - mock(couch_stats), - mock(fabric), - mock(mochireq), - Pid = spawn_accumulator(), - Pid. - - -teardown(Pid) -> - ok = stop_accumulator(Pid), - meck:unload(config), - meck:unload(chttpd), - meck:unload(couch_epi), - meck:unload(couch_httpd), - meck:unload(couch_stats), - meck:unload(fabric), - meck:unload(mochireq). - - -bulk_get_test_() -> - { - "/db/_bulk_get tests", - { - foreach, fun setup/0, fun teardown/1, - [ - fun should_require_docs_field/1, - fun should_not_accept_specific_query_params/1, - fun should_return_empty_results_on_no_docs/1, - fun should_get_doc_with_all_revs/1, - fun should_validate_doc_with_bad_id/1, - fun should_validate_doc_with_bad_rev/1, - fun should_validate_missing_doc/1, - fun should_validate_bad_atts_since/1, - fun should_include_attachments_when_atts_since_specified/1 - ] - } - }. - - -should_require_docs_field(_) -> - Req = fake_request({[{}]}), - ?_assertThrow({bad_request, _}, chttpd_db:db_req(Req, nil)). - - -should_not_accept_specific_query_params(_) -> - Req = fake_request({[{<<"docs">>, []}]}), - lists:map(fun (Param) -> - {Param, ?_assertThrow({bad_request, _}, - begin - ok = meck:expect(chttpd, qs, - fun(_) -> [{Param, ""}] end), - chttpd_db:db_req(Req, nil) - end)} - end, ["rev", "open_revs", "atts_since", "w", "new_edits"]). - - -should_return_empty_results_on_no_docs(Pid) -> - Req = fake_request({[{<<"docs">>, []}]}), - chttpd_db:db_req(Req, nil), - Results = get_results_from_response(Pid), - ?_assertEqual([], Results). - - -should_get_doc_with_all_revs(Pid) -> - DocId = <<"docudoc">>, - Req = fake_request(DocId), - - RevA = {[{<<"_id">>, DocId}, {<<"_rev">>, <<"1-ABC">>}]}, - RevB = {[{<<"_id">>, DocId}, {<<"_rev">>, <<"1-CDE">>}]}, - DocRevA = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-ABC">>}]}}, - DocRevB = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-CDE">>}]}}, - - mock_open_revs(all, {ok, [{ok, DocRevA}, {ok, DocRevB}]}), - chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])), - - [{Result}] = get_results_from_response(Pid), - ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), - - Docs = couch_util:get_value(<<"docs">>, Result), - ?assertEqual(2, length(Docs)), - - [{DocA0}, {DocB0}] = Docs, - - DocA = couch_util:get_value(<<"ok">>, DocA0), - DocB = couch_util:get_value(<<"ok">>, DocB0), - - ?_assertEqual([RevA, RevB], [DocA, DocB]). 
- - -should_validate_doc_with_bad_id(Pid) -> - DocId = <<"_docudoc">>, - - Req = fake_request(DocId), - chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])), - - [{Result}] = get_results_from_response(Pid), - ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), - - Docs = couch_util:get_value(<<"docs">>, Result), - ?assertEqual(1, length(Docs)), - [{DocResult}] = Docs, - - Doc = couch_util:get_value(<<"error">>, DocResult), - - ?_assertMatch({[{<<"id">>, DocId}, - {<<"rev">>, null}, - {<<"error">>, <<"illegal_docid">>}, - {<<"reason">>, _}]}, - Doc). - - -should_validate_doc_with_bad_rev(Pid) -> - DocId = <<"docudoc">>, - Rev = <<"revorev">>, - - Req = fake_request(DocId, Rev), - chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])), - - [{Result}] = get_results_from_response(Pid), - ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), - - Docs = couch_util:get_value(<<"docs">>, Result), - ?assertEqual(1, length(Docs)), - [{DocResult}] = Docs, - - Doc = couch_util:get_value(<<"error">>, DocResult), - - ?_assertMatch({[{<<"id">>, DocId}, - {<<"rev">>, Rev}, - {<<"error">>, <<"bad_request">>}, - {<<"reason">>, _}]}, - Doc). - - -should_validate_missing_doc(Pid) -> - DocId = <<"docudoc">>, - Rev = <<"1-revorev">>, - - Req = fake_request(DocId, Rev), - mock_open_revs([{1,<<"revorev">>}], {ok, []}), - chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])), - - [{Result}] = get_results_from_response(Pid), - ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), - - Docs = couch_util:get_value(<<"docs">>, Result), - ?assertEqual(1, length(Docs)), - [{DocResult}] = Docs, - - Doc = couch_util:get_value(<<"error">>, DocResult), - - ?_assertMatch({[{<<"id">>, DocId}, - {<<"rev">>, Rev}, - {<<"error">>, <<"not_found">>}, - {<<"reason">>, _}]}, - Doc). - - -should_validate_bad_atts_since(Pid) -> - DocId = <<"docudoc">>, - Rev = <<"1-revorev">>, - - Req = fake_request(DocId, Rev, <<"badattsince">>), - mock_open_revs([{1,<<"revorev">>}], {ok, []}), - chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])), - - [{Result}] = get_results_from_response(Pid), - ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), - - Docs = couch_util:get_value(<<"docs">>, Result), - ?assertEqual(1, length(Docs)), - [{DocResult}] = Docs, - - Doc = couch_util:get_value(<<"error">>, DocResult), - - ?_assertMatch({[{<<"id">>, DocId}, - {<<"rev">>, <<"badattsince">>}, - {<<"error">>, <<"bad_request">>}, - {<<"reason">>, _}]}, - Doc). - - -should_include_attachments_when_atts_since_specified(_) -> - DocId = <<"docudoc">>, - Rev = <<"1-revorev">>, - - Req = fake_request(DocId, Rev, [<<"1-abc">>]), - mock_open_revs([{1,<<"revorev">>}], {ok, []}), - chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])), - - ?_assert(meck:called(fabric, open_revs, - ['_', DocId, [{1, <<"revorev">>}], - [{atts_since, [{1, <<"abc">>}]}, attachments, - {user_ctx, undefined}]])). - -%% helpers - -fake_request(Payload) when is_tuple(Payload) -> - #httpd{method='POST', path_parts=[<<"db">>, <<"_bulk_get">>], - mochi_req=mochireq, req_body=Payload}; -fake_request(DocId) when is_binary(DocId) -> - fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}]}]}]}). - -fake_request(DocId, Rev) -> - fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}, {<<"rev">>, Rev}]}]}]}). - -fake_request(DocId, Rev, AttsSince) -> - fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}, - {<<"rev">>, Rev}, - {<<"atts_since">>, AttsSince}]}]}]}). 
- - -mock_open_revs(RevsReq0, RevsResp) -> - ok = meck:expect(fabric, open_revs, - fun(_, _, RevsReq1, _) -> - ?assertEqual(RevsReq0, RevsReq1), - RevsResp - end). - - -mock(mochireq) -> - ok = meck:new(mochireq, [non_strict]), - ok = meck:expect(mochireq, parse_qs, fun() -> [] end), - ok = meck:expect(mochireq, accepts_content_type, fun(_) -> false end), - ok; -mock(couch_httpd) -> - ok = meck:new(couch_httpd, [passthrough]), - ok = meck:expect(couch_httpd, validate_ctype, fun(_, _) -> ok end), - ok; -mock(chttpd) -> - ok = meck:new(chttpd, [passthrough]), - ok = meck:expect(chttpd, start_json_response, fun(_, _) -> {ok, nil} end), - ok = meck:expect(chttpd, end_json_response, fun(_) -> ok end), - ok = meck:expect(chttpd, send_chunk, fun send_chunk/2), - ok = meck:expect(chttpd, json_body_obj, fun (#httpd{req_body=Body}) -> Body end), - ok; -mock(couch_epi) -> - ok = meck:new(couch_epi, [passthrough]), - ok = meck:expect(couch_epi, any, fun(_, _, _, _, _) -> false end), - ok; -mock(couch_stats) -> - ok = meck:new(couch_stats, [passthrough]), - ok = meck:expect(couch_stats, increment_counter, fun(_) -> ok end), - ok = meck:expect(couch_stats, increment_counter, fun(_, _) -> ok end), - ok = meck:expect(couch_stats, decrement_counter, fun(_) -> ok end), - ok = meck:expect(couch_stats, decrement_counter, fun(_, _) -> ok end), - ok = meck:expect(couch_stats, update_histogram, fun(_, _) -> ok end), - ok = meck:expect(couch_stats, update_gauge, fun(_, _) -> ok end), - ok; -mock(fabric) -> - ok = meck:new(fabric, [passthrough]), - ok; -mock(config) -> - ok = meck:new(config, [passthrough]), - ok = meck:expect(config, get, fun(_, _, Default) -> Default end), - ok. - - -spawn_accumulator() -> - Parent = self(), - Pid = spawn(fun() -> accumulator_loop(Parent, []) end), - erlang:put(chunks_gather, Pid), - Pid. - -accumulator_loop(Parent, Acc) -> - receive - {stop, Ref} -> - Parent ! {ok, Ref}; - {get, Ref} -> - Parent ! {ok, Ref, Acc}, - accumulator_loop(Parent, Acc); - {put, Ref, Chunk} -> - Parent ! {ok, Ref}, - accumulator_loop(Parent, [Chunk|Acc]) - end. - -stop_accumulator(Pid) -> - Ref = make_ref(), - Pid ! {stop, Ref}, - receive - {ok, Ref} -> - ok - after ?TIMEOUT -> - throw({timeout, <<"process stop timeout">>}) - end. - - -send_chunk(_, []) -> - {ok, nil}; -send_chunk(_Req, [H|T]=Chunk) when is_list(Chunk) -> - send_chunk(_Req, H), - send_chunk(_Req, T); -send_chunk(_, Chunk) -> - Worker = erlang:get(chunks_gather), - Ref = make_ref(), - Worker ! {put, Ref, Chunk}, - receive - {ok, Ref} -> {ok, nil} - after ?TIMEOUT -> - throw({timeout, <<"send chunk timeout">>}) - end. - - -get_response(Pid) -> - Ref = make_ref(), - Pid ! {get, Ref}, - receive - {ok, Ref, Acc} -> - ?JSON_DECODE(iolist_to_binary(lists:reverse(Acc))) - after ?TIMEOUT -> - throw({timeout, <<"get response timeout">>}) - end. - - -get_results_from_response(Pid) -> - {Resp} = get_response(Pid), - couch_util:get_value(<<"results">>, Resp). diff --git a/src/chttpd/test/chttpd_db_doc_size_tests.erl b/src/chttpd/test/chttpd_db_doc_size_tests.erl deleted file mode 100644 index 88e2797a3..000000000 --- a/src/chttpd/test/chttpd_db_doc_size_tests.erl +++ /dev/null @@ -1,179 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_db_doc_size_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(USER, "chttpd_db_test_admin"). --define(PASS, "pass"). --define(AUTH, {basic_auth, {?USER, ?PASS}}). --define(CONTENT_JSON, {"Content-Type", "application/json"}). --define(CONTENT_MULTI_RELATED, {"Content-Type", - "multipart/related;boundary=\"bound\""}). --define(CONTENT_MULTI_FORM, {"Content-Type", - "multipart/form-data;boundary=\"bound\""}). - - -setup() -> - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - ok = config:set("couchdb", "max_document_size", "50"), - TmpDb = ?tempdb(), - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), - create_db(Url), - Url. - -teardown(Url) -> - delete_db(Url), - ok = config:delete("admins", ?USER, _Persist=false), - ok = config:delete("couchdb", "max_document_size"). - -create_db(Url) -> - {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), - case Status of - 201 -> ok; - 202 -> ok; - _ -> io:format(user, "~n HTTP Status Code: ~p~n", [Status]) - end, - ?assert(Status =:= 201 orelse Status =:= 202). - -delete_db(Url) -> - {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). - -all_test_() -> - { - "chttpd db max_document_size tests", - { - setup, - fun chttpd_test_util:start_couch/0, - fun chttpd_test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun post_single_doc/1, - fun put_single_doc/1, - fun bulk_doc/1, - fun put_post_doc_attach_inline/1, - fun put_multi_part_related/1, - fun post_multi_part_form/1 - ] - } - } - }. - -post_single_doc(Url) -> - NewDoc = "{\"post_single_doc\": \"some_doc\", - \"_id\": \"testdoc\", \"should_be\" : \"too_large\"}", - {ok, _, _, ResultBody} = test_request:post(Url, - [?CONTENT_JSON, ?AUTH], NewDoc), - {[ErrorMsg | _]} = ?JSON_DECODE(ResultBody), - ?_assertEqual({<<"error">>, <<"document_too_large">>}, ErrorMsg). - -put_single_doc(Url) -> - NewDoc = "{\"post_single_doc\": \"some_doc\", - \"_id\": \"testdoc\", \"should_be\" : \"too_large\"}", - {ok, _, _, ResultBody} = test_request:put(Url ++ "/" ++ "testid", - [?CONTENT_JSON, ?AUTH], NewDoc), - {[ErrorMsg | _]} = ?JSON_DECODE(ResultBody), - ?_assertEqual({<<"error">>, <<"document_too_large">>}, ErrorMsg). - -bulk_doc(Url) -> - NewDoc = "{\"docs\": [{\"doc1\": 1}, {\"errordoc\": - \"this_should_be_the_too_large_error_document\"}]}", - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_bulk_docs/", - [?CONTENT_JSON, ?AUTH], NewDoc), - ResultJson = ?JSON_DECODE(ResultBody), - Expect = {[{<<"error">>,<<"document_too_large">>},{<<"reason">>,<<>>}]}, - ?_assertEqual(Expect, ResultJson). 
- -put_post_doc_attach_inline(Url) -> - Body1 = "{\"body\":\"This is a body.\",", - Body2 = lists:concat(["{\"body\":\"This is a body it should fail", - "because there are too many characters.\","]), - DocRest = lists:concat(["\"_attachments\":{\"foo.txt\":{", - "\"content_type\":\"text/plain\",", - "\"data\": \"VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=\"}}}"]), - Doc1 = lists:concat([Body1, DocRest]), - Doc2 = lists:concat([Body2, DocRest]), - - {ok, _, _, ResultBody} = test_request:post(Url, - [?CONTENT_JSON, ?AUTH], Doc1), - {[Msg | _]} = ?JSON_DECODE(ResultBody), - {ok, _, _, ResultBody1} = test_request:post(Url, - [?CONTENT_JSON, ?AUTH], Doc2), - {[Msg1 | _]} = ?JSON_DECODE(ResultBody1), - - {ok, _, _, ResultBody2} = test_request:put(Url ++ "/" ++ "accept", - [?CONTENT_JSON, ?AUTH], Doc1), - {[Msg2 | _]} = ?JSON_DECODE(ResultBody2), - {ok, _, _, ResultBody3} = test_request:put(Url ++ "/" ++ "fail", - [?CONTENT_JSON, ?AUTH], Doc2), - {[Msg3 | _]} = ?JSON_DECODE(ResultBody3), - [ - ?_assertEqual({<<"ok">>, true}, Msg), - ?_assertEqual({<<"error">>, <<"document_too_large">>}, Msg1), - ?_assertEqual({<<"ok">>, true}, Msg2), - ?_assertEqual({<<"error">>, <<"document_too_large">>}, Msg3) - ]. - -put_multi_part_related(Url) -> - Body1 = "{\"body\":\"This is a body.\",", - Body2 = lists:concat(["{\"body\":\"This is a body it should fail", - "because there are too many characters.\","]), - DocBeg = "--bound\r\nContent-Type: application/json\r\n\r\n", - DocRest = lists:concat(["\"_attachments\":{\"foo.txt\":{\"follows\":true,", - "\"content_type\":\"text/plain\",\"length\":21},\"bar.txt\":", - "{\"follows\":true,\"content_type\":\"text/plain\",", - "\"length\":20}}}\r\n--bound\r\n\r\nthis is 21 chars long", - "\r\n--bound\r\n\r\nthis is 20 chars lon\r\n--bound--epilogue"]), - Doc1 = lists:concat([DocBeg, Body1, DocRest]), - Doc2 = lists:concat([DocBeg, Body2, DocRest]), - {ok, _, _, ResultBody} = test_request:put(Url ++ "/" ++ "accept", - [?CONTENT_MULTI_RELATED, ?AUTH], Doc1), - {[Msg | _]} = ?JSON_DECODE(ResultBody), - {ok, _, _, ResultBody1} = test_request:put(Url ++ "/" ++ "faildoc", - [?CONTENT_MULTI_RELATED, ?AUTH], Doc2), - {[Msg1 | _]} = ?JSON_DECODE(ResultBody1), - [ - ?_assertEqual({<<"ok">>, true}, Msg), - ?_assertEqual({<<"error">>, <<"document_too_large">>}, Msg1) - ]. - -post_multi_part_form(Url) -> - Port = mochiweb_socket_server:get(chttpd, port), - Host = lists:concat([ "http://127.0.0.1:", Port]), - Referer = {"Referer", Host}, - Body1 = "{\"body\":\"This is a body.\"}", - Body2 = lists:concat(["{\"body\":\"This is a body it should fail", - "because there are too many characters.\"}"]), - DocBeg = "--bound\r\nContent-Disposition: form-data; name=\"_doc\"\r\n\r\n", - DocRest = lists:concat(["\r\n--bound\r\nContent-Disposition:", - "form-data; name=\"_attachments\"; filename=\"file.txt\"\r\n", - "Content-Type: text/plain\r\n\r\ncontents of file.txt\r\n\r\n", - "--bound--"]), - Doc1 = lists:concat([DocBeg, Body1, DocRest]), - Doc2 = lists:concat([DocBeg, Body2, DocRest]), - {ok, _, _, ResultBody} = test_request:post(Url ++ "/" ++ "accept", - [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc1), - {[Msg | _]} = ?JSON_DECODE(ResultBody), - {ok, _, _, ResultBody1} = test_request:post(Url ++ "/" ++ "fail", - [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc2), - {[Msg1 | _]} = ?JSON_DECODE(ResultBody1), - [ - ?_assertEqual({<<"ok">>, true}, Msg), - ?_assertEqual({<<"error">>, <<"document_too_large">>}, Msg1) - ]. 
diff --git a/src/chttpd/test/chttpd_db_test.erl b/src/chttpd/test/chttpd_db_test.erl deleted file mode 100644 index 2708aa033..000000000 --- a/src/chttpd/test/chttpd_db_test.erl +++ /dev/null @@ -1,437 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_db_test). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(USER, "chttpd_db_test_admin"). --define(PASS, "pass"). --define(AUTH, {basic_auth, {?USER, ?PASS}}). --define(CONTENT_JSON, {"Content-Type", "application/json"}). --define(DESTHEADER1, {"Destination", "foo%E5%95%8Abar"}). --define(DESTHEADER2, {"Destination", "foo%2Fbar%23baz%3Fpow%3Afiz"}). --define(FIXTURE_TXT, ?ABS_PATH(?FILE)). --define(i2l(I), integer_to_list(I)). - -setup() -> - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - TmpDb = ?tempdb(), - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), - create_db(Url), - Url. - -teardown(Url) -> - delete_db(Url), - ok = config:delete("admins", ?USER, _Persist=false). - -create_db(Url) -> - {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), - ?assert(Status =:= 201 orelse Status =:= 202). - - -create_doc(Url, Id) -> - test_request:put(Url ++ "/" ++ Id, - [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}"). - -delete_db(Url) -> - {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). - -all_test_() -> - { - "chttpd db tests", - { - setup, - fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_return_ok_true_on_bulk_update/1, - fun should_return_ok_true_on_ensure_full_commit/1, - fun should_return_404_for_ensure_full_commit_on_no_db/1, - fun should_accept_live_as_an_alias_for_continuous/1, - fun should_return_404_for_delete_att_on_notadoc/1, - fun should_return_409_for_del_att_without_rev/1, - fun should_return_200_for_del_att_with_rev/1, - fun should_return_409_for_put_att_nonexistent_rev/1, - fun should_return_update_seq_when_set_on_all_docs/1, - fun should_not_return_update_seq_when_unset_on_all_docs/1, - fun should_return_correct_id_on_doc_copy/1, - fun should_return_400_for_bad_engine/1, - fun should_succeed_on_all_docs_with_queries_keys/1, - fun should_succeed_on_all_docs_with_queries_limit_skip/1, - fun should_succeed_on_all_docs_with_multiple_queries/1, - fun should_succeed_on_design_docs_with_queries_keys/1, - fun should_succeed_on_design_docs_with_queries_limit_skip/1, - fun should_succeed_on_design_docs_with_multiple_queries/1, - fun should_succeed_on_local_docs_with_queries_keys/1, - fun should_succeed_on_local_docs_with_queries_limit_skip/1, - fun should_succeed_on_local_docs_with_multiple_queries/1 - ] - } - } - }. 
- - -should_return_ok_true_on_bulk_update(Url) -> - ?_assertEqual(true, - begin - {ok, _, _, Body} = create_doc(Url, "testdoc"), - {Json} = ?JSON_DECODE(Body), - Ref = couch_util:get_value(<<"rev">>, Json, undefined), - NewDoc = "{\"docs\": [{\"_rev\": \"" ++ ?b2l(Ref) ++ "\", \"_id\": \"testdoc\"}]}", - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_bulk_docs/", - [?CONTENT_JSON, ?AUTH], NewDoc), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = lists:nth(1, ResultJson), - couch_util:get_value(<<"ok">>, InnerJson, undefined) - end). - - -should_return_ok_true_on_ensure_full_commit(Url0) -> - ?_test(begin - Url = Url0 ++ "/_ensure_full_commit", - {ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []), - {Json} = ?JSON_DECODE(Body), - ?assertEqual(201, RC), - ?assert(couch_util:get_value(<<"ok">>, Json)) - end). - - -should_return_404_for_ensure_full_commit_on_no_db(Url0) -> - ?_test(begin - Url = Url0 ++ "-missing-db" ++ "/_ensure_full_commit", - {ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []), - {Json} = ?JSON_DECODE(Body), - ?assertEqual(404, RC), - ?assertEqual(<<"not_found">>, couch_util:get_value(<<"error">>, Json)) - end). - - -should_accept_live_as_an_alias_for_continuous(Url) -> - GetLastSeq = fun(Bin) -> - Parts = binary:split(Bin, <<"\n">>, [global]), - Filtered = [P || P <- Parts, size(P) > 0], - LastSeqBin = lists:last(Filtered), - {Result} = try ?JSON_DECODE(LastSeqBin) of - Data -> Data - catch - _:_ -> - ?assert(false) % should not happen, abort - end, - couch_util:get_value(<<"last_seq">>, Result, undefined) - end, - ?_test(begin - {ok, _, _, ResultBody1} = - test_request:get(Url ++ "/_changes?feed=live&timeout=1", [?AUTH]), - LastSeq1 = GetLastSeq(ResultBody1), - - {ok, _, _, _} = create_doc(Url, "testdoc2"), - {ok, _, _, ResultBody2} = - test_request:get(Url ++ "/_changes?feed=live&timeout=1", [?AUTH]), - LastSeq2 = GetLastSeq(ResultBody2), - - ?assertNotEqual(LastSeq1, LastSeq2) - end). - - -should_return_404_for_delete_att_on_notadoc(Url) -> - ?_test(begin - {ok, RC, _, RespBody} = test_request:delete( - Url ++ "/notadoc/att.pdf", - [?CONTENT_JSON, ?AUTH], - [] - ), - ?assertEqual(404, RC), - ?assertEqual( - {[{<<"error">>,<<"not_found">>}, - {<<"reason">>,<<"missing">>}]}, - jiffy:decode(RespBody) - ), - {ok, RC1, _, _} = test_request:get( - Url ++ "/notadoc", - [?CONTENT_JSON, ?AUTH], - [] - ), - ?assertEqual(404, RC1) - end). - - -should_return_409_for_del_att_without_rev(Url) -> - ?_test(begin - {ok, RC, _, _} = test_request:put( - Url ++ "/testdoc3", - [?CONTENT_JSON, ?AUTH], - jiffy:encode(attachment_doc()) - ), - ?assertEqual(201, RC), - - {ok, RC1, _, _} = test_request:delete( - Url ++ "/testdoc3/file.erl", - [?CONTENT_JSON, ?AUTH], - [] - ), - ?assertEqual(409, RC1) - end). - - -should_return_200_for_del_att_with_rev(Url) -> - ?_test(begin - {ok, RC, _Headers, RespBody} = test_request:put( - Url ++ "/testdoc4", - [?CONTENT_JSON, ?AUTH], - jiffy:encode(attachment_doc()) - ), - ?assertEqual(201, RC), - - {ResultJson} = ?JSON_DECODE(RespBody), - Rev = couch_util:get_value(<<"rev">>, ResultJson, undefined), - - {ok, RC1, _, _} = test_request:delete( - Url ++ "/testdoc4/file.erl?rev=" ++ Rev, - [?CONTENT_JSON, ?AUTH], - [] - ), - ?assertEqual(200, RC1) - end). 
- - -should_return_409_for_put_att_nonexistent_rev(Url) -> - ?_test(begin - {ok, RC, _Headers, RespBody} = test_request:put( - Url ++ "/should_return_404/file.erl?rev=1-000", - [?CONTENT_JSON, ?AUTH], - jiffy:encode(attachment_doc()) - ), - ?assertEqual(409, RC), - ?assertMatch({[ - {<<"error">>,<<"not_found">>}, - {<<"reason">>,<<"missing_rev">>}]}, - ?JSON_DECODE(RespBody)) - end). - - -should_return_update_seq_when_set_on_all_docs(Url) -> - ?_test(begin - [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)], - {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/" - ++ "?update_seq=true&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ?assertNotEqual(undefined, - couch_util:get_value(<<"update_seq">>, ResultJson)), - ?assertNotEqual(undefined, - couch_util:get_value(<<"offset">>, ResultJson)) - end). - - -should_not_return_update_seq_when_unset_on_all_docs(Url) -> - ?_test(begin - [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)], - {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/" - ++ "?update_seq=false&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ?assertEqual(undefined, - couch_util:get_value(<<"update_seq">>, ResultJson)), - ?assertNotEqual(undefined, - couch_util:get_value(<<"offset">>, ResultJson)) - end). - - -should_return_correct_id_on_doc_copy(Url) -> - ?_test(begin - {ok, _, _, _} = create_doc(Url, "testdoc"), - {_, _, _, ResultBody1} = test_request:copy(Url ++ "/testdoc/", - [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]), - {ResultJson1} = ?JSON_DECODE(ResultBody1), - Id1 = couch_util:get_value(<<"id">>, ResultJson1), - - {_, _, _, ResultBody2} = test_request:copy(Url ++ "/testdoc/", - [?CONTENT_JSON, ?AUTH, ?DESTHEADER2]), - {ResultJson2} = ?JSON_DECODE(ResultBody2), - Id2 = couch_util:get_value(<<"id">>, ResultJson2), - [ - ?assertEqual(<<102,111,111,229,149,138,98,97,114>>, Id1), - ?assertEqual(<<"foo/bar#baz?pow:fiz">>, Id2) - ] - end). - - -attachment_doc() -> - {ok, Data} = file:read_file(?FIXTURE_TXT), - {[ - {<<"_attachments">>, {[ - {<<"file.erl">>, {[ - {<<"content_type">>, <<"text/plain">>}, - {<<"data">>, base64:encode(Data)} - ]} - }]}} - ]}. - - -should_return_400_for_bad_engine(_) -> - ?_test(begin - TmpDb = ?tempdb(), - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), - Url = BaseUrl ++ "?engine=cowabunga", - {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), - ?assertEqual(400, Status) - end). - - -should_succeed_on_all_docs_with_queries_keys(Url) -> - ?_test(begin - [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/", - [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). 
- - -should_succeed_on_all_docs_with_queries_limit_skip(Url) -> - ?_test(begin - [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/", - [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)), - ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). - - -should_succeed_on_all_docs_with_multiple_queries(Url) -> - ?_test(begin - [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}, - {\"limit\": 5, \"skip\": 2}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/", - [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson1} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), - {InnerJson2} = lists:nth(2, ResultJsonBody), - ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), - ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) - end). - - -should_succeed_on_design_docs_with_queries_keys(Url) -> - ?_test(begin - [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\", - \"_design/ddoc8\"]}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ - "/_design_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). - - -should_succeed_on_design_docs_with_queries_limit_skip(Url) -> - ?_test(begin - [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ - "/_design_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)), - ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). - - -should_succeed_on_design_docs_with_multiple_queries(Url) -> - ?_test(begin - [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\", - \"_design/ddoc8\"]}, {\"limit\": 5, \"skip\": 2}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ - "/_design_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson1} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), - {InnerJson2} = lists:nth(2, ResultJsonBody), - ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), - ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) - end). 
- - -should_succeed_on_local_docs_with_queries_keys(Url) -> - ?_test(begin - [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"keys\": - [ \"_local/doc3\", \"_local/doc8\"]}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ "/_local_docs/queries/", - [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). - - -should_succeed_on_local_docs_with_queries_limit_skip(Url) -> - ?_test(begin - [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ - "/_local_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson} = lists:nth(1, ResultJsonBody), - ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). - - -should_succeed_on_local_docs_with_multiple_queries(Url) -> - ?_test(begin - [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"keys\": [ \"_local/doc3\", - \"_local/doc8\"]}, {\"limit\": 5, \"skip\": 2}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ - "/_local_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson1} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), - {InnerJson2} = lists:nth(2, ResultJsonBody), - ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) - end). diff --git a/src/chttpd/test/chttpd_dbs_info_test.erl b/src/chttpd/test/chttpd_dbs_info_test.erl deleted file mode 100644 index 5b61d8831..000000000 --- a/src/chttpd/test/chttpd_dbs_info_test.erl +++ /dev/null @@ -1,169 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_dbs_info_test). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(USER, "chttpd_db_test_admin"). --define(PASS, "pass"). --define(AUTH, {basic_auth, {?USER, ?PASS}}). --define(CONTENT_JSON, {"Content-Type", "application/json"}). - - -setup() -> - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - Url = lists:concat(["http://", Addr, ":", Port, "/"]), - Db1Url = lists:concat([Url, "db1"]), - create_db(Db1Url), - Db2Url = lists:concat([Url, "db2"]), - create_db(Db2Url), - Url. 
- -teardown(Url) -> - Db1Url = lists:concat([Url, "db1"]), - Db2Url = lists:concat([Url, "db2"]), - delete_db(Db1Url), - delete_db(Db2Url), - ok = config:delete("admins", ?USER, _Persist=false). - -create_db(Url) -> - {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), - ?assert(Status =:= 201 orelse Status =:= 202). - -delete_db(Url) -> - {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). - -dbs_info_test_() -> - { - "chttpd dbs info tests", - { - setup, - fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_return_error_for_get_db_info/1, - fun should_return_dbs_info_for_single_db/1, - fun should_return_dbs_info_for_multiple_dbs/1, - fun should_return_error_for_exceeded_keys/1, - fun should_return_error_for_missing_keys/1, - fun should_return_dbs_info_for_dbs_with_mixed_state/1 - ] - } - } - }. - - -should_return_error_for_get_db_info(Url) -> - ?_test(begin - {ok, Code, _, ResultBody} = test_request:get(Url ++ "/_dbs_info?" - ++ "keys=[\"db1\"]", [?CONTENT_JSON, ?AUTH]), - {Body} = jiffy:decode(ResultBody), - [ - ?assertEqual(<<"method_not_allowed">>, - couch_util:get_value(<<"error">>, Body)), - ?assertEqual(405, Code) - ] - end). - - -should_return_dbs_info_for_single_db(Url) -> - ?_test(begin - NewDoc = "{\"keys\": [\"db1\"]}", - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", - [?CONTENT_JSON, ?AUTH], NewDoc), - BodyJson = jiffy:decode(ResultBody), - {Db1Data} = lists:nth(1, BodyJson), - [ - ?assertEqual(<<"db1">>, - couch_util:get_value(<<"key">>, Db1Data)), - ?assertNotEqual(undefined, - couch_util:get_value(<<"info">>, Db1Data)) - ] - end). - - -should_return_dbs_info_for_multiple_dbs(Url) -> - ?_test(begin - NewDoc = "{\"keys\": [\"db1\", \"db2\"]}", - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", - [?CONTENT_JSON, ?AUTH], NewDoc), - BodyJson = jiffy:decode(ResultBody), - {Db1Data} = lists:nth(1, BodyJson), - {Db2Data} = lists:nth(2, BodyJson), - [ - ?assertEqual(<<"db1">>, - couch_util:get_value(<<"key">>, Db1Data)), - ?assertNotEqual(undefined, - couch_util:get_value(<<"info">>, Db1Data)), - ?assertEqual(<<"db2">>, - couch_util:get_value(<<"key">>, Db2Data)), - ?assertNotEqual(undefined, - couch_util:get_value(<<"info">>, Db2Data)) - ] - end). - - -should_return_error_for_exceeded_keys(Url) -> - ?_test(begin - NewDoc = "{\"keys\": [\"db1\", \"db2\"]}", - ok = config:set("chttpd", "max_db_number_for_dbs_info_req", "1"), - {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", - [?CONTENT_JSON, ?AUTH], NewDoc), - {Body} = jiffy:decode(ResultBody), - ok = config:delete("chttpd", "max_db_number_for_dbs_info_req"), - [ - ?assertEqual(<<"bad_request">>, - couch_util:get_value(<<"error">>, Body)), - ?assertEqual(400, Code) - ] - end). - - -should_return_error_for_missing_keys(Url) -> - ?_test(begin - NewDoc = "{\"missingkeys\": [\"db1\", \"db2\"]}", - {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", - [?CONTENT_JSON, ?AUTH], NewDoc), - {Body} = jiffy:decode(ResultBody), - [ - ?assertEqual(<<"bad_request">>, - couch_util:get_value(<<"error">>, Body)), - ?assertEqual(400, Code) - ] - end). 
- - -should_return_dbs_info_for_dbs_with_mixed_state(Url) -> - ?_test(begin - NewDoc = "{\"keys\": [\"db1\", \"noexisteddb\"]}", - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", - [?CONTENT_JSON, ?AUTH], NewDoc), - Json = jiffy:decode(ResultBody), - {Db1Data} = lists:nth(1, Json), - {Db2Data} = lists:nth(2, Json), - [ - ?assertEqual( - <<"db1">>, couch_util:get_value(<<"key">>, Db1Data)), - ?assertNotEqual(undefined, - couch_util:get_value(<<"info">>, Db1Data)), - ?assertEqual( - <<"noexisteddb">>, couch_util:get_value(<<"key">>, Db2Data)), - ?assertEqual(undefined, couch_util:get_value(<<"info">>, Db2Data)) - ] - end). diff --git a/src/chttpd/test/chttpd_error_info_tests.erl b/src/chttpd/test/chttpd_error_info_tests.erl deleted file mode 100644 index fdb015c08..000000000 --- a/src/chttpd/test/chttpd_error_info_tests.erl +++ /dev/null @@ -1,168 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_error_info_tests). - --include_lib("eunit/include/eunit.hrl"). - - -error_info_test() -> - Error = <<"error">>, - Reason = <<"reason">>, - ArgResult = [ - { - bad_request, - {400, <<"bad_request">>, <<>>} - }, - { - {bad_request, Reason}, - {400, <<"bad_request">>, Reason} - }, - { - {bad_request, "error", "reason"}, - {400, Error, Reason} - }, - { - {query_parse_error, Reason}, - {400, <<"query_parse_error">>, Reason} - }, - { - database_does_not_exist, - {404, <<"not_found">>, <<"Database does not exist.">>} - }, - { - not_found, - {404, <<"not_found">>, <<"missing">>} - }, - { - {not_found, Reason}, - {404, <<"not_found">>, Reason} - }, - { - {not_acceptable, Reason}, - {406, <<"not_acceptable">>, Reason} - }, - { - conflict, - {409, <<"conflict">>, <<"Document update conflict.">>} - }, - { - {conflict, Reason}, - %% yes, the reason is ignored - {409, <<"conflict">>, <<"Document update conflict.">>} - }, - { - {forbidden, Reason}, - {403, <<"forbidden">>, Reason} - }, - { - {forbidden, Error, Reason}, - {403, Error, Reason} - }, - { - {unauthorized, Reason}, - {401, <<"unauthorized">>, Reason} - }, - { - file_exists, - {412, <<"file_exists">>, - <<"The database could not be created, the file already exists.">>} - }, - { - {error, {nodedown, Reason}}, {412, <<"nodedown">>, Reason} - }, - { - {maintenance_mode, Reason}, - {412, <<"nodedown">>, Reason} - }, - { - {maintenance_mode, nil, Reason}, - {412, <<"nodedown">>, Reason} - }, - { - {w_quorum_not_met, Reason}, - {500, <<"write_quorum_not_met">>, Reason} - }, - { - request_uri_too_long, - {414, <<"too_long">>, <<"the request uri is too long">>} - }, - { - {bad_ctype, Reason}, - {415, <<"bad_content_type">>, Reason} - }, - { - requested_range_not_satisfiable, - {416, <<"requested_range_not_satisfiable">>, - <<"Requested range not satisfiable">>} - }, - { - {error, {illegal_database_name, <<"foo">>}}, - {400, <<"illegal_database_name">>, - <<"Name: 'foo'. Only lowercase characters (a-z), digits (0-9), and any of" - " the characters _, $, (, ), +, -, and / are allowed." 
- " Must begin with a letter.">>} - }, - { - {Error, {illegal_docid,1}}, - {400, <<"illegal_docid">>, 1} - }, - { - {missing_stub, Reason}, - {412, <<"missing_stub">>, Reason} - }, - { - request_entity_too_large, - {413, <<"too_large">>, <<"the request entity is too large">>} - }, - { - not_implemented, - {501, <<"not_implemented">>, - <<"this feature is not yet implemented">>} - }, - { - timeout, - {500, <<"timeout">>, - <<"The request could not be processed in a reasonable" - " amount of time.">>} - }, - { - {timeout, Error}, - {500, <<"timeout">>, - <<"The request could not be processed in a reasonable" - " amount of time.">>} - }, - { - {Error, null}, - {500, <<"unknown_error">>, Error} - }, - { - {Error, Reason}, - {500, Error, Reason} - }, - { - {Error, nil, [{}]}, - {500, <<"unknown_error">>, Error} - }, - { - {Error, Reason, [{}]}, - {500, Error, Reason} - }, - { - Error, - {500, <<"unknown_error">>, Error} - } - ], - - lists:foreach(fun({Arg, Result}) -> - ?assertEqual(Result, chttpd:error_info(Arg)) - end, ArgResult). diff --git a/src/chttpd/test/chttpd_handlers_tests.erl b/src/chttpd/test/chttpd_handlers_tests.erl deleted file mode 100644 index f3e8f5dcd..000000000 --- a/src/chttpd/test/chttpd_handlers_tests.erl +++ /dev/null @@ -1,87 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_handlers_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - - -setup() -> - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - BaseUrl = lists:concat(["http://", Addr, ":", Port]), - BaseUrl. - -teardown(_Url) -> - ok. - - -replicate_test_() -> - { - "_replicate", - { - setup, - fun chttpd_test_util:start_couch/0, - fun chttpd_test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_escape_dbname_on_replicate/1 - ] - } - } - }. - - -should_escape_dbname_on_replicate(Url) -> - ?_test( - begin - UrlBin = ?l2b(Url), - Request = couch_util:json_encode({[ - {<<"source">>, <>}, - {<<"target">>, <<"bar/baz">>}, - {<<"create_target">>, true} - ]}), - {ok, 200, _, Body} = request_replicate(Url ++ "/_replicate", Request), - JSON = couch_util:json_decode(Body), - - Source = json_value(JSON, [<<"source">>]), - Target = json_value(JSON, [<<"target">>, <<"url">>]), - ?assertEqual(<>, Source), - ?assertEqual(<>, Target) - end). - - -json_value(JSON, Keys) -> - couch_util:get_nested_json_value(JSON, Keys). - -request_replicate(Url, Body) -> - Headers = [{"Content-Type", "application/json"}], - Handler = {chttpd_misc, handle_replicate_req}, - request(post, Url, Headers, Body, Handler, fun(Req) -> - chttpd:send_json(Req, 200, get(post_body)) - end). - -request(Method, Url, Headers, Body, {M, F}, MockFun) -> - meck:new(M, [passthrough, non_strict]), - try - meck:expect(M, F, MockFun), - Result = test_request:Method(Url, Headers, Body), - ?assert(meck:validate(M)), - Result - catch Kind:Reason -> - {Kind, Reason} - after - meck:unload(M) - end. 
diff --git a/src/chttpd/test/chttpd_open_revs_error_test.erl b/src/chttpd/test/chttpd_open_revs_error_test.erl deleted file mode 100644 index d53d370f8..000000000 --- a/src/chttpd/test/chttpd_open_revs_error_test.erl +++ /dev/null @@ -1,112 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_open_revs_error_test). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(USER, "chttpd_db_test_admin"). --define(PASS, "pass"). --define(AUTH, {basic_auth, {?USER, ?PASS}}). --define(CONTENT_JSON, {"Content-Type", "application/json"}). --define(CONTENT_MULTI_FORM, {"Content-Type", - "multipart/form-data;boundary=\"bound\""}). - -setup() -> - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - TmpDb = ?tempdb(), - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), - mock(fabric), - create_db(Url), - Url. - -teardown(Url) -> - delete_db(Url), - (catch meck:unload(fabric)), - ok = config:delete("admins", ?USER, _Persist=false). - -create_db(Url) -> - {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), - ?assert(Status =:= 201 orelse Status =:= 202). - - -create_doc(Url, Id) -> - test_request:put(Url ++ "/" ++ Id, - [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}"). - -delete_db(Url) -> - {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). - -open_revs_error_test_() -> - { - "open revs error tests", - { - setup, - fun chttpd_test_util:start_couch/0, - fun chttpd_test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_return_503_error_for_open_revs_get/1, - fun should_return_503_error_for_open_revs_post_form/1 - ] - } - } - }. - -should_return_503_error_for_open_revs_get(Url) -> - {ok, _, _, Body} = create_doc(Url, "testdoc"), - {Json} = ?JSON_DECODE(Body), - Ref = couch_util:get_value(<<"rev">>, Json, undefined), - mock_open_revs({error, all_workers_died}), - {ok, Code, _, _} = test_request:get(Url ++ - "/testdoc?rev=" ++ ?b2l(Ref), [?AUTH]), - ?_assertEqual(503, Code). 
- -should_return_503_error_for_open_revs_post_form(Url) -> - Port = mochiweb_socket_server:get(chttpd, port), - Host = lists:concat([ "http://127.0.0.1:", Port]), - Referer = {"Referer", Host}, - Body1 = "{\"body\":\"This is a body.\"}", - DocBeg = "--bound\r\nContent-Disposition: form-data; name=\"_doc\"\r\n\r\n", - DocRev = "--bound\r\nContent-Disposition: form-data; name=\"_rev\"\r\n\r\n", - DocRest = "\r\n--bound\r\nContent-Disposition:" - "form-data; name=\"_attachments\"; filename=\"file.txt\"\r\n" - "Content-Type: text/plain\r\n\r\ncontents of file.txt\r\n\r\n" - "--bound--", - Doc1 = lists:concat([DocBeg, Body1, DocRest]), - {ok, _, _, ResultBody} = test_request:post(Url ++ "/" ++ "RevDoc", - [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc1), - {Json} = ?JSON_DECODE(ResultBody), - Ref = couch_util:get_value(<<"rev">>, Json, undefined), - Doc2 = lists:concat([DocRev, ?b2l(Ref) , DocRest]), - - mock_open_revs({error, all_workers_died}), - {ok, Code, _, ResultBody1} = test_request:post(Url ++ "/" ++ "RevDoc", - [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc2), - {Json1} = ?JSON_DECODE(ResultBody1), - ErrorMessage = couch_util:get_value(<<"error">>, Json1), - [ - ?_assertEqual(503, Code), - ?_assertEqual(<<"service unvailable">>, ErrorMessage) - ]. - -mock_open_revs(RevsResp) -> - ok = meck:expect(fabric, open_revs, fun(_, _, _, _) -> RevsResp end). - -mock(fabric) -> - ok = meck:new(fabric, [passthrough]). diff --git a/src/chttpd/test/chttpd_plugin_tests.erl b/src/chttpd/test/chttpd_plugin_tests.erl deleted file mode 100644 index 36572a419..000000000 --- a/src/chttpd/test/chttpd_plugin_tests.erl +++ /dev/null @@ -1,187 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_plugin_tests). - --export([ - before_request/1, - after_request/2, - handle_error/1, - before_response/4, - before_serve_file/5 -]). - --export([ %% couch_epi_plugin behaviour - app/0, - providers/0, - services/0, - data_providers/0, - data_subscriptions/0, - processes/0, - notify/3 -]). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - -%% couch_epi_plugin behaviour - -app() -> test_app. -providers() -> [{chttpd, ?MODULE}]. -services() -> []. -data_providers() -> []. -data_subscriptions() -> []. -processes() -> []. -notify(_, _, _) -> ok. - - -setup() -> - couch_tests:setup([ - couch_epi_dispatch:dispatch(chttpd, ?MODULE) - ]). - -teardown(Ctx) -> - couch_tests:teardown(Ctx). - -before_request({true, Id}) -> [{true, [{before_request, Id}]}]; -before_request({false, Id}) -> [{false, Id}]; -before_request({fail, Id}) -> throw({before_request, Id}). - -after_request({true, Id}, A) -> [{true, [{after_request, Id}]}, A]; -after_request({false, Id}, A) -> [{false, Id}, A]; -after_request({fail, Id}, _A) -> throw({after_request, Id}). - -handle_error({true, Id}) -> [{true, [{handle_error, Id}]}]; -handle_error({false, Id}) -> [{false, Id}]; -handle_error({fail, Id}) -> throw({handle_error, Id}). 
- -before_response({true, Id}, A, B, C) -> - [{true, [{before_response, Id}]}, A, B, C]; -before_response({false, Id}, A, B, C) -> - [{false, Id}, A, B, C]; -before_response({fail, Id}, _A, _B, _C) -> - throw({before_response, Id}). - -before_serve_file({true, Id}, A, B, C, D) -> - [{true, [{before_serve_file, Id}]}, A, B, C, D]; -before_serve_file({false, Id}, A, B, C, D) -> - [{false, Id}, A, B, C, D]; -before_serve_file({fail, _Id}, _A, _B, _C, _D) -> - throw(before_serve_file). - -callback_test_() -> - { - "callback tests", - { - setup, fun setup/0, fun teardown/1, - [ - fun before_request_match/0, - fun before_request_no_match/0, - fun before_request_throw/0, - - fun after_request_match/0, - fun after_request_no_match/0, - fun after_request_throw/0, - - fun handle_error_match/0, - fun handle_error_no_match/0, - fun handle_error_throw/0, - - fun before_response_match/0, - fun before_response_no_match/0, - fun before_response_throw/0, - - fun before_serve_file_match/0, - fun before_serve_file_no_match/0, - fun before_serve_file_throw/0 - ] - } - }. - - -before_request_match() -> - ?assertEqual( - {ok, {true, [{before_request, foo}]}}, - chttpd_plugin:before_request({true, foo})). - -before_request_no_match() -> - ?assertEqual( - {ok, {false, foo}}, - chttpd_plugin:before_request({false, foo})). - -before_request_throw() -> - ?assertThrow( - {before_request, foo}, - chttpd_plugin:before_request({fail, foo})). - - -after_request_match() -> - ?assertEqual( - {ok, bar}, - chttpd_plugin:after_request({true, foo}, bar)). - -after_request_no_match() -> - ?assertEqual( - {ok, bar}, - chttpd_plugin:after_request({false, foo}, bar)). - -after_request_throw() -> - ?assertThrow( - {after_request, foo}, - chttpd_plugin:after_request({fail, foo}, bar)). - - -handle_error_match() -> - ?assertEqual( - {true, [{handle_error, foo}]}, - chttpd_plugin:handle_error({true, foo})). - -handle_error_no_match() -> - ?assertEqual( - {false, foo}, - chttpd_plugin:handle_error({false, foo})). - -handle_error_throw() -> - ?assertThrow( - {handle_error, foo}, - chttpd_plugin:handle_error({fail, foo})). - -before_response_match() -> - ?assertEqual( - {ok, {{true, [{before_response, foo}]}, 1, 2, 3}}, - chttpd_plugin:before_response({true, foo}, 1, 2, 3)). - -before_response_no_match() -> - ?assertEqual( - {ok, {{false, foo}, 1, 2, 3}}, - chttpd_plugin:before_response({false, foo}, 1, 2, 3)). - -before_response_throw() -> - ?assertThrow( - {before_response, foo}, - chttpd_plugin:before_response({fail, foo}, 1, 2, 3)). - - -before_serve_file_match() -> - ?assertEqual( - {ok, {{true, [{before_serve_file, foo}]}, 1, 2, 3, 4}}, - chttpd_plugin:before_serve_file({true, foo}, 1, 2, 3, 4)). - -before_serve_file_no_match() -> - ?assertEqual( - {ok, {{false, foo}, 1, 2, 3, 4}}, - chttpd_plugin:before_serve_file({false, foo}, 1, 2, 3, 4)). - -before_serve_file_throw() -> - ?assertThrow( - before_serve_file, - chttpd_plugin:before_serve_file({fail, foo}, 1, 2, 3, 4)). diff --git a/src/chttpd/test/chttpd_prefer_header_test.erl b/src/chttpd/test/chttpd_prefer_header_test.erl deleted file mode 100644 index 0f43ba437..000000000 --- a/src/chttpd/test/chttpd_prefer_header_test.erl +++ /dev/null @@ -1,112 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_prefer_header_test). - --compile(tuple_calls). - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). - - -mock_request(ExcludeHeader) -> - Headers = mochiweb_headers:make(ExcludeHeader), - MochiReq = mochiweb_request:new(nil, 'GET', "/", {1, 1}, Headers), - MochiReq:cleanup(), - #httpd{mochi_req = MochiReq}. - - -default_headers() -> - [ - {"Cache-Control","must-revalidate"}, - {"Content-Type","application/json"}, - {"Content-Length", "100"}, - {"ETag","\"12343\""}, - {"X-Couch-Request-ID","7bd1adab86"}, - {"X-CouchDB-Body-Time","0"}, - {"Vary", "Accept-Encoding"}, - {"Server","CouchDB/2.1.0-f1a1d7f1c (Erlang OTP/19)"} - ]. - - -minimal_options_headers() -> - [ - {"Cache-Control","must-revalidate"}, - {"Content-Type","application/json"}, - {"Content-Length", "100"}, - {"ETag","\"12343\""}, - {"Vary", "Accept-Encoding"}, - {"Server","CouchDB/2.1.0-f1a1d7f1c (Erlang OTP/19)"} - ]. - - -default_no_exclude_header_test() -> - Headers = chttpd_prefer_header:maybe_return_minimal( - mock_request([]), - default_headers() - ), - ?assertEqual(default_headers(), Headers). - - -unsupported_exclude_header_test() -> - Req = mock_request([{"prefer", "Wrong"}]), - Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()), - ?assertEqual(default_headers(), Headers). - - -empty_header_test() -> - Req = mock_request([{"prefer", ""}]), - Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()), - ?assertEqual(default_headers(), Headers). - -setup() -> - ok = meck:new(config), - ok = meck:expect(config, get, fun("chttpd", "prefer_minimal", _) -> - "Cache-Control, Content-Length, Content-Type, ETag, Server, Vary" - end), - ok. - - -teardown(_) -> - meck:unload(config). - - -exclude_headers_test_() -> - { - "Test Prefer headers", - { - foreach, fun setup/0, fun teardown/1, - [ - fun minimal_options/1, - fun minimal_options_check_header_case/1, - fun minimal_options_check_header_value_case/1 - ] - } - }. - - -minimal_options(_) -> - Req = mock_request([{"Prefer", "return=minimal"}]), - Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()), - ?_assertEqual(minimal_options_headers(), Headers). - - -minimal_options_check_header_case(_) -> - Req = mock_request([{"prefer", "return=minimal"}]), - Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()), - ?_assertEqual(minimal_options_headers(), Headers). - - -minimal_options_check_header_value_case(_) -> - Req = mock_request([{"prefer", "RETURN=MINIMAL"}]), - Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()), - ?_assertEqual(minimal_options_headers(), Headers). \ No newline at end of file diff --git a/src/chttpd/test/chttpd_purge_tests.erl b/src/chttpd/test/chttpd_purge_tests.erl deleted file mode 100644 index dbd73de1f..000000000 --- a/src/chttpd/test/chttpd_purge_tests.erl +++ /dev/null @@ -1,406 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_purge_tests). - - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - - --define(USER, "chttpd_db_test_admin"). --define(PASS, "pass"). --define(AUTH, {basic_auth, {?USER, ?PASS}}). --define(CONTENT_JSON, {"Content-Type", "application/json"}). - - -setup() -> - ok = config:set("admins", ?USER, ?PASS, _Persist=false), - TmpDb = ?tempdb(), - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), - create_db(Url), - Url. - - -teardown(Url) -> - delete_db(Url), - ok = config:delete("admins", ?USER, _Persist=false). - - -create_db(Url) -> - {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), - ?assert(Status =:= 201 orelse Status =:= 202). - - -create_doc(Url, Id) -> - test_request:put(Url ++ "/" ++ Id, - [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}"). - -create_doc(Url, Id, Content) -> - test_request:put(Url ++ "/" ++ Id, - [?CONTENT_JSON, ?AUTH], "{\"mr\": \"" ++ Content ++ "\"}"). - - -create_docs(Url, Docs) -> - test_request:post(Url ++ "/_bulk_docs", - [?CONTENT_JSON, ?AUTH], ?JSON_ENCODE({[{docs, Docs}]})). - - -delete_db(Url) -> - {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). - - -purge_test_() -> - { - "chttpd db tests", - { - setup, - fun chttpd_test_util:start_couch/0, - fun chttpd_test_util:stop_couch/1, - { - foreach, - fun setup/0, - fun teardown/1, - [ - fun test_empty_purge_request/1, - fun test_ok_purge_request/1, - fun test_ok_purge_request_with_101_docid/1, - fun test_accepted_purge_request/1, - fun test_partial_purge_request/1, - fun test_mixed_purge_request/1, - fun test_overmany_ids_or_revs_purge_request/1, - fun test_exceed_limits_on_purge_infos/1, - fun should_error_set_purged_docs_limit_to0/1, - fun test_timeout_set_purged_infos_limit/1 - ] - } - } - }. - - -test_empty_purge_request(Url) -> - ?_test(begin - IdsRevs = "{}", - {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/", - [?CONTENT_JSON, ?AUTH], IdsRevs), - ResultJson = ?JSON_DECODE(ResultBody), - ?assert(Status =:= 201 orelse Status =:= 202), - ?assertEqual( - {[ - {<<"purge_seq">>, null}, - {<<"purged">>,{[]}} - ]}, - ResultJson - ) - end). 
- - -test_ok_purge_request(Url) -> - ?_test(begin - {ok, _, _, Body} = create_doc(Url, "doc1"), - {Json} = ?JSON_DECODE(Body), - Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), - {ok, _, _, Body2} = create_doc(Url, "doc2"), - {Json2} = ?JSON_DECODE(Body2), - Rev2 = couch_util:get_value(<<"rev">>, Json2, undefined), - {ok, _, _, Body3} = create_doc(Url, "doc3"), - {Json3} = ?JSON_DECODE(Body3), - Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined), - - IdsRevsEJson = {[ - {<<"doc1">>, [Rev1]}, - {<<"doc2">>, [Rev2]}, - {<<"doc3">>, [Rev3]} - ]}, - IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), - - {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/", - [?CONTENT_JSON, ?AUTH], IdsRevs), - ResultJson = ?JSON_DECODE(ResultBody), - ?assert(Status =:= 201 orelse Status =:= 202), - ?assertEqual( - {[ - {<<"purge_seq">>, null}, - {<<"purged">>, {[ - {<<"doc1">>, [Rev1]}, - {<<"doc2">>, [Rev2]}, - {<<"doc3">>, [Rev3]} - ]}} - ]}, - ResultJson - ) - end). - - -test_ok_purge_request_with_101_docid(Url) -> - ?_test(begin - PurgedDocsNum = 101, - Docs = lists:foldl(fun(I, Acc) -> - Id = list_to_binary(integer_to_list(I)), - Doc = {[{<<"_id">>, Id}, {value, I}]}, - [Doc | Acc] - end, [], lists:seq(1, PurgedDocsNum)), - - {ok, _, _, Body} = create_docs(Url, Docs), - BodyJson = ?JSON_DECODE(Body), - - PurgeBody = lists:map(fun({DocResp}) -> - Id = couch_util:get_value(<<"id">>, DocResp, undefined), - Rev = couch_util:get_value(<<"rev">>, DocResp, undefined), - {Id, [Rev]} - end, BodyJson), - - ok = config:set("purge", "max_document_id_number", "101"), - try - {ok, Status, _, _} = test_request:post(Url ++ "/_purge/", - [?CONTENT_JSON, ?AUTH], ?JSON_ENCODE({PurgeBody})), - ?assert(Status =:= 201 orelse Status =:= 202) - after - ok = config:delete("purge", "max_document_id_number") - end - end). - - -test_accepted_purge_request(Url) -> - ?_test(begin - {ok, _, _, Body} = create_doc(Url, "doc1"), - {Json} = ?JSON_DECODE(Body), - Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), - IdsRevsEJson = {[ - {<<"doc1">>, [Rev1]} - ]}, - IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), - meck:new(fabric, [passthrough]), - meck:expect(fabric, purge_docs, - fun(_, _, _) -> {accepted,[{accepted,[{1, - <<57,27,64,134,152,18,73,243,40,1,141,214,135,104,79,188>>}]}]} - end - ), - {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/", - [?CONTENT_JSON, ?AUTH], IdsRevs), - ResultJson = ?JSON_DECODE(ResultBody), - meck:unload(fabric), - ?assert(Status =:= 202), - ?assertEqual( - {[ - {<<"purge_seq">>, null}, - {<<"purged">>, {[ - {<<"doc1">>, [Rev1]} - ]}} - ]}, - ResultJson - ) - end). 
- - -test_partial_purge_request(Url) -> - ?_test(begin - {ok, _, _, Body} = create_doc(Url, "doc1"), - {Json} = ?JSON_DECODE(Body), - Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), - - NewDoc = "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\", - \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]}, - \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}", - {ok, _, _, _} = test_request:post(Url ++ "/_bulk_docs/", - [?CONTENT_JSON, ?AUTH], NewDoc), - - IdsRevsEJson = {[{<<"doc1">>, [Rev1]}]}, - IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), - {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/", - [?CONTENT_JSON, ?AUTH], IdsRevs), - ResultJson = ?JSON_DECODE(ResultBody), - ?assert(Status =:= 201 orelse Status =:= 202), - ?assertEqual( - {[ - {<<"purge_seq">>, null}, - {<<"purged">>, {[ - {<<"doc1">>, [Rev1]} - ]}} - ]}, - ResultJson - ), - {ok, Status2, _, ResultBody2} = test_request:get(Url - ++ "/doc1/", [?AUTH]), - {Json2} = ?JSON_DECODE(ResultBody2), - Content = couch_util:get_value(<<"content">>, Json2, undefined), - ?assertEqual(<<"updated">>, Content), - ?assert(Status2 =:= 200) - end). - - -test_mixed_purge_request(Url) -> - ?_test(begin - {ok, _, _, Body} = create_doc(Url, "doc1"), - {Json} = ?JSON_DECODE(Body), - Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), - - NewDoc = "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\", - \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]}, - \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}", - {ok, _, _, _} = test_request:post(Url ++ "/_bulk_docs/", - [?CONTENT_JSON, ?AUTH], NewDoc), - - {ok, _, _, _Body2} = create_doc(Url, "doc2", "content2"), - {ok, _, _, Body3} = create_doc(Url, "doc3", "content3"), - {Json3} = ?JSON_DECODE(Body3), - Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined), - - - IdsRevsEJson = {[ - {<<"doc1">>, [Rev1]}, % partial purge - {<<"doc2">>, [Rev3, Rev1]}, % correct format, but invalid rev - {<<"doc3">>, [Rev3]} % correct format and rev - ]}, - IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), - {ok, Status, _, Body4} = test_request:post(Url ++ "/_purge/", - [?CONTENT_JSON, ?AUTH], IdsRevs), - ResultJson = ?JSON_DECODE(Body4), - ?assert(Status =:= 201 orelse Status =:= 202), - ?assertEqual( - {[ - {<<"purge_seq">>, null}, - {<<"purged">>, {[ - {<<"doc1">>, [Rev1]}, - {<<"doc2">>, []}, - {<<"doc3">>, [Rev3]} - ]}} - ]}, - ResultJson - ), - {ok, Status2, _, Body5} = test_request:get(Url - ++ "/doc1/", [?AUTH]), - {Json5} = ?JSON_DECODE(Body5), - Content = couch_util:get_value(<<"content">>, Json5, undefined), - ?assertEqual(<<"updated">>, Content), - ?assert(Status2 =:= 200) - end). 
- - -test_overmany_ids_or_revs_purge_request(Url) -> - ?_test(begin - {ok, _, _, Body} = create_doc(Url, "doc1"), - {Json} = ?JSON_DECODE(Body), - Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), - - NewDoc = "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\", - \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]}, - \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}", - {ok, _, _, _} = test_request:post(Url ++ "/_bulk_docs/", - [?CONTENT_JSON, ?AUTH], NewDoc), - - {ok, _, _, _Body2} = create_doc(Url, "doc2", "content2"), - {ok, _, _, Body3} = create_doc(Url, "doc3", "content3"), - {Json3} = ?JSON_DECODE(Body3), - Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined), - - IdsRevsEJson = {[ - {<<"doc1">>, [Rev1]}, % partial purge - {<<"doc2">>, [Rev3, Rev1]}, % correct format, but invalid rev - {<<"doc3">>, [Rev3]} % correct format and rev - ]}, - IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), - - % Ids larger than expected - config:set("purge", "max_document_id_number", "1"), - {ok, Status, _, Body4} = test_request:post(Url ++ "/_purge/", - [?CONTENT_JSON, ?AUTH], IdsRevs), - config:delete("purge", "max_document_id_number"), - ResultJson = ?JSON_DECODE(Body4), - ?assertEqual(400, Status), - ?assertMatch({[ - {<<"error">>,<<"bad_request">>}, - {<<"reason">>,<<"Exceeded maximum number of documents.">>}]}, - ResultJson), - - % Revs larger than expected - config:set("purge", "max_revisions_number", "1"), - {ok, Status2, _, Body5} = test_request:post(Url ++ "/_purge/", - [?CONTENT_JSON, ?AUTH], IdsRevs), - config:delete("purge", "max_revisions_number"), - ResultJson2 = ?JSON_DECODE(Body5), - ?assertEqual(400, Status2), - ?assertMatch({[ - {<<"error">>,<<"bad_request">>}, - {<<"reason">>,<<"Exceeded maximum number of revisions.">>}]}, - ResultJson2) - end). - - -test_exceed_limits_on_purge_infos(Url) -> - ?_test(begin - {ok, Status1, _, _} = test_request:put(Url ++ "/_purged_infos_limit/", - [?CONTENT_JSON, ?AUTH], "2"), - ?assert(Status1 =:= 200), - - {ok, _, _, Body} = create_doc(Url, "doc1"), - {Json} = ?JSON_DECODE(Body), - Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), - {ok, _, _, Body2} = create_doc(Url, "doc2"), - {Json2} = ?JSON_DECODE(Body2), - Rev2 = couch_util:get_value(<<"rev">>, Json2, undefined), - {ok, _, _, Body3} = create_doc(Url, "doc3"), - {Json3} = ?JSON_DECODE(Body3), - Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined), - - IdsRevsEJson = {[ - {<<"doc1">>, [Rev1]}, - {<<"doc2">>, [Rev2]}, - {<<"doc3">>, [Rev3]} - ]}, - IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), - - {ok, Status2, _, ResultBody} = test_request:post(Url ++ "/_purge/", - [?CONTENT_JSON, ?AUTH], IdsRevs), - - ResultJson = ?JSON_DECODE(ResultBody), - ?assert(Status2 =:= 201 orelse Status2 =:= 202), - ?assertEqual( - {[ - {<<"purge_seq">>, null}, - {<<"purged">>, {[ - {<<"doc1">>, [Rev1]}, - {<<"doc2">>, [Rev2]}, - {<<"doc3">>, [Rev3]} - ]}} - ]}, - ResultJson - ) - - end). - - -should_error_set_purged_docs_limit_to0(Url) -> - ?_test(begin - {ok, Status, _, _} = test_request:put(Url ++ "/_purged_infos_limit/", - [?CONTENT_JSON, ?AUTH], "0"), - ?assert(Status =:= 400) - end). 
-
-
-test_timeout_set_purged_infos_limit(Url) ->
-    ?_test(begin
-        meck:new(fabric, [passthrough]),
-        meck:expect(fabric, set_purge_infos_limit, fun(_, _, _) ->
-            {error, timeout} end),
-        {ok, Status, _, ResultBody} = test_request:put(Url
-            ++ "/_purged_infos_limit/", [?CONTENT_JSON, ?AUTH], "2"),
-        meck:unload(fabric),
-        ResultJson = ?JSON_DECODE(ResultBody),
-        ?assert(Status =:= 500),
-        ?assertMatch({[
-            {<<"error">>,<<"error">>},
-            {<<"reason">>,<<"timeout">>}]},
-            ResultJson)
-    end).
diff --git a/src/chttpd/test/chttpd_security_tests.erl b/src/chttpd/test/chttpd_security_tests.erl
deleted file mode 100644
index 955b4ff01..000000000
--- a/src/chttpd/test/chttpd_security_tests.erl
+++ /dev/null
@@ -1,384 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_security_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_db_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
-
--define(TEST_MEMBER, "test_member").
--define(TEST_MEMBER_PASS, "test_member_pass").
--define(TEST_MEMBER_AUTH, {basic_auth, {?TEST_MEMBER, ?TEST_MEMBER_PASS}}).
-
--define(TEST_ADMIN, "test_admin").
--define(TEST_ADMIN_PASS, "test_admin_pass").
--define(TEST_ADMIN_AUTH, {basic_auth, {?TEST_ADMIN, ?TEST_ADMIN_PASS}}).
-
-
-
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
-
-setup() ->
-    Hashed = couch_passwords:hash_admin_password(?PASS),
-    Persist = false,
-    ok = config:set("admins", ?USER, ?b2l(Hashed), Persist),
-    UserDb = ?tempdb(),
-    TmpDb = ?tempdb(),
-    ok = config:set("chttpd_auth", "authentication_db", ?b2l(UserDb), Persist),
-
-    Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
-    Port = mochiweb_socket_server:get(chttpd, port),
-    BaseUrl = lists:concat(["http://", Addr, ":", Port, "/"]),
-    Url = lists:concat([BaseUrl, ?b2l(TmpDb)]),
-    UsersUrl = lists:concat([BaseUrl, ?b2l(UserDb)]),
-    create_db(UsersUrl),
-    create_db(Url),
-    create_design_doc(Url),
-    create_user(UsersUrl,?TEST_MEMBER,?TEST_MEMBER_PASS,[<<?TEST_MEMBER>>]),
-    create_user(UsersUrl,?TEST_ADMIN,?TEST_ADMIN_PASS,[<<?TEST_ADMIN>>]),
-    set_security(Url),
-    [Url, UsersUrl].
-
-teardown([Url,UsersUrl]) ->
-    delete_db(Url),
-    delete_db(UsersUrl),
-    ok = config:delete("admins", ?USER, _Persist=false).
-
-create_db(Url) ->
-    {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
-    ?assert(Status =:= 201 orelse Status =:= 202).
-
-create_design_doc(Url) ->
-    {ok, Status, _, _} = test_request:put(lists:concat([Url, '/_design/test']), [?CONTENT_JSON, ?AUTH],
-        "{\"id\":\"_design/test\"}"),
-    ?assert(Status =:= 201 orelse Status =:= 202).
-
-set_security(Url) ->
-    SecurityUrl = lists:concat([Url, "/_security"]),
-    SecurityProperties = [
-        {<<"admins">>,{[{<<"roles">>,[<<?TEST_ADMIN>>]}]}},
-        {<<"members">>,{[{<<"roles">>,[<<?TEST_MEMBER>>]}]}}
-    ],
-
-    Body = jiffy:encode({SecurityProperties}),
-    {ok, Status, _, _} = test_request:put(SecurityUrl, [?CONTENT_JSON, ?AUTH], Body),
-    ?assert(Status =:= 200).
- -delete_db(Url) -> - {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). - -create_user(UsersUrl, Name, Password, Roles) -> - - Body = "{\"name\":\"" ++ Name ++ - "\",\"type\":\"user\",\"roles\":" ++ erlang:binary_to_list(jiffy:encode(Roles)) ++ ",\"password\":\"" ++ Password ++"\"}", - - Url = lists:concat([ - UsersUrl, "/org.couchdb.user:", Name]), - {ok, 201, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], Body). - - -all_test_() -> - { - "chttpd security tests", - { - setup, - fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_allow_admin_db_compaction/1, - fun should_allow_valid_password_to_create_user/1, - fun should_disallow_invalid_password_to_create_user/1, - fun should_disallow_anonymous_db_compaction/1, - fun should_disallow_db_member_db_compaction/1, - fun should_allow_db_admin_db_compaction/1, - fun should_allow_admin_view_compaction/1, - fun should_disallow_anonymous_view_compaction/1, - fun should_allow_admin_db_view_cleanup/1, - fun should_disallow_anonymous_db_view_cleanup/1, - fun should_allow_admin_purge/1, - fun should_disallow_anonymous_purge/1, - fun should_disallow_db_member_purge/1, - fun should_allow_admin_purged_infos_limit/1, - fun should_disallow_anonymous_purged_infos_limit/1, - fun should_disallow_db_member_purged_infos_limit/1 - ] - } - } - }. - -security_object_validate_test_() -> - { - "chttpd security object validate tests", - { - setup, - fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_return_ok_for_sec_obj_with_roles/1, - fun should_return_ok_for_sec_obj_with_names/1, - fun should_return_ok_for_sec_obj_with_roles_and_names/1, - fun should_return_error_for_sec_obj_with_incorrect_roles_and_names/1, - fun should_return_error_for_sec_obj_with_incorrect_roles/1, - fun should_return_error_for_sec_obj_with_incorrect_names/1 - ] - } - } - }. - -should_allow_admin_db_compaction([Url,_UsersUrl]) -> - ?_assertEqual(true, - begin - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact", - [?CONTENT_JSON, ?AUTH], ""), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - couch_util:get_value(<<"ok">>, InnerJson, undefined) - end). - -should_allow_valid_password_to_create_user([_Url, UsersUrl]) -> - UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\", - \"type\": \"user\", \"roles\": [], \"password\": \"bar\"}", - {ok, _, _, ResultBody} = test_request:post(UsersUrl, - [?CONTENT_JSON, ?AUTH], UserDoc), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - ?_assertEqual(true, couch_util:get_value(<<"ok">>, InnerJson)). - -should_disallow_invalid_password_to_create_user([_Url, UsersUrl]) -> - UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\", - \"type\": \"user\", \"roles\": [], \"password\": 123}", - {ok, _, _, ResultBody} = test_request:post(UsersUrl, - [?CONTENT_JSON, ?AUTH], UserDoc), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - ErrType = couch_util:get_value(<<"error">>, InnerJson), - ?_assertEqual(<<"forbidden">>, ErrType). - -should_disallow_anonymous_db_compaction([Url,_UsersUrl]) -> - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact", - [?CONTENT_JSON], ""), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - ErrType = couch_util:get_value(<<"error">>, InnerJson), - ?_assertEqual(<<"unauthorized">>,ErrType). 
- -should_disallow_db_member_db_compaction([Url,_UsersUrl]) -> - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact", - [?CONTENT_JSON, ?TEST_MEMBER_AUTH], ""), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - ErrType = couch_util:get_value(<<"error">>, InnerJson), - ?_assertEqual(<<"unauthorized">>,ErrType). - -should_allow_db_admin_db_compaction([Url,_UsersUrl]) -> - ?_assertEqual(true, - begin - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact", - [?CONTENT_JSON, ?TEST_ADMIN_AUTH], ""), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - couch_util:get_value(<<"ok">>, InnerJson, undefined) - end). - -should_allow_admin_view_compaction([Url,_UsersUrl]) -> - ?_assertEqual(true, - begin - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact/test", - [?CONTENT_JSON, ?AUTH], ""), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - couch_util:get_value(<<"ok">>, InnerJson, undefined) - end). - -should_disallow_anonymous_view_compaction([Url,_UsersUrl]) -> - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact/test", - [?CONTENT_JSON], ""), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - ErrType = couch_util:get_value(<<"error">>, InnerJson), - ?_assertEqual(<<"unauthorized">>,ErrType). - -should_allow_admin_db_view_cleanup([Url,_UsersUrl]) -> - ?_assertEqual(true, - begin - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_view_cleanup", - [?CONTENT_JSON, ?AUTH], ""), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - couch_util:get_value(<<"ok">>, InnerJson, undefined) - end). - -should_disallow_anonymous_db_view_cleanup([Url,_UsersUrl]) -> - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_view_cleanup", - [?CONTENT_JSON], ""), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - ErrType = couch_util:get_value(<<"error">>, InnerJson), - ?_assertEqual(<<"unauthorized">>, ErrType). - -should_allow_admin_purge([Url,_UsersUrl]) -> - ?_assertEqual(null, - begin - IdsRevs = "{}", - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_purge", - [?CONTENT_JSON, ?AUTH], IdsRevs), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - couch_util:get_value(<<"purge_seq">>, InnerJson, undefined) - end). - -should_disallow_anonymous_purge([Url,_UsersUrl]) -> - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_purge", - [?CONTENT_JSON], ""), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - ErrType = couch_util:get_value(<<"error">>, InnerJson), - ?_assertEqual(<<"unauthorized">>, ErrType). - -should_disallow_db_member_purge([Url,_UsersUrl]) -> - {ok, _, _, ResultBody} = test_request:post(Url ++ "/_purge", - [?CONTENT_JSON, ?TEST_MEMBER_AUTH], ""), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - ErrType = couch_util:get_value(<<"error">>, InnerJson), - ?_assertEqual(<<"unauthorized">>,ErrType). - -should_allow_admin_purged_infos_limit([Url,_UsersUrl]) -> - ?_assertEqual(true, - begin - {ok, _, _, ResultBody} = test_request:put(Url - ++ "/_purged_infos_limit/", [?CONTENT_JSON, ?AUTH], "2"), - ResultJson = ?JSON_DECODE(ResultBody), - {InnerJson} = ResultJson, - couch_util:get_value(<<"ok">>, InnerJson, undefined) - end). 
-
-should_disallow_anonymous_purged_infos_limit([Url,_UsersUrl]) ->
-    {ok, _, _, ResultBody} = test_request:put(Url ++ "/_purged_infos_limit/",
-        [?CONTENT_JSON, ?TEST_MEMBER_AUTH], "2"),
-    ResultJson = ?JSON_DECODE(ResultBody),
-    {InnerJson} = ResultJson,
-    ErrType = couch_util:get_value(<<"error">>, InnerJson),
-    ?_assertEqual(<<"unauthorized">>, ErrType).
-
-should_disallow_db_member_purged_infos_limit([Url,_UsersUrl]) ->
-    {ok, _, _, ResultBody} = test_request:put(Url ++ "/_purged_infos_limit/",
-        [?CONTENT_JSON, ?TEST_MEMBER_AUTH], "2"),
-    ResultJson = ?JSON_DECODE(ResultBody),
-    {InnerJson} = ResultJson,
-    ErrType = couch_util:get_value(<<"error">>, InnerJson),
-    ?_assertEqual(<<"unauthorized">>,ErrType).
-
-should_return_ok_for_sec_obj_with_roles([Url,_UsersUrl]) ->
-    SecurityUrl = lists:concat([Url, "/_security"]),
-    SecurityProperties = [
-        {<<"admins">>,{[{<<"roles">>,[<<?TEST_ADMIN>>]}]}},
-        {<<"members">>,{[{<<"roles">>,[<<?TEST_MEMBER>>]}]}}
-    ],
-
-    Body = jiffy:encode({SecurityProperties}),
-    {ok, Status, _, _} = test_request:put(SecurityUrl,
-        [?CONTENT_JSON, ?AUTH], Body),
-    ?_assertEqual(200, Status).
-
-should_return_ok_for_sec_obj_with_names([Url,_UsersUrl]) ->
-    SecurityUrl = lists:concat([Url, "/_security"]),
-    SecurityProperties = [
-        {<<"admins">>,{[{<<"names">>,[<<?TEST_ADMIN>>]}]}},
-        {<<"members">>,{[{<<"names">>,[<<?TEST_MEMBER>>]}]}}
-    ],
-
-    Body = jiffy:encode({SecurityProperties}),
-    {ok, Status, _, _} = test_request:put(SecurityUrl,
-        [?CONTENT_JSON, ?AUTH], Body),
-    ?_assertEqual(200, Status).
-
-should_return_ok_for_sec_obj_with_roles_and_names([Url,_UsersUrl]) ->
-    SecurityUrl = lists:concat([Url, "/_security"]),
-    SecurityProperties = [
-        {<<"admins">>, {[{<<"names">>,[<<?TEST_ADMIN>>]},
-            {<<"roles">>,[<<?TEST_ADMIN>>]}]}},
-        {<<"members">>,{[{<<"names">>,[<<?TEST_MEMBER>>]},
-            {<<"roles">>,[<<?TEST_MEMBER>>]}]}}
-    ],
-
-    Body = jiffy:encode({SecurityProperties}),
-    {ok, Status, _, _} = test_request:put(SecurityUrl,
-        [?CONTENT_JSON, ?AUTH], Body),
-    ?_assertEqual(200, Status).
-
-should_return_error_for_sec_obj_with_incorrect_roles_and_names(
-    [Url,_UsersUrl]) ->
-    SecurityUrl = lists:concat([Url, "/_security"]),
-    SecurityProperties = [
-        {<<"admins">>,{[{<<"names">>,[123]}]}},
-        {<<"members">>,{[{<<"roles">>,["foo"]}]}}
-    ],
-
-    Body = jiffy:encode({SecurityProperties}),
-    {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
-        [?CONTENT_JSON, ?AUTH], Body),
-    ResultJson = ?JSON_DECODE(RespBody),
-    [
-        ?_assertEqual(500, Status),
-        ?_assertEqual({[
-            {<<"error">>,<<"error">>},
-            {<<"reason">>,<<"no_majority">>}
-        ]}, ResultJson)
-    ].
-
-should_return_error_for_sec_obj_with_incorrect_roles([Url,_UsersUrl]) ->
-    SecurityUrl = lists:concat([Url, "/_security"]),
-    SecurityProperties = [
-        {<<"admins">>,{[{<<"roles">>,[?TEST_ADMIN]}]}},
-        {<<"members">>,{[{<<"roles">>,[<<?TEST_MEMBER>>]}]}}
-    ],
-
-    Body = jiffy:encode({SecurityProperties}),
-    {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
-        [?CONTENT_JSON, ?AUTH], Body),
-    ResultJson = ?JSON_DECODE(RespBody),
-    [
-        ?_assertEqual(500, Status),
-        ?_assertEqual({[
-            {<<"error">>,<<"error">>},
-            {<<"reason">>,<<"no_majority">>}
-        ]}, ResultJson)
-    ].
-
-should_return_error_for_sec_obj_with_incorrect_names([Url,_UsersUrl]) ->
-    SecurityUrl = lists:concat([Url, "/_security"]),
-    SecurityProperties = [
-        {<<"admins">>,{[{<<"names">>,[<<?TEST_ADMIN>>]}]}},
-        {<<"members">>,{[{<<"names">>,[?TEST_MEMBER]}]}}
-    ],
-
-    Body = jiffy:encode({SecurityProperties}),
-    {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
-        [?CONTENT_JSON, ?AUTH], Body),
-    ResultJson = ?JSON_DECODE(RespBody),
-    [
-        ?_assertEqual(500, Status),
-        ?_assertEqual({[
-            {<<"error">>,<<"error">>},
-            {<<"reason">>,<<"no_majority">>}
-        ]}, ResultJson)
-    ].
diff --git a/src/chttpd/test/chttpd_socket_buffer_size_test.erl b/src/chttpd/test/chttpd_socket_buffer_size_test.erl
deleted file mode 100644
index 937880621..000000000
--- a/src/chttpd/test/chttpd_socket_buffer_size_test.erl
+++ /dev/null
@@ -1,127 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_socket_buffer_size_test).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(USER, "chttpd_db_socket_buffer_size_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(CONTENT_JSON, {"Content-Type", "application/json"}).
-
-
-setup(SocketOpts) ->
-    StartCtx = start_couch_with_cfg(SocketOpts),
-    Db = ?tempdb(),
-    create_db(url(Db)),
-    {StartCtx, Db}.
-
-
-teardown(_, {StartCtx, Db}) ->
-    delete_db(url(Db)),
-    ok = config:delete("admins", ?USER, _Persist=false),
-    test_util:stop_couch(StartCtx).
-
-
-socket_buffer_size_test_() ->
-    {
-        "chttpd socket_buffer_size_test",
-        {
-            foreachx,
-            fun setup/1, fun teardown/2,
-            [
-                {"[{recbuf, undefined}]", fun default_buffer/2},
-                {"[{recbuf, 1024}]", fun small_recbuf/2},
-                {"[{buffer, 1024}]", fun small_buffer/2}
-            ]
-        }
-    }.
-
-
-small_recbuf(_, {_, Db}) ->
-    {timeout, 30, ?_test(begin
-        Id = data(2048),
-        Response = put_req(url(Db) ++ "/" ++ Id, "{}"),
-        ?assert(Response =:= 400 orelse Response =:= request_failed)
-    end)}.
-
-
-small_buffer(_, {_, Db}) ->
-    {timeout, 30, ?_test(begin
-        Id = data(2048),
-        Response = put_req(url(Db) ++ "/" ++ Id, "{}"),
-        ?assert(Response =:= 400 orelse Response =:= request_failed)
-    end)}.
-
-
-default_buffer(_, {_, Db}) ->
-    {timeout, 30, ?_test(begin
-        Id = data(7000),
-        Headers = [{"Blah", data(7000)}],
-        Status = put_req(url(Db) ++ "/" ++ Id, Headers, "{}"),
-        ?assert(Status =:= 201 orelse Status =:= 202)
-    end)}.
-
-
-% Helper functions
-
-url() ->
-    Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
-    Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
-    "http://" ++ Addr ++ ":" ++ Port.
-
-
-url(Db) ->
-    url() ++ "/" ++ ?b2l(Db).
-
-
-create_db(Url) ->
-    Status = put_req(Url ++ "?q=1&n=1", "{}"),
-    ?assert(Status =:= 201 orelse Status =:= 202).
-
-
-delete_db(Url) ->
-    {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
-
-put_req(Url, Body) ->
-    put_req(Url, [], Body).
- - -put_req(Url, Headers, Body) -> - AllHeaders = Headers ++ [?CONTENT_JSON, ?AUTH], - case test_request:put(Url, AllHeaders, Body) of - {ok, Status, _, _} -> Status; - {error, Error} -> Error - end. - - -data(Size) -> - string:copies("x", Size). - - -append_to_cfg_chain(Cfg) -> - CfgDir = filename:dirname(lists:last(?CONFIG_CHAIN)), - CfgFile = filename:join([CfgDir, "chttpd_socket_buffer_extra_cfg.ini"]), - CfgSect = io_lib:format("[chttpd]~nserver_options = ~s~n", [Cfg]), - ok = file:write_file(CfgFile, CfgSect), - ?CONFIG_CHAIN ++ [CfgFile]. - - -start_couch_with_cfg(Cfg) -> - CfgChain = append_to_cfg_chain(Cfg), - StartCtx = test_util:start_couch(CfgChain, [chttpd]), - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - StartCtx. diff --git a/src/chttpd/test/chttpd_view_test.erl b/src/chttpd/test/chttpd_view_test.erl deleted file mode 100644 index 3457c6f30..000000000 --- a/src/chttpd/test/chttpd_view_test.erl +++ /dev/null @@ -1,123 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_view_test). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(USER, "chttpd_view_test_admin"). --define(PASS, "pass"). --define(AUTH, {basic_auth, {?USER, ?PASS}}). --define(CONTENT_JSON, {"Content-Type", "application/json"}). --define(DDOC, "{\"_id\": \"_design/bar\", \"views\": {\"baz\": - {\"map\": \"function(doc) {emit(doc._id, doc._id);}\"}}}"). - --define(FIXTURE_TXT, ?ABS_PATH(?FILE)). --define(i2l(I), integer_to_list(I)). - -setup() -> - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - TmpDb = ?tempdb(), - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), - create_db(Url), - Url. - -teardown(Url) -> - delete_db(Url), - ok = config:delete("admins", ?USER, _Persist=false). - -create_db(Url) -> - {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), - ?assert(Status =:= 201 orelse Status =:= 202). - - -create_doc(Url, Id) -> - test_request:put(Url ++ "/" ++ Id, - [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}"). - -delete_db(Url) -> - {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). - -all_view_test_() -> - { - "chttpd view tests", - { - setup, - fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_succeed_on_view_with_queries_keys/1, - fun should_succeed_on_view_with_queries_limit_skip/1, - fun should_succeed_on_view_with_multiple_queries/1 - ] - } - } - }. 
- - -should_succeed_on_view_with_queries_keys(Url) -> - ?_test(begin - [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - {ok, _, _, _} = test_request:put(Url ++ "/_design/bar", - [?CONTENT_JSON, ?AUTH], ?DDOC), - QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", - \"testdoc8\"]}]}", - {ok, _, _, RespBody} = test_request:post(Url ++ "/_design/bar/" - ++ "_view/baz/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). - - -should_succeed_on_view_with_queries_limit_skip(Url) -> - ?_test(begin - [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - {ok, _, _, _} = test_request:put(Url ++ "/_design/bar", - [?CONTENT_JSON, ?AUTH], ?DDOC), - QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ "/_design/bar/" - ++ "_view/baz/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)), - ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). - - -should_succeed_on_view_with_multiple_queries(Url) -> - ?_test(begin - [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - {ok, _, _, _} = test_request:put(Url ++ "/_design/bar", - [?CONTENT_JSON, ?AUTH], ?DDOC), - QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", - \"testdoc8\"]}, {\"limit\": 5, \"skip\": 2}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ "/_design/bar/" - ++ "_view/baz/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson1} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), - {InnerJson2} = lists:nth(2, ResultJsonBody), - ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), - ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) - end). diff --git a/src/chttpd/test/chttpd_welcome_test.erl b/src/chttpd/test/chttpd_welcome_test.erl deleted file mode 100644 index e427f4dff..000000000 --- a/src/chttpd/test/chttpd_welcome_test.erl +++ /dev/null @@ -1,104 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(chttpd_welcome_test). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(USER, "chttpd_db_test_admin"). --define(PASS, "pass"). --define(AUTH, {basic_auth, {?USER, ?PASS}}). --define(CONTENT_JSON, {"Content-Type", "application/json"}). 
- - -setup() -> - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - Url = lists:concat(["http://", Addr, ":", Port, "/"]), - Url. - - -teardown(_Url) -> - ok = config:delete("admins", ?USER, _Persist=false). - - -welcome_test_() -> - { - "chttpd welcome endpoint tests", - { - setup, - fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_have_version/1, - fun should_have_features/1, - fun should_have_uuid/1 - ] - } - } - }. - -should_have_uuid(Url) -> - ?_test(begin - {ok, Status, _, Body} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]), - ?assertEqual(200, Status), - {Json} = ?JSON_DECODE(Body), - CouchDB = couch_util:get_value(<<"couchdb">>, Json, undefined), - Uuid = couch_util:get_value(<<"uuid">>, Json, undefined), - Features = couch_util:get_value(<<"features">>, Json, undefined), - Sha = couch_util:get_value(<<"git_sha">>, Json, undefined), - ?assertNotEqual(Sha, undefined), - ?assertEqual(<<"Welcome">>, CouchDB), - RealUuid = couch_server:get_uuid(), - - ?assertEqual(RealUuid, Uuid), - ?assert(is_list(Features)) - end). - - -should_have_version(Url) -> - ?_test(begin - {ok, Status, _, Body} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]), - ?assertEqual(200, Status), - {Json} = ?JSON_DECODE(Body), - Version = couch_util:get_value(<<"version">>, Json, undefined), - CouchDB = couch_util:get_value(<<"couchdb">>, Json, undefined), - Features = couch_util:get_value(<<"features">>, Json, undefined), - Sha = couch_util:get_value(<<"git_sha">>, Json, undefined), - ?assertNotEqual(Sha, undefined), - ?assertEqual(<<"Welcome">>, CouchDB), - RealVersion = list_to_binary(couch_server:get_version()), - ?assertEqual(RealVersion, Version), - ?assert(is_list(Features)) - end). - - -should_have_features(Url) -> - ?_test(begin - config:enable_feature(snek), - {ok, 200, _, Body1} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]), - {Json1} = ?JSON_DECODE(Body1), - Features1 = couch_util:get_value(<<"features">>, Json1, undefined), - ?assert(is_list(Features1)), - ?assert(lists:member(<<"snek">>, Features1)), - config:disable_feature(snek), - {ok, 200, _, Body2} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]), - {Json2} = ?JSON_DECODE(Body2), - Features2 = couch_util:get_value(<<"features">>, Json2, undefined), - ?assert(is_list(Features2)), - ?assertNot(lists:member(<<"snek">>, Features2)) - end). diff --git a/src/chttpd/test/chttpd_xframe_test.erl b/src/chttpd/test/chttpd_xframe_test.erl deleted file mode 100644 index 1272c198c..000000000 --- a/src/chttpd/test/chttpd_xframe_test.erl +++ /dev/null @@ -1,84 +0,0 @@ --module(chttpd_xframe_test). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). - -setup() -> - ok = meck:new(config), - ok = meck:expect(config, get, fun(_, _, _) -> "X-Forwarded-Host" end), - ok. - -teardown(_) -> - meck:unload(config). - -mock_request() -> - Headers = mochiweb_headers:make([{"Host", "examples.com"}]), - MochiReq = mochiweb_request:new(nil, 'GET', '/', {1, 1}, Headers), - #httpd{mochi_req = MochiReq}. - -config_disabled() -> - [ - {enabled, false} - ]. - -config_sameorigin() -> - [ - {enabled, true}, - {same_origin, true} - ]. - -config_wildcard() -> - [ - {enabled, true}, - {same_origin, false}, - {hosts, ["*"]} - ]. 
- -config_specific_hosts() -> - [ - {enabled, true}, - {same_origin, false}, - {hosts, ["http://couchdb.org", "http://examples.com"]} - ]. - -config_diffent_specific_hosts() -> - [ - {enabled, true}, - {same_origin, false}, - {hosts, ["http://couchdb.org"]} - ]. - -no_header_if_xframe_disabled_test() -> - Headers = chttpd_xframe_options:header(mock_request(), [], config_disabled()), - ?assertEqual(Headers, []). - -enabled_with_same_origin_test() -> - Headers = chttpd_xframe_options:header(mock_request(), [], config_sameorigin()), - ?assertEqual(Headers, [{"X-Frame-Options", "SAMEORIGIN"}]). - - -xframe_host_test_() -> - { - "xframe host tests", - { - foreach, fun setup/0, fun teardown/1, - [ - fun allow_with_wildcard_host/1, - fun allow_with_specific_host/1, - fun deny_with_different_host/1 - ] - } - }. - -allow_with_wildcard_host(_) -> - Headers = chttpd_xframe_options:header(mock_request(), [], config_wildcard()), - ?_assertEqual([{"X-Frame-Options", "ALLOW-FROM http://examples.com"}], Headers). - -allow_with_specific_host(_) -> - Headers = chttpd_xframe_options:header(mock_request(), [], config_specific_hosts()), - ?_assertEqual([{"X-Frame-Options", "ALLOW-FROM http://examples.com"}], Headers). - -deny_with_different_host(_) -> - Headers = chttpd_xframe_options:header(mock_request(), [], config_diffent_specific_hosts()), - ?_assertEqual([{"X-Frame-Options", "DENY"}], Headers). diff --git a/src/chttpd/test/eunit/chttpd_cors_test.erl b/src/chttpd/test/eunit/chttpd_cors_test.erl new file mode 100644 index 000000000..19e851561 --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_cors_test.erl @@ -0,0 +1,564 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_cors_test). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("chttpd/include/chttpd_cors.hrl"). + + +-define(DEFAULT_ORIGIN, "http://example.com"). +-define(DEFAULT_ORIGIN_HTTPS, "https://example.com"). +-define(EXPOSED_HEADERS, + "content-type, accept-ranges, etag, server, x-couch-request-id, " ++ + "x-couch-update-newrev, x-couchdb-body-time"). + +-define(CUSTOM_SUPPORTED_METHODS, ?SUPPORTED_METHODS -- ["CONNECT"]). +-define(CUSTOM_SUPPORTED_HEADERS, ["extra" | ?SUPPORTED_HEADERS -- ["pragma"]]). +-define(CUSTOM_EXPOSED_HEADERS, ["expose" | ?COUCH_HEADERS]). + +-define(CUSTOM_MAX_AGE, round(?CORS_DEFAULT_MAX_AGE / 2)). + +%% Test helpers + + +empty_cors_config() -> + []. + + +minimal_cors_config() -> + [ + {<<"enable_cors">>, true}, + {<<"origins">>, {[]}} + ]. + + +simple_cors_config() -> + [ + {<<"enable_cors">>, true}, + {<<"origins">>, {[ + {list_to_binary(?DEFAULT_ORIGIN), {[]}} + ]}} + ]. + + +wildcard_cors_config() -> + [ + {<<"enable_cors">>, true}, + {<<"origins">>, {[ + {<<"*">>, {[]}} + ]}} + ]. 
+ +custom_cors_config() -> + [ + {<<"enable_cors">>, true}, + {<<"allow_methods">>, ?CUSTOM_SUPPORTED_METHODS}, + {<<"allow_headers">>, ?CUSTOM_SUPPORTED_HEADERS}, + {<<"exposed_headers">>, ?CUSTOM_EXPOSED_HEADERS}, + {<<"max_age">>, ?CUSTOM_MAX_AGE}, + {<<"origins">>, {[ + {<<"*">>, {[]}} + ]}} + ]. + +access_control_cors_config(AllowCredentials) -> + [ + {<<"enable_cors">>, true}, + {<<"allow_credentials">>, AllowCredentials}, + {<<"origins">>, {[ + {list_to_binary(?DEFAULT_ORIGIN), {[]}} + ]}}]. + + +multiple_cors_config() -> + [ + {<<"enable_cors">>, true}, + {<<"origins">>, {[ + {list_to_binary(?DEFAULT_ORIGIN), {[]}}, + {<<"https://example.com">>, {[]}}, + {<<"http://example.com:5984">>, {[]}}, + {<<"https://example.com:5984">>, {[]}} + ]}} + ]. + + +mock_request(Method, Path, Headers0) -> + HeaderKey = "Access-Control-Request-Method", + Headers = case proplists:get_value(HeaderKey, Headers0, undefined) of + nil -> + proplists:delete(HeaderKey, Headers0); + undefined -> + case Method of + 'OPTIONS' -> + [{HeaderKey, atom_to_list(Method)} | Headers0]; + _ -> + Headers0 + end; + _ -> + Headers0 + end, + Headers1 = mochiweb_headers:make(Headers), + MochiReq = mochiweb_request:new(nil, Method, Path, {1, 1}, Headers1), + PathParts = [list_to_binary(chttpd:unquote(Part)) + || Part <- string:tokens(Path, "/")], + #httpd{method=Method, mochi_req=MochiReq, path_parts=PathParts}. + + +header(#httpd{}=Req, Key) -> + chttpd:header_value(Req, Key); +header({mochiweb_response, [_, _, Headers]}, Key) -> + %% header(Headers, Key); + mochiweb_headers:get_value(Key, Headers); +header(Headers, Key) -> + couch_util:get_value(Key, Headers, undefined). + + +string_headers(H) -> + string:join(H, ", "). + + +assert_not_preflight_(Val) -> + ?_assertEqual(not_preflight, Val). + + +%% CORS disabled tests + + +cors_disabled_test_() -> + {"CORS disabled tests", + [ + {"Empty user", + {foreach, + fun empty_cors_config/0, + [ + fun test_no_access_control_method_preflight_request_/1, + fun test_no_headers_/1, + fun test_no_headers_server_/1, + fun test_no_headers_db_/1 + ]}}]}. + + +%% CORS enabled tests + + +cors_enabled_minimal_config_test_() -> + {"Minimal CORS enabled, no Origins", + {foreach, + fun minimal_cors_config/0, + [ + fun test_no_access_control_method_preflight_request_/1, + fun test_incorrect_origin_simple_request_/1, + fun test_incorrect_origin_preflight_request_/1 + ]}}. + + +cors_enabled_simple_config_test_() -> + {"Simple CORS config", + {foreach, + fun simple_cors_config/0, + [ + fun test_no_access_control_method_preflight_request_/1, + fun test_preflight_request_/1, + fun test_bad_headers_preflight_request_/1, + fun test_good_headers_preflight_request_/1, + fun test_db_request_/1, + fun test_db_preflight_request_/1, + fun test_db_host_origin_request_/1, + fun test_preflight_with_port_no_origin_/1, + fun test_preflight_with_scheme_no_origin_/1, + fun test_preflight_with_scheme_port_no_origin_/1, + fun test_case_sensitive_mismatch_of_allowed_origins_/1 + ]}}. + +cors_enabled_custom_config_test_() -> + {"Simple CORS config with custom allow_methods/allow_headers/exposed_headers", + {foreach, + fun custom_cors_config/0, + [ + fun test_good_headers_preflight_request_with_custom_config_/1, + fun test_db_request_with_custom_config_/1 + ]}}. 
+ + +cors_enabled_multiple_config_test_() -> + {"Multiple options CORS config", + {foreach, + fun multiple_cors_config/0, + [ + fun test_no_access_control_method_preflight_request_/1, + fun test_preflight_request_/1, + fun test_db_request_/1, + fun test_db_preflight_request_/1, + fun test_db_host_origin_request_/1, + fun test_preflight_with_port_with_origin_/1, + fun test_preflight_with_scheme_with_origin_/1, + fun test_preflight_with_scheme_port_with_origin_/1 + ]}}. + + +%% Access-Control-Allow-Credentials tests + + +%% http://www.w3.org/TR/cors/#supports-credentials +%% 6.1.3 +%% If the resource supports credentials add a single +%% Access-Control-Allow-Origin header, with the value +%% of the Origin header as value, and add a single +%% Access-Control-Allow-Credentials header with the +%% case-sensitive string "true" as value. +%% Otherwise, add a single Access-Control-Allow-Origin +%% header, with either the value of the Origin header +%% or the string "*" as value. +%% Note: The string "*" cannot be used for a resource +%% that supports credentials. + +db_request_credentials_header_off_test_() -> + {"Allow credentials disabled", + {setup, + fun() -> + access_control_cors_config(false) + end, + fun test_db_request_credentials_header_off_/1 + } + }. + + +db_request_credentials_header_on_test_() -> + {"Allow credentials enabled", + {setup, + fun() -> + access_control_cors_config(true) + end, + fun test_db_request_credentials_header_on_/1 + } + }. + + +%% CORS wildcard tests + + +cors_enabled_wildcard_test_() -> + {"Wildcard CORS config", + {foreach, + fun wildcard_cors_config/0, + [ + fun test_no_access_control_method_preflight_request_/1, + fun test_preflight_request_/1, + fun test_preflight_request_no_allow_credentials_/1, + fun test_preflight_request_empty_request_headers_/1, + fun test_db_request_/1, + fun test_db_preflight_request_/1, + fun test_db_host_origin_request_/1, + fun test_preflight_with_port_with_origin_/1, + fun test_preflight_with_scheme_with_origin_/1, + fun test_preflight_with_scheme_port_with_origin_/1, + fun test_case_sensitive_mismatch_of_allowed_origins_/1 + ]}}. + + +%% Test generators + + +test_no_headers_(OwnerConfig) -> + Req = mock_request('GET', "/", []), + assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)). + + +test_no_headers_server_(OwnerConfig) -> + Req = mock_request('GET', "/", [{"Origin", "http://127.0.0.1"}]), + assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)). + + +test_no_headers_db_(OwnerConfig) -> + Headers = [{"Origin", "http://127.0.0.1"}], + Req = mock_request('GET', "/my_db", Headers), + assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)). + + +test_incorrect_origin_simple_request_(OwnerConfig) -> + Req = mock_request('GET', "/", [{"Origin", "http://127.0.0.1"}]), + [ + ?_assert(chttpd_cors:is_cors_enabled(OwnerConfig)), + assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)) + ]. + + +test_incorrect_origin_preflight_request_(OwnerConfig) -> + Headers = [ + {"Origin", "http://127.0.0.1"}, + {"Access-Control-Request-Method", "GET"} + ], + Req = mock_request('GET', "/", Headers), + [ + ?_assert(chttpd_cors:is_cors_enabled(OwnerConfig)), + assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)) + ]. 
+ + +test_bad_headers_preflight_request_(OwnerConfig) -> + Headers = [ + {"Origin", ?DEFAULT_ORIGIN}, + {"Access-Control-Request-Method", "GET"}, + {"Access-Control-Request-Headers", "X-Not-An-Allowed-Headers"} + ], + Req = mock_request('OPTIONS', "/", Headers), + [ + ?_assert(chttpd_cors:is_cors_enabled(OwnerConfig)), + assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)) + ]. + + +test_good_headers_preflight_request_(OwnerConfig) -> + Headers = [ + {"Origin", ?DEFAULT_ORIGIN}, + {"Access-Control-Request-Method", "GET"}, + {"Access-Control-Request-Headers", "accept-language"} + ], + Req = mock_request('OPTIONS', "/", Headers), + ?assert(chttpd_cors:is_cors_enabled(OwnerConfig)), + {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig), + [ + ?_assertEqual(?DEFAULT_ORIGIN, + header(Headers1, "Access-Control-Allow-Origin")), + ?_assertEqual(string_headers(?SUPPORTED_METHODS), + header(Headers1, "Access-Control-Allow-Methods")), + ?_assertEqual(string_headers(["accept-language"]), + header(Headers1, "Access-Control-Allow-Headers")) + ]. + +test_good_headers_preflight_request_with_custom_config_(OwnerConfig) -> + Headers = [ + {"Origin", ?DEFAULT_ORIGIN}, + {"Access-Control-Request-Method", "GET"}, + {"Access-Control-Request-Headers", "accept-language, extra"}, + {"Access-Control-Max-Age", ?CORS_DEFAULT_MAX_AGE} + ], + Req = mock_request('OPTIONS', "/", Headers), + ?assert(chttpd_cors:is_cors_enabled(OwnerConfig)), + AllowMethods = couch_util:get_value( + <<"allow_methods">>, OwnerConfig, ?SUPPORTED_METHODS), + MaxAge = couch_util:get_value( + <<"max_age">>, OwnerConfig, ?CORS_DEFAULT_MAX_AGE), + {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig), + [ + ?_assertEqual(?DEFAULT_ORIGIN, + header(Headers1, "Access-Control-Allow-Origin")), + ?_assertEqual(string_headers(AllowMethods), + header(Headers1, "Access-Control-Allow-Methods")), + ?_assertEqual(string_headers(["accept-language", "extra"]), + header(Headers1, "Access-Control-Allow-Headers")), + ?_assertEqual(MaxAge, + header(Headers1, "Access-Control-Max-Age")) + ]. + + +test_preflight_request_(OwnerConfig) -> + Headers = [ + {"Origin", ?DEFAULT_ORIGIN}, + {"Access-Control-Request-Method", "GET"} + ], + Req = mock_request('OPTIONS', "/", Headers), + {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig), + [ + ?_assertEqual(?DEFAULT_ORIGIN, + header(Headers1, "Access-Control-Allow-Origin")), + ?_assertEqual(string_headers(?SUPPORTED_METHODS), + header(Headers1, "Access-Control-Allow-Methods")) + ]. + + +test_no_access_control_method_preflight_request_(OwnerConfig) -> + Headers = [ + {"Origin", ?DEFAULT_ORIGIN}, + {"Access-Control-Request-Method", notnil} + ], + Req = mock_request('OPTIONS', "/", Headers), + assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)). + + +test_preflight_request_no_allow_credentials_(OwnerConfig) -> + Headers = [ + {"Origin", ?DEFAULT_ORIGIN}, + {"Access-Control-Request-Method", "GET"} + ], + Req = mock_request('OPTIONS', "/", Headers), + {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig), + [ + ?_assertEqual(?DEFAULT_ORIGIN, + header(Headers1, "Access-Control-Allow-Origin")), + ?_assertEqual(string_headers(?SUPPORTED_METHODS), + header(Headers1, "Access-Control-Allow-Methods")), + ?_assertEqual(undefined, + header(Headers1, "Access-Control-Allow-Credentials")) + ]. 
+ + +test_preflight_request_empty_request_headers_(OwnerConfig) -> + Headers = [ + {"Origin", ?DEFAULT_ORIGIN}, + {"Access-Control-Request-Method", "POST"}, + {"Access-Control-Request-Headers", ""} + ], + Req = mock_request('OPTIONS', "/", Headers), + {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig), + [ + ?_assertEqual(?DEFAULT_ORIGIN, + header(Headers1, "Access-Control-Allow-Origin")), + ?_assertEqual(string_headers(?SUPPORTED_METHODS), + header(Headers1, "Access-Control-Allow-Methods")), + ?_assertEqual("", + header(Headers1, "Access-Control-Allow-Headers")) + ]. + + +test_db_request_(OwnerConfig) -> + Origin = ?DEFAULT_ORIGIN, + Headers = [{"Origin", Origin}], + Req = mock_request('GET', "/my_db", Headers), + Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), + [ + ?_assertEqual(?DEFAULT_ORIGIN, + header(Headers1, "Access-Control-Allow-Origin")), + ?_assertEqual(?EXPOSED_HEADERS, + header(Headers1, "Access-Control-Expose-Headers")) + ]. + +test_db_request_with_custom_config_(OwnerConfig) -> + Origin = ?DEFAULT_ORIGIN, + Headers = [{"Origin", Origin}, {"extra", "EXTRA"}], + Req = mock_request('GET', "/my_db", Headers), + Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), + ExposedHeaders = couch_util:get_value( + <<"exposed_headers">>, OwnerConfig, ?COUCH_HEADERS), + [ + ?_assertEqual(?DEFAULT_ORIGIN, + header(Headers1, "Access-Control-Allow-Origin")), + ?_assertEqual(lists:sort(["content-type" | ExposedHeaders]), + lists:sort( + split_list(header(Headers1, "Access-Control-Expose-Headers")))) + ]. + + +test_db_preflight_request_(OwnerConfig) -> + Headers = [ + {"Origin", ?DEFAULT_ORIGIN} + ], + Req = mock_request('OPTIONS', "/my_db", Headers), + {ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig), + [ + ?_assertEqual(?DEFAULT_ORIGIN, + header(Headers1, "Access-Control-Allow-Origin")), + ?_assertEqual(string_headers(?SUPPORTED_METHODS), + header(Headers1, "Access-Control-Allow-Methods")) + ]. + + +test_db_host_origin_request_(OwnerConfig) -> + Origin = ?DEFAULT_ORIGIN, + Headers = [ + {"Origin", Origin}, + {"Host", "example.com"} + ], + Req = mock_request('GET', "/my_db", Headers), + Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), + [ + ?_assertEqual(?DEFAULT_ORIGIN, + header(Headers1, "Access-Control-Allow-Origin")), + ?_assertEqual(?EXPOSED_HEADERS, + header(Headers1, "Access-Control-Expose-Headers")) + ]. + + +test_preflight_origin_helper_(OwnerConfig, Origin, ExpectedOrigin) -> + Headers = [ + {"Origin", Origin}, + {"Access-Control-Request-Method", "GET"} + ], + Req = mock_request('OPTIONS', "/", Headers), + Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), + [?_assertEqual(ExpectedOrigin, + header(Headers1, "Access-Control-Allow-Origin")) + ]. + + +test_preflight_with_port_no_origin_(OwnerConfig) -> + Origin = ?DEFAULT_ORIGIN ++ ":5984", + test_preflight_origin_helper_(OwnerConfig, Origin, undefined). + + +test_preflight_with_port_with_origin_(OwnerConfig) -> + Origin = ?DEFAULT_ORIGIN ++ ":5984", + test_preflight_origin_helper_(OwnerConfig, Origin, Origin). + + +test_preflight_with_scheme_no_origin_(OwnerConfig) -> + test_preflight_origin_helper_(OwnerConfig, ?DEFAULT_ORIGIN_HTTPS, undefined). + + +test_preflight_with_scheme_with_origin_(OwnerConfig) -> + Origin = ?DEFAULT_ORIGIN_HTTPS, + test_preflight_origin_helper_(OwnerConfig, Origin, Origin). 
+ + +test_preflight_with_scheme_port_no_origin_(OwnerConfig) -> + Origin = ?DEFAULT_ORIGIN_HTTPS ++ ":5984", + test_preflight_origin_helper_(OwnerConfig, Origin, undefined). + + +test_preflight_with_scheme_port_with_origin_(OwnerConfig) -> + Origin = ?DEFAULT_ORIGIN_HTTPS ++ ":5984", + test_preflight_origin_helper_(OwnerConfig, Origin, Origin). + + +test_case_sensitive_mismatch_of_allowed_origins_(OwnerConfig) -> + Origin = "http://EXAMPLE.COM", + Headers = [{"Origin", Origin}], + Req = mock_request('GET', "/", Headers), + Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), + [ + ?_assertEqual(?DEFAULT_ORIGIN, + header(Headers1, "Access-Control-Allow-Origin")), + ?_assertEqual(?EXPOSED_HEADERS, + header(Headers1, "Access-Control-Expose-Headers")) + ]. + + +test_db_request_credentials_header_off_(OwnerConfig) -> + Origin = ?DEFAULT_ORIGIN, + Headers = [{"Origin", Origin}], + Req = mock_request('GET', "/", Headers), + Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), + [ + ?_assertEqual(?DEFAULT_ORIGIN, + header(Headers1, "Access-Control-Allow-Origin")), + ?_assertEqual(undefined, + header(Headers1, "Access-Control-Allow-Credentials")) + ]. + + +test_db_request_credentials_header_on_(OwnerConfig) -> + Origin = ?DEFAULT_ORIGIN, + Headers = [{"Origin", Origin}], + Req = mock_request('GET', "/", Headers), + Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig), + [ + ?_assertEqual(?DEFAULT_ORIGIN, + header(Headers1, "Access-Control-Allow-Origin")), + ?_assertEqual("true", + header(Headers1, "Access-Control-Allow-Credentials")) + ]. + +split_list(S) -> + re:split(S, "\\s*,\\s*", [trim, {return, list}]). diff --git a/src/chttpd/test/eunit/chttpd_csp_tests.erl b/src/chttpd/test/eunit/chttpd_csp_tests.erl new file mode 100644 index 000000000..e86436254 --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_csp_tests.erl @@ -0,0 +1,81 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_csp_tests). + +-include_lib("couch/include/couch_eunit.hrl"). + + +setup() -> + ok = config:set("csp", "enable", "true", false), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + lists:concat(["http://", Addr, ":", Port, "/_utils/"]). + +teardown(_) -> + ok. + + + +csp_test_() -> + { + "Content Security Policy tests", + { + setup, + fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_not_return_any_csp_headers_when_disabled/1, + fun should_apply_default_policy/1, + fun should_return_custom_policy/1, + fun should_only_enable_csp_when_true/1 + ] + } + } + }. + + +should_not_return_any_csp_headers_when_disabled(Url) -> + ?_assertEqual(undefined, + begin + ok = config:set("csp", "enable", "false", false), + {ok, _, Headers, _} = test_request:get(Url), + proplists:get_value("Content-Security-Policy", Headers) + end). 
+ +should_apply_default_policy(Url) -> + ?_assertEqual( + "default-src 'self'; img-src 'self' data:; font-src 'self'; " + "script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';", + begin + {ok, _, Headers, _} = test_request:get(Url), + proplists:get_value("Content-Security-Policy", Headers) + end). + +should_return_custom_policy(Url) -> + ?_assertEqual("default-src 'http://example.com';", + begin + ok = config:set("csp", "header_value", + "default-src 'http://example.com';", false), + {ok, _, Headers, _} = test_request:get(Url), + proplists:get_value("Content-Security-Policy", Headers) + end). + +should_only_enable_csp_when_true(Url) -> + ?_assertEqual(undefined, + begin + ok = config:set("csp", "enable", "tru", false), + {ok, _, Headers, _} = test_request:get(Url), + proplists:get_value("Content-Security-Policy", Headers) + end). diff --git a/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl b/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl new file mode 100644 index 000000000..0ab08dd80 --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl @@ -0,0 +1,206 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_db_attachment_size_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(USER, "chttpd_db_att_test_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(CONTENT_JSON, {"Content-Type", "application/json"}). +-define(CONTENT_MULTI_RELATED, {"Content-Type", + "multipart/related;boundary=\"bound\""}). + + +setup() -> + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + ok = config:set("couchdb", "max_attachment_size", "50", _Persist=false), + TmpDb = ?tempdb(), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)), + Url = "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(TmpDb), + create_db(Url), + add_doc(Url, "doc1"), + Url. + + +teardown(Url) -> + delete_db(Url), + ok = config:delete("admins", ?USER, _Persist=false), + ok = config:delete("couchdb", "max_attachment_size"). + + +attachment_size_test_() -> + { + "chttpd max_attachment_size tests", + { + setup, + fun chttpd_test_util:start_couch/0, + fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun put_inline/1, + fun put_simple/1, + fun put_simple_chunked/1, + fun put_mp_related/1 + ] + } + } + }. + + +put_inline(Url) -> + ?_test(begin + Status = put_inline(Url, "doc2", 50), + ?assert(Status =:= 201 orelse Status =:= 202), + ?assertEqual(413, put_inline(Url, "doc3", 51)) + end). 
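+
+
+% Note: a clustered write returns 201 when the document was committed
+% by a quorum of nodes and 202 when it was accepted by fewer than a
+% quorum, so these tests treat either status as success.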
+ + +put_simple(Url) -> + ?_test(begin + Headers = [{"Content-Type", "app/binary"}], + Rev1 = doc_rev(Url, "doc1"), + Data1 = data(50), + Status1 = put_req(Url ++ "/doc1/att2?rev=" ++ Rev1, Headers, Data1), + ?assert(Status1 =:= 201 orelse Status1 =:= 202), + Data2 = data(51), + Rev2 = doc_rev(Url, "doc1"), + Status2 = put_req(Url ++ "/doc1/att3?rev=" ++ Rev2, Headers, Data2), + ?assertEqual(413, Status2) + end). + + +put_simple_chunked(Url) -> + ?_test(begin + Headers = [{"Content-Type", "app/binary"}], + Rev1 = doc_rev(Url, "doc1"), + DataFun1 = data_stream_fun(50), + Status1 = put_req_chunked(Url ++ "/doc1/att2?rev=" ++ Rev1, Headers, DataFun1), + ?assert(Status1 =:= 201 orelse Status1 =:= 202), + DataFun2 = data_stream_fun(51), + Rev2 = doc_rev(Url, "doc1"), + Status2 = put_req_chunked(Url ++ "/doc1/att3?rev=" ++ Rev2, Headers, DataFun2), + ?assertEqual(413, Status2) + end). + + +put_mp_related(Url) -> + ?_test(begin + Headers = [?CONTENT_MULTI_RELATED], + Body1 = mp_body(50), + Status1 = put_req(Url ++ "/doc2", Headers, Body1), + ?assert(Status1 =:= 201 orelse Status1 =:= 202), + Body2 = mp_body(51), + Status2 = put_req(Url ++ "/doc3", Headers, Body2), + ?assertEqual(413, Status2) + end). + + +% Helper functions + +create_db(Url) -> + Status = put_req(Url, "{}"), + ?assert(Status =:= 201 orelse Status =:= 202). + + +add_doc(Url, DocId) -> + Status = put_req(Url ++ "/" ++ DocId, "{}"), + ?assert(Status =:= 201 orelse Status =:= 202). + + +delete_db(Url) -> + {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). + + +put_inline(Url, DocId, Size) -> + Doc = "{\"_attachments\": {\"att1\":{" + "\"content_type\": \"app/binary\", " + "\"data\": \"" ++ data_b64(Size) ++ "\"" + "}}}", + put_req(Url ++ "/" ++ DocId, Doc). + + +mp_body(AttSize) -> + AttData = data(AttSize), + SizeStr = integer_to_list(AttSize), + string:join([ + "--bound", + + "Content-Type: application/json", + + "", + + "{\"_id\":\"doc2\", \"_attachments\":{\"att\":" + "{\"content_type\":\"app/binary\", \"length\":" ++ SizeStr ++ "," + "\"follows\":true}}}", + + "--bound", + + "Content-Disposition: attachment; filename=\"att\"", + + "Content-Type: app/binary", + + "", + + AttData, + + "--bound--" + ], "\r\n"). + + +doc_rev(Url, DocId) -> + {200, ResultProps} = get_req(Url ++ "/" ++ DocId), + {<<"_rev">>, BinRev} = lists:keyfind(<<"_rev">>, 1, ResultProps), + binary_to_list(BinRev). + + +put_req(Url, Body) -> + put_req(Url, [], Body). + + +put_req(Url, Headers, Body) -> + {ok, Status, _, _} = test_request:put(Url, Headers ++ [?AUTH], Body), + Status. + + +put_req_chunked(Url, Headers, Body) -> + Opts = [{transfer_encoding, {chunked, 1}}], + {ok, Status, _, _} = test_request:put(Url, Headers ++ [?AUTH], Body, Opts), + Status. + + +get_req(Url) -> + {ok, Status, _, ResultBody} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]), + {[_ | _] = ResultProps} = ?JSON_DECODE(ResultBody), + {Status, ResultProps}. + +% Data streaming generator for ibrowse client. ibrowse will repeatedly call the +% function with State and it should return {ok, Data, NewState} or eof at end. +data_stream_fun(Size) -> + Fun = fun(0) -> eof; (BytesLeft) -> + {ok, <<"x">>, BytesLeft - 1} + end, + {Fun, Size}. + + +data(Size) -> + string:copies("x", Size). + + +data_b64(Size) -> + base64:encode_to_string(data(Size)). 
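+
+
+% Illustrative example (not a test): with {Fun, State} as the request
+% body, ibrowse calls Fun(State) repeatedly, so data_stream_fun(3)
+% yields the chunks <<"x">>, <<"x">>, <<"x">> and then eof.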
diff --git a/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl b/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl new file mode 100644 index 000000000..8a95c92ac --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl @@ -0,0 +1,313 @@ +%% Licensed under the Apache License, Version 2.0 (the "License"); you may not +%% use this file except in compliance with the License. You may obtain a copy of +%% the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +%% License for the specific language governing permissions and limitations under +%% the License. + +-module(chttpd_db_bulk_get_multipart_test). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 3000). + + +setup() -> + mock(config), + mock(chttpd), + mock(couch_epi), + mock(couch_httpd), + mock(couch_stats), + mock(fabric), + mock(mochireq), + Pid = spawn_accumulator(), + Pid. + + +teardown(Pid) -> + ok = stop_accumulator(Pid), + meck:unload(). + + +bulk_get_test_() -> + { + "/db/_bulk_get tests", + { + foreach, fun setup/0, fun teardown/1, + [ + fun should_require_docs_field/1, + fun should_not_accept_specific_query_params/1, + fun should_return_empty_results_on_no_docs/1, + fun should_get_doc_with_all_revs/1, + fun should_validate_doc_with_bad_id/1, + fun should_validate_doc_with_bad_rev/1, + fun should_validate_missing_doc/1, + fun should_validate_bad_atts_since/1, + fun should_include_attachments_when_atts_since_specified/1 + ] + } + }. + + +should_require_docs_field(_) -> + Req = fake_request({[{}]}), + Db = test_util:fake_db([{name, <<"foo">>}]), + ?_assertThrow({bad_request, _}, chttpd_db:db_req(Req, Db)). + + +should_not_accept_specific_query_params(_) -> + Req = fake_request({[{<<"docs">>, []}]}), + Db = test_util:fake_db([{name, <<"foo">>}]), + lists:map(fun (Param) -> + {Param, ?_assertThrow({bad_request, _}, + begin + ok = meck:expect(chttpd, qs, + fun(_) -> [{Param, ""}] end), + chttpd_db:db_req(Req, Db) + end)} + end, ["rev", "open_revs", "atts_since", "w", "new_edits"]). + + +should_return_empty_results_on_no_docs(Pid) -> + Req = fake_request({[{<<"docs">>, []}]}), + Db = test_util:fake_db([{name, <<"foo">>}]), + chttpd_db:db_req(Req, Db), + Results = get_results_from_response(Pid), + ?_assertEqual([], Results). + + +should_get_doc_with_all_revs(Pid) -> + DocId = <<"docudoc">>, + Req = fake_request(DocId), + Db = test_util:fake_db([{name, <<"foo">>}]), + + DocRevA = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-ABC">>}]}}, + DocRevB = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-CDE">>}]}}, + + mock_open_revs(all, {ok, [{ok, DocRevA}, {ok, DocRevB}]}), + chttpd_db:db_req(Req, Db), + + Result = get_results_from_response(Pid), + ?_assertEqual(DocId, couch_util:get_value(<<"_id">>, Result)). + + +should_validate_doc_with_bad_id(Pid) -> + DocId = <<"_docudoc">>, + + Req = fake_request(DocId), + Db = test_util:fake_db([{name, <<"foo">>}]), + chttpd_db:db_req(Req, Db), + + Result = get_results_from_response(Pid), + ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), + + ?_assertMatch([{<<"id">>, DocId}, + {<<"rev">>, null}, + {<<"error">>, <<"illegal_docid">>}, + {<<"reason">>, _}], Result). 
+ + +should_validate_doc_with_bad_rev(Pid) -> + DocId = <<"docudoc">>, + Rev = <<"revorev">>, + + Req = fake_request(DocId, Rev), + Db = test_util:fake_db([{name, <<"foo">>}]), + chttpd_db:db_req(Req, Db), + + Result = get_results_from_response(Pid), + ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), + + ?_assertMatch([{<<"id">>, DocId}, + {<<"rev">>, Rev}, + {<<"error">>, <<"bad_request">>}, + {<<"reason">>, _}], Result). + + +should_validate_missing_doc(Pid) -> + DocId = <<"docudoc">>, + Rev = <<"1-revorev">>, + + Req = fake_request(DocId, Rev), + Db = test_util:fake_db([{name, <<"foo">>}]), + mock_open_revs([{1,<<"revorev">>}], {ok, []}), + chttpd_db:db_req(Req, Db), + + Result = get_results_from_response(Pid), + ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), + + ?_assertMatch([{<<"id">>, DocId}, + {<<"rev">>, Rev}, + {<<"error">>, <<"not_found">>}, + {<<"reason">>, _}], Result). + + +should_validate_bad_atts_since(Pid) -> + DocId = <<"docudoc">>, + Rev = <<"1-revorev">>, + + Req = fake_request(DocId, Rev, <<"badattsince">>), + Db = test_util:fake_db([{name, <<"foo">>}]), + mock_open_revs([{1,<<"revorev">>}], {ok, []}), + chttpd_db:db_req(Req, Db), + + Result = get_results_from_response(Pid), + ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), + + ?_assertMatch([{<<"id">>, DocId}, + {<<"rev">>, <<"badattsince">>}, + {<<"error">>, <<"bad_request">>}, + {<<"reason">>, _}], Result). + + +should_include_attachments_when_atts_since_specified(_) -> + DocId = <<"docudoc">>, + Rev = <<"1-revorev">>, + + Req = fake_request(DocId, Rev, [<<"1-abc">>]), + Db = test_util:fake_db([{name, <<"foo">>}]), + mock_open_revs([{1,<<"revorev">>}], {ok, []}), + chttpd_db:db_req(Req, Db), + + ?_assert(meck:called(fabric, open_revs, + ['_', DocId, [{1, <<"revorev">>}], + [{atts_since, [{1, <<"abc">>}]}, attachments, + {user_ctx, undefined}]])). + +%% helpers + +fake_request(Payload) when is_tuple(Payload) -> + #httpd{method='POST', path_parts=[<<"db">>, <<"_bulk_get">>], + mochi_req=mochireq, req_body=Payload}; +fake_request(DocId) when is_binary(DocId) -> + fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}]}]}]}). + +fake_request(DocId, Rev) -> + fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}, {<<"rev">>, Rev}]}]}]}). + +fake_request(DocId, Rev, AttsSince) -> + fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}, + {<<"rev">>, Rev}, + {<<"atts_since">>, AttsSince}]}]}]}). + + +mock_open_revs(RevsReq0, RevsResp) -> + ok = meck:expect(fabric, open_revs, + fun(_, _, RevsReq1, _) -> + ?assertEqual(RevsReq0, RevsReq1), + RevsResp + end). 
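+
+
+% The mocks below capture everything chttpd would stream to the client:
+% chttpd:send_chunk/2 and couch_httpd:send_chunk/2 are pointed at the
+% local send_chunk/2 helper, which forwards each chunk to the
+% accumulator process that spawn_accumulator/0 stores in the process
+% dictionary. Chunks accumulate most-recent-first, an ordering that
+% get_results_from_response/1 relies on when it picks the JSON part of
+% the multipart reply with lists:nth(2, ...).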
+ + +mock(mochireq) -> + ok = meck:new(mochireq, [non_strict]), + ok = meck:expect(mochireq, parse_qs, fun() -> [] end), + ok = meck:expect(mochireq, accepts_content_type, fun("multipart/mixed") -> true; + ("multipart/related") -> true; + (_) -> false end), + ok; +mock(couch_httpd) -> + ok = meck:new(couch_httpd, [passthrough]), + ok = meck:expect(couch_httpd, validate_ctype, fun(_, _) -> ok end), + ok = meck:expect(couch_httpd, last_chunk, fun(_) -> {ok, nil} end), + ok = meck:expect(couch_httpd, send_chunk, fun send_chunk/2), + ok; +mock(chttpd) -> + ok = meck:new(chttpd, [passthrough]), + ok = meck:expect(chttpd, start_json_response, fun(_, _) -> {ok, nil} end), + ok = meck:expect(chttpd, start_chunked_response, fun(_, _, _) -> {ok, nil} end), + ok = meck:expect(chttpd, end_json_response, fun(_) -> ok end), + ok = meck:expect(chttpd, send_chunk, fun send_chunk/2), + ok = meck:expect(chttpd, json_body_obj, fun (#httpd{req_body=Body}) -> Body end), + ok; +mock(couch_epi) -> + ok = meck:new(couch_epi, [passthrough]), + ok = meck:expect(couch_epi, any, fun(_, _, _, _, _) -> false end), + ok; +mock(couch_stats) -> + ok = meck:new(couch_stats, [passthrough]), + ok = meck:expect(couch_stats, increment_counter, fun(_) -> ok end), + ok = meck:expect(couch_stats, increment_counter, fun(_, _) -> ok end), + ok = meck:expect(couch_stats, decrement_counter, fun(_) -> ok end), + ok = meck:expect(couch_stats, decrement_counter, fun(_, _) -> ok end), + ok = meck:expect(couch_stats, update_histogram, fun(_, _) -> ok end), + ok = meck:expect(couch_stats, update_gauge, fun(_, _) -> ok end), + ok; +mock(fabric) -> + ok = meck:new(fabric, [passthrough]), + ok; +mock(config) -> + ok = meck:new(config, [passthrough]), + ok = meck:expect(config, get, fun(_, _, Default) -> Default end), + ok. + + +spawn_accumulator() -> + Parent = self(), + Pid = spawn(fun() -> accumulator_loop(Parent, []) end), + erlang:put(chunks_gather, Pid), + Pid. + +accumulator_loop(Parent, Acc) -> + receive + {stop, Ref} -> + Parent ! {ok, Ref}; + {get, Ref} -> + Parent ! {ok, Ref, Acc}, + accumulator_loop(Parent, Acc); + {put, Ref, Chunk} -> + Parent ! {ok, Ref}, + accumulator_loop(Parent, [Chunk|Acc]) + end. + +stop_accumulator(Pid) -> + Ref = make_ref(), + Pid ! {stop, Ref}, + receive + {ok, Ref} -> + ok + after ?TIMEOUT -> + throw({timeout, <<"process stop timeout">>}) + end. + + +send_chunk(_, []) -> + {ok, nil}; +send_chunk(_Req, [H|T]=Chunk) when is_list(Chunk) -> + send_chunk(_Req, H), + send_chunk(_Req, T); +send_chunk(_, Chunk) -> + Worker = erlang:get(chunks_gather), + Ref = make_ref(), + Worker ! {put, Ref, Chunk}, + receive + {ok, Ref} -> {ok, nil} + after ?TIMEOUT -> + throw({timeout, <<"send chunk timeout">>}) + end. + + +get_response(Pid) -> + Ref = make_ref(), + Pid ! {get, Ref}, + receive + {ok, Ref, Acc} -> + Acc + after ?TIMEOUT -> + throw({timeout, <<"get response timeout">>}) + end. + +get_results_from_response(Pid) -> + case get_response(Pid) of + [] -> + []; + Result -> + {Result1} = ?JSON_DECODE(lists:nth(2, Result)), + Result1 + end. diff --git a/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl b/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl new file mode 100644 index 000000000..864e7079a --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl @@ -0,0 +1,341 @@ +%% Licensed under the Apache License, Version 2.0 (the "License"); you may not +%% use this file except in compliance with the License. 
You may obtain a copy of +%% the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +%% License for the specific language governing permissions and limitations under +%% the License. + +-module(chttpd_db_bulk_get_test). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 3000). + + +setup() -> + mock(config), + mock(chttpd), + mock(couch_epi), + mock(couch_httpd), + mock(couch_stats), + mock(fabric), + mock(mochireq), + Pid = spawn_accumulator(), + Pid. + + +teardown(Pid) -> + ok = stop_accumulator(Pid), + meck:unload(config), + meck:unload(chttpd), + meck:unload(couch_epi), + meck:unload(couch_httpd), + meck:unload(couch_stats), + meck:unload(fabric), + meck:unload(mochireq). + + +bulk_get_test_() -> + { + "/db/_bulk_get tests", + { + foreach, fun setup/0, fun teardown/1, + [ + fun should_require_docs_field/1, + fun should_not_accept_specific_query_params/1, + fun should_return_empty_results_on_no_docs/1, + fun should_get_doc_with_all_revs/1, + fun should_validate_doc_with_bad_id/1, + fun should_validate_doc_with_bad_rev/1, + fun should_validate_missing_doc/1, + fun should_validate_bad_atts_since/1, + fun should_include_attachments_when_atts_since_specified/1 + ] + } + }. + + +should_require_docs_field(_) -> + Req = fake_request({[{}]}), + ?_assertThrow({bad_request, _}, chttpd_db:db_req(Req, nil)). + + +should_not_accept_specific_query_params(_) -> + Req = fake_request({[{<<"docs">>, []}]}), + lists:map(fun (Param) -> + {Param, ?_assertThrow({bad_request, _}, + begin + ok = meck:expect(chttpd, qs, + fun(_) -> [{Param, ""}] end), + chttpd_db:db_req(Req, nil) + end)} + end, ["rev", "open_revs", "atts_since", "w", "new_edits"]). + + +should_return_empty_results_on_no_docs(Pid) -> + Req = fake_request({[{<<"docs">>, []}]}), + chttpd_db:db_req(Req, nil), + Results = get_results_from_response(Pid), + ?_assertEqual([], Results). + + +should_get_doc_with_all_revs(Pid) -> + DocId = <<"docudoc">>, + Req = fake_request(DocId), + + RevA = {[{<<"_id">>, DocId}, {<<"_rev">>, <<"1-ABC">>}]}, + RevB = {[{<<"_id">>, DocId}, {<<"_rev">>, <<"1-CDE">>}]}, + DocRevA = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-ABC">>}]}}, + DocRevB = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-CDE">>}]}}, + + mock_open_revs(all, {ok, [{ok, DocRevA}, {ok, DocRevB}]}), + chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])), + + [{Result}] = get_results_from_response(Pid), + ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), + + Docs = couch_util:get_value(<<"docs">>, Result), + ?assertEqual(2, length(Docs)), + + [{DocA0}, {DocB0}] = Docs, + + DocA = couch_util:get_value(<<"ok">>, DocA0), + DocB = couch_util:get_value(<<"ok">>, DocB0), + + ?_assertEqual([RevA, RevB], [DocA, DocB]). 
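+
+
+% Each element of the decoded "results" list has the shape
+% {"id": DocId, "docs": [{"ok": Doc} | {"error": ErrorProps}]},
+% which is the layout these tests destructure.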
+ + +should_validate_doc_with_bad_id(Pid) -> + DocId = <<"_docudoc">>, + + Req = fake_request(DocId), + chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])), + + [{Result}] = get_results_from_response(Pid), + ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), + + Docs = couch_util:get_value(<<"docs">>, Result), + ?assertEqual(1, length(Docs)), + [{DocResult}] = Docs, + + Doc = couch_util:get_value(<<"error">>, DocResult), + + ?_assertMatch({[{<<"id">>, DocId}, + {<<"rev">>, null}, + {<<"error">>, <<"illegal_docid">>}, + {<<"reason">>, _}]}, + Doc). + + +should_validate_doc_with_bad_rev(Pid) -> + DocId = <<"docudoc">>, + Rev = <<"revorev">>, + + Req = fake_request(DocId, Rev), + chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])), + + [{Result}] = get_results_from_response(Pid), + ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), + + Docs = couch_util:get_value(<<"docs">>, Result), + ?assertEqual(1, length(Docs)), + [{DocResult}] = Docs, + + Doc = couch_util:get_value(<<"error">>, DocResult), + + ?_assertMatch({[{<<"id">>, DocId}, + {<<"rev">>, Rev}, + {<<"error">>, <<"bad_request">>}, + {<<"reason">>, _}]}, + Doc). + + +should_validate_missing_doc(Pid) -> + DocId = <<"docudoc">>, + Rev = <<"1-revorev">>, + + Req = fake_request(DocId, Rev), + mock_open_revs([{1,<<"revorev">>}], {ok, []}), + chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])), + + [{Result}] = get_results_from_response(Pid), + ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), + + Docs = couch_util:get_value(<<"docs">>, Result), + ?assertEqual(1, length(Docs)), + [{DocResult}] = Docs, + + Doc = couch_util:get_value(<<"error">>, DocResult), + + ?_assertMatch({[{<<"id">>, DocId}, + {<<"rev">>, Rev}, + {<<"error">>, <<"not_found">>}, + {<<"reason">>, _}]}, + Doc). + + +should_validate_bad_atts_since(Pid) -> + DocId = <<"docudoc">>, + Rev = <<"1-revorev">>, + + Req = fake_request(DocId, Rev, <<"badattsince">>), + mock_open_revs([{1,<<"revorev">>}], {ok, []}), + chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])), + + [{Result}] = get_results_from_response(Pid), + ?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)), + + Docs = couch_util:get_value(<<"docs">>, Result), + ?assertEqual(1, length(Docs)), + [{DocResult}] = Docs, + + Doc = couch_util:get_value(<<"error">>, DocResult), + + ?_assertMatch({[{<<"id">>, DocId}, + {<<"rev">>, <<"badattsince">>}, + {<<"error">>, <<"bad_request">>}, + {<<"reason">>, _}]}, + Doc). + + +should_include_attachments_when_atts_since_specified(_) -> + DocId = <<"docudoc">>, + Rev = <<"1-revorev">>, + + Req = fake_request(DocId, Rev, [<<"1-abc">>]), + mock_open_revs([{1,<<"revorev">>}], {ok, []}), + chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])), + + ?_assert(meck:called(fabric, open_revs, + ['_', DocId, [{1, <<"revorev">>}], + [{atts_since, [{1, <<"abc">>}]}, attachments, + {user_ctx, undefined}]])). + +%% helpers + +fake_request(Payload) when is_tuple(Payload) -> + #httpd{method='POST', path_parts=[<<"db">>, <<"_bulk_get">>], + mochi_req=mochireq, req_body=Payload}; +fake_request(DocId) when is_binary(DocId) -> + fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}]}]}]}). + +fake_request(DocId, Rev) -> + fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}, {<<"rev">>, Rev}]}]}]}). + +fake_request(DocId, Rev, AttsSince) -> + fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}, + {<<"rev">>, Rev}, + {<<"atts_since">>, AttsSince}]}]}]}). 
+ + +mock_open_revs(RevsReq0, RevsResp) -> + ok = meck:expect(fabric, open_revs, + fun(_, _, RevsReq1, _) -> + ?assertEqual(RevsReq0, RevsReq1), + RevsResp + end). + + +mock(mochireq) -> + ok = meck:new(mochireq, [non_strict]), + ok = meck:expect(mochireq, parse_qs, fun() -> [] end), + ok = meck:expect(mochireq, accepts_content_type, fun(_) -> false end), + ok; +mock(couch_httpd) -> + ok = meck:new(couch_httpd, [passthrough]), + ok = meck:expect(couch_httpd, validate_ctype, fun(_, _) -> ok end), + ok; +mock(chttpd) -> + ok = meck:new(chttpd, [passthrough]), + ok = meck:expect(chttpd, start_json_response, fun(_, _) -> {ok, nil} end), + ok = meck:expect(chttpd, end_json_response, fun(_) -> ok end), + ok = meck:expect(chttpd, send_chunk, fun send_chunk/2), + ok = meck:expect(chttpd, json_body_obj, fun (#httpd{req_body=Body}) -> Body end), + ok; +mock(couch_epi) -> + ok = meck:new(couch_epi, [passthrough]), + ok = meck:expect(couch_epi, any, fun(_, _, _, _, _) -> false end), + ok; +mock(couch_stats) -> + ok = meck:new(couch_stats, [passthrough]), + ok = meck:expect(couch_stats, increment_counter, fun(_) -> ok end), + ok = meck:expect(couch_stats, increment_counter, fun(_, _) -> ok end), + ok = meck:expect(couch_stats, decrement_counter, fun(_) -> ok end), + ok = meck:expect(couch_stats, decrement_counter, fun(_, _) -> ok end), + ok = meck:expect(couch_stats, update_histogram, fun(_, _) -> ok end), + ok = meck:expect(couch_stats, update_gauge, fun(_, _) -> ok end), + ok; +mock(fabric) -> + ok = meck:new(fabric, [passthrough]), + ok; +mock(config) -> + ok = meck:new(config, [passthrough]), + ok = meck:expect(config, get, fun(_, _, Default) -> Default end), + ok. + + +spawn_accumulator() -> + Parent = self(), + Pid = spawn(fun() -> accumulator_loop(Parent, []) end), + erlang:put(chunks_gather, Pid), + Pid. + +accumulator_loop(Parent, Acc) -> + receive + {stop, Ref} -> + Parent ! {ok, Ref}; + {get, Ref} -> + Parent ! {ok, Ref, Acc}, + accumulator_loop(Parent, Acc); + {put, Ref, Chunk} -> + Parent ! {ok, Ref}, + accumulator_loop(Parent, [Chunk|Acc]) + end. + +stop_accumulator(Pid) -> + Ref = make_ref(), + Pid ! {stop, Ref}, + receive + {ok, Ref} -> + ok + after ?TIMEOUT -> + throw({timeout, <<"process stop timeout">>}) + end. + + +send_chunk(_, []) -> + {ok, nil}; +send_chunk(_Req, [H|T]=Chunk) when is_list(Chunk) -> + send_chunk(_Req, H), + send_chunk(_Req, T); +send_chunk(_, Chunk) -> + Worker = erlang:get(chunks_gather), + Ref = make_ref(), + Worker ! {put, Ref, Chunk}, + receive + {ok, Ref} -> {ok, nil} + after ?TIMEOUT -> + throw({timeout, <<"send chunk timeout">>}) + end. + + +get_response(Pid) -> + Ref = make_ref(), + Pid ! {get, Ref}, + receive + {ok, Ref, Acc} -> + ?JSON_DECODE(iolist_to_binary(lists:reverse(Acc))) + after ?TIMEOUT -> + throw({timeout, <<"get response timeout">>}) + end. + + +get_results_from_response(Pid) -> + {Resp} = get_response(Pid), + couch_util:get_value(<<"results">>, Resp). diff --git a/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl b/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl new file mode 100644 index 000000000..88e2797a3 --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl @@ -0,0 +1,179 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_db_doc_size_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(USER, "chttpd_db_test_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(CONTENT_JSON, {"Content-Type", "application/json"}). +-define(CONTENT_MULTI_RELATED, {"Content-Type", + "multipart/related;boundary=\"bound\""}). +-define(CONTENT_MULTI_FORM, {"Content-Type", + "multipart/form-data;boundary=\"bound\""}). + + +setup() -> + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + ok = config:set("couchdb", "max_document_size", "50"), + TmpDb = ?tempdb(), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), + create_db(Url), + Url. + +teardown(Url) -> + delete_db(Url), + ok = config:delete("admins", ?USER, _Persist=false), + ok = config:delete("couchdb", "max_document_size"). + +create_db(Url) -> + {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), + case Status of + 201 -> ok; + 202 -> ok; + _ -> io:format(user, "~n HTTP Status Code: ~p~n", [Status]) + end, + ?assert(Status =:= 201 orelse Status =:= 202). + +delete_db(Url) -> + {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). + +all_test_() -> + { + "chttpd db max_document_size tests", + { + setup, + fun chttpd_test_util:start_couch/0, + fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun post_single_doc/1, + fun put_single_doc/1, + fun bulk_doc/1, + fun put_post_doc_attach_inline/1, + fun put_multi_part_related/1, + fun post_multi_part_form/1 + ] + } + } + }. + +post_single_doc(Url) -> + NewDoc = "{\"post_single_doc\": \"some_doc\", + \"_id\": \"testdoc\", \"should_be\" : \"too_large\"}", + {ok, _, _, ResultBody} = test_request:post(Url, + [?CONTENT_JSON, ?AUTH], NewDoc), + {[ErrorMsg | _]} = ?JSON_DECODE(ResultBody), + ?_assertEqual({<<"error">>, <<"document_too_large">>}, ErrorMsg). + +put_single_doc(Url) -> + NewDoc = "{\"post_single_doc\": \"some_doc\", + \"_id\": \"testdoc\", \"should_be\" : \"too_large\"}", + {ok, _, _, ResultBody} = test_request:put(Url ++ "/" ++ "testid", + [?CONTENT_JSON, ?AUTH], NewDoc), + {[ErrorMsg | _]} = ?JSON_DECODE(ResultBody), + ?_assertEqual({<<"error">>, <<"document_too_large">>}, ErrorMsg). + +bulk_doc(Url) -> + NewDoc = "{\"docs\": [{\"doc1\": 1}, {\"errordoc\": + \"this_should_be_the_too_large_error_document\"}]}", + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_bulk_docs/", + [?CONTENT_JSON, ?AUTH], NewDoc), + ResultJson = ?JSON_DECODE(ResultBody), + Expect = {[{<<"error">>,<<"document_too_large">>},{<<"reason">>,<<>>}]}, + ?_assertEqual(Expect, ResultJson). 
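+
+
+% Each remaining case pairs a document that fits under the 50-byte
+% max_document_size limit set in setup/0 with one that exceeds it,
+% expecting success for the former and a document_too_large error for
+% the latter.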
+ +put_post_doc_attach_inline(Url) -> + Body1 = "{\"body\":\"This is a body.\",", + Body2 = lists:concat(["{\"body\":\"This is a body it should fail", + "because there are too many characters.\","]), + DocRest = lists:concat(["\"_attachments\":{\"foo.txt\":{", + "\"content_type\":\"text/plain\",", + "\"data\": \"VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=\"}}}"]), + Doc1 = lists:concat([Body1, DocRest]), + Doc2 = lists:concat([Body2, DocRest]), + + {ok, _, _, ResultBody} = test_request:post(Url, + [?CONTENT_JSON, ?AUTH], Doc1), + {[Msg | _]} = ?JSON_DECODE(ResultBody), + {ok, _, _, ResultBody1} = test_request:post(Url, + [?CONTENT_JSON, ?AUTH], Doc2), + {[Msg1 | _]} = ?JSON_DECODE(ResultBody1), + + {ok, _, _, ResultBody2} = test_request:put(Url ++ "/" ++ "accept", + [?CONTENT_JSON, ?AUTH], Doc1), + {[Msg2 | _]} = ?JSON_DECODE(ResultBody2), + {ok, _, _, ResultBody3} = test_request:put(Url ++ "/" ++ "fail", + [?CONTENT_JSON, ?AUTH], Doc2), + {[Msg3 | _]} = ?JSON_DECODE(ResultBody3), + [ + ?_assertEqual({<<"ok">>, true}, Msg), + ?_assertEqual({<<"error">>, <<"document_too_large">>}, Msg1), + ?_assertEqual({<<"ok">>, true}, Msg2), + ?_assertEqual({<<"error">>, <<"document_too_large">>}, Msg3) + ]. + +put_multi_part_related(Url) -> + Body1 = "{\"body\":\"This is a body.\",", + Body2 = lists:concat(["{\"body\":\"This is a body it should fail", + "because there are too many characters.\","]), + DocBeg = "--bound\r\nContent-Type: application/json\r\n\r\n", + DocRest = lists:concat(["\"_attachments\":{\"foo.txt\":{\"follows\":true,", + "\"content_type\":\"text/plain\",\"length\":21},\"bar.txt\":", + "{\"follows\":true,\"content_type\":\"text/plain\",", + "\"length\":20}}}\r\n--bound\r\n\r\nthis is 21 chars long", + "\r\n--bound\r\n\r\nthis is 20 chars lon\r\n--bound--epilogue"]), + Doc1 = lists:concat([DocBeg, Body1, DocRest]), + Doc2 = lists:concat([DocBeg, Body2, DocRest]), + {ok, _, _, ResultBody} = test_request:put(Url ++ "/" ++ "accept", + [?CONTENT_MULTI_RELATED, ?AUTH], Doc1), + {[Msg | _]} = ?JSON_DECODE(ResultBody), + {ok, _, _, ResultBody1} = test_request:put(Url ++ "/" ++ "faildoc", + [?CONTENT_MULTI_RELATED, ?AUTH], Doc2), + {[Msg1 | _]} = ?JSON_DECODE(ResultBody1), + [ + ?_assertEqual({<<"ok">>, true}, Msg), + ?_assertEqual({<<"error">>, <<"document_too_large">>}, Msg1) + ]. + +post_multi_part_form(Url) -> + Port = mochiweb_socket_server:get(chttpd, port), + Host = lists:concat([ "http://127.0.0.1:", Port]), + Referer = {"Referer", Host}, + Body1 = "{\"body\":\"This is a body.\"}", + Body2 = lists:concat(["{\"body\":\"This is a body it should fail", + "because there are too many characters.\"}"]), + DocBeg = "--bound\r\nContent-Disposition: form-data; name=\"_doc\"\r\n\r\n", + DocRest = lists:concat(["\r\n--bound\r\nContent-Disposition:", + "form-data; name=\"_attachments\"; filename=\"file.txt\"\r\n", + "Content-Type: text/plain\r\n\r\ncontents of file.txt\r\n\r\n", + "--bound--"]), + Doc1 = lists:concat([DocBeg, Body1, DocRest]), + Doc2 = lists:concat([DocBeg, Body2, DocRest]), + {ok, _, _, ResultBody} = test_request:post(Url ++ "/" ++ "accept", + [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc1), + {[Msg | _]} = ?JSON_DECODE(ResultBody), + {ok, _, _, ResultBody1} = test_request:post(Url ++ "/" ++ "fail", + [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc2), + {[Msg1 | _]} = ?JSON_DECODE(ResultBody1), + [ + ?_assertEqual({<<"ok">>, true}, Msg), + ?_assertEqual({<<"error">>, <<"document_too_large">>}, Msg1) + ]. 
diff --git a/src/chttpd/test/eunit/chttpd_db_test.erl b/src/chttpd/test/eunit/chttpd_db_test.erl new file mode 100644 index 000000000..2708aa033 --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_db_test.erl @@ -0,0 +1,437 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_db_test). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(USER, "chttpd_db_test_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(CONTENT_JSON, {"Content-Type", "application/json"}). +-define(DESTHEADER1, {"Destination", "foo%E5%95%8Abar"}). +-define(DESTHEADER2, {"Destination", "foo%2Fbar%23baz%3Fpow%3Afiz"}). +-define(FIXTURE_TXT, ?ABS_PATH(?FILE)). +-define(i2l(I), integer_to_list(I)). + +setup() -> + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + TmpDb = ?tempdb(), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), + create_db(Url), + Url. + +teardown(Url) -> + delete_db(Url), + ok = config:delete("admins", ?USER, _Persist=false). + +create_db(Url) -> + {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), + ?assert(Status =:= 201 orelse Status =:= 202). + + +create_doc(Url, Id) -> + test_request:put(Url ++ "/" ++ Id, + [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}"). + +delete_db(Url) -> + {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). + +all_test_() -> + { + "chttpd db tests", + { + setup, + fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_return_ok_true_on_bulk_update/1, + fun should_return_ok_true_on_ensure_full_commit/1, + fun should_return_404_for_ensure_full_commit_on_no_db/1, + fun should_accept_live_as_an_alias_for_continuous/1, + fun should_return_404_for_delete_att_on_notadoc/1, + fun should_return_409_for_del_att_without_rev/1, + fun should_return_200_for_del_att_with_rev/1, + fun should_return_409_for_put_att_nonexistent_rev/1, + fun should_return_update_seq_when_set_on_all_docs/1, + fun should_not_return_update_seq_when_unset_on_all_docs/1, + fun should_return_correct_id_on_doc_copy/1, + fun should_return_400_for_bad_engine/1, + fun should_succeed_on_all_docs_with_queries_keys/1, + fun should_succeed_on_all_docs_with_queries_limit_skip/1, + fun should_succeed_on_all_docs_with_multiple_queries/1, + fun should_succeed_on_design_docs_with_queries_keys/1, + fun should_succeed_on_design_docs_with_queries_limit_skip/1, + fun should_succeed_on_design_docs_with_multiple_queries/1, + fun should_succeed_on_local_docs_with_queries_keys/1, + fun should_succeed_on_local_docs_with_queries_limit_skip/1, + fun should_succeed_on_local_docs_with_multiple_queries/1 + ] + } + } + }. 
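+
+
+% Every generator above runs against a fresh database: the foreach
+% fixture calls setup/0 (create a temporary db) before, and teardown/1
+% (delete it) after, each individual test.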
+ + +should_return_ok_true_on_bulk_update(Url) -> + ?_assertEqual(true, + begin + {ok, _, _, Body} = create_doc(Url, "testdoc"), + {Json} = ?JSON_DECODE(Body), + Ref = couch_util:get_value(<<"rev">>, Json, undefined), + NewDoc = "{\"docs\": [{\"_rev\": \"" ++ ?b2l(Ref) ++ "\", \"_id\": \"testdoc\"}]}", + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_bulk_docs/", + [?CONTENT_JSON, ?AUTH], NewDoc), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = lists:nth(1, ResultJson), + couch_util:get_value(<<"ok">>, InnerJson, undefined) + end). + + +should_return_ok_true_on_ensure_full_commit(Url0) -> + ?_test(begin + Url = Url0 ++ "/_ensure_full_commit", + {ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []), + {Json} = ?JSON_DECODE(Body), + ?assertEqual(201, RC), + ?assert(couch_util:get_value(<<"ok">>, Json)) + end). + + +should_return_404_for_ensure_full_commit_on_no_db(Url0) -> + ?_test(begin + Url = Url0 ++ "-missing-db" ++ "/_ensure_full_commit", + {ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []), + {Json} = ?JSON_DECODE(Body), + ?assertEqual(404, RC), + ?assertEqual(<<"not_found">>, couch_util:get_value(<<"error">>, Json)) + end). + + +should_accept_live_as_an_alias_for_continuous(Url) -> + GetLastSeq = fun(Bin) -> + Parts = binary:split(Bin, <<"\n">>, [global]), + Filtered = [P || P <- Parts, size(P) > 0], + LastSeqBin = lists:last(Filtered), + {Result} = try ?JSON_DECODE(LastSeqBin) of + Data -> Data + catch + _:_ -> + ?assert(false) % should not happen, abort + end, + couch_util:get_value(<<"last_seq">>, Result, undefined) + end, + ?_test(begin + {ok, _, _, ResultBody1} = + test_request:get(Url ++ "/_changes?feed=live&timeout=1", [?AUTH]), + LastSeq1 = GetLastSeq(ResultBody1), + + {ok, _, _, _} = create_doc(Url, "testdoc2"), + {ok, _, _, ResultBody2} = + test_request:get(Url ++ "/_changes?feed=live&timeout=1", [?AUTH]), + LastSeq2 = GetLastSeq(ResultBody2), + + ?assertNotEqual(LastSeq1, LastSeq2) + end). + + +should_return_404_for_delete_att_on_notadoc(Url) -> + ?_test(begin + {ok, RC, _, RespBody} = test_request:delete( + Url ++ "/notadoc/att.pdf", + [?CONTENT_JSON, ?AUTH], + [] + ), + ?assertEqual(404, RC), + ?assertEqual( + {[{<<"error">>,<<"not_found">>}, + {<<"reason">>,<<"missing">>}]}, + jiffy:decode(RespBody) + ), + {ok, RC1, _, _} = test_request:get( + Url ++ "/notadoc", + [?CONTENT_JSON, ?AUTH], + [] + ), + ?assertEqual(404, RC1) + end). + + +should_return_409_for_del_att_without_rev(Url) -> + ?_test(begin + {ok, RC, _, _} = test_request:put( + Url ++ "/testdoc3", + [?CONTENT_JSON, ?AUTH], + jiffy:encode(attachment_doc()) + ), + ?assertEqual(201, RC), + + {ok, RC1, _, _} = test_request:delete( + Url ++ "/testdoc3/file.erl", + [?CONTENT_JSON, ?AUTH], + [] + ), + ?assertEqual(409, RC1) + end). + + +should_return_200_for_del_att_with_rev(Url) -> + ?_test(begin + {ok, RC, _Headers, RespBody} = test_request:put( + Url ++ "/testdoc4", + [?CONTENT_JSON, ?AUTH], + jiffy:encode(attachment_doc()) + ), + ?assertEqual(201, RC), + + {ResultJson} = ?JSON_DECODE(RespBody), + Rev = couch_util:get_value(<<"rev">>, ResultJson, undefined), + + {ok, RC1, _, _} = test_request:delete( + Url ++ "/testdoc4/file.erl?rev=" ++ Rev, + [?CONTENT_JSON, ?AUTH], + [] + ), + ?assertEqual(200, RC1) + end). 
+ + +should_return_409_for_put_att_nonexistent_rev(Url) -> + ?_test(begin + {ok, RC, _Headers, RespBody} = test_request:put( + Url ++ "/should_return_404/file.erl?rev=1-000", + [?CONTENT_JSON, ?AUTH], + jiffy:encode(attachment_doc()) + ), + ?assertEqual(409, RC), + ?assertMatch({[ + {<<"error">>,<<"not_found">>}, + {<<"reason">>,<<"missing_rev">>}]}, + ?JSON_DECODE(RespBody)) + end). + + +should_return_update_seq_when_set_on_all_docs(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)], + {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/" + ++ "?update_seq=true&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ?assertNotEqual(undefined, + couch_util:get_value(<<"update_seq">>, ResultJson)), + ?assertNotEqual(undefined, + couch_util:get_value(<<"offset">>, ResultJson)) + end). + + +should_not_return_update_seq_when_unset_on_all_docs(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)], + {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/" + ++ "?update_seq=false&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ?assertEqual(undefined, + couch_util:get_value(<<"update_seq">>, ResultJson)), + ?assertNotEqual(undefined, + couch_util:get_value(<<"offset">>, ResultJson)) + end). + + +should_return_correct_id_on_doc_copy(Url) -> + ?_test(begin + {ok, _, _, _} = create_doc(Url, "testdoc"), + {_, _, _, ResultBody1} = test_request:copy(Url ++ "/testdoc/", + [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]), + {ResultJson1} = ?JSON_DECODE(ResultBody1), + Id1 = couch_util:get_value(<<"id">>, ResultJson1), + + {_, _, _, ResultBody2} = test_request:copy(Url ++ "/testdoc/", + [?CONTENT_JSON, ?AUTH, ?DESTHEADER2]), + {ResultJson2} = ?JSON_DECODE(ResultBody2), + Id2 = couch_util:get_value(<<"id">>, ResultJson2), + [ + ?assertEqual(<<102,111,111,229,149,138,98,97,114>>, Id1), + ?assertEqual(<<"foo/bar#baz?pow:fiz">>, Id2) + ] + end). + + +attachment_doc() -> + {ok, Data} = file:read_file(?FIXTURE_TXT), + {[ + {<<"_attachments">>, {[ + {<<"file.erl">>, {[ + {<<"content_type">>, <<"text/plain">>}, + {<<"data">>, base64:encode(Data)} + ]} + }]}} + ]}. + + +should_return_400_for_bad_engine(_) -> + ?_test(begin + TmpDb = ?tempdb(), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), + Url = BaseUrl ++ "?engine=cowabunga", + {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), + ?assertEqual(400, Status) + end). + + +should_succeed_on_all_docs_with_queries_keys(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/", + [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) + end). 
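+
+
+% The queries endpoints return {"results": [...]} with one entry per
+% query object posted, each entry carrying its own "rows" list (and
+% "offset" where applicable); the remaining tests destructure that
+% shape.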
+ + +should_succeed_on_all_docs_with_queries_limit_skip(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/", + [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)), + ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) + end). + + +should_succeed_on_all_docs_with_multiple_queries(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}, + {\"limit\": 5, \"skip\": 2}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/", + [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson1} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), + {InnerJson2} = lists:nth(2, ResultJsonBody), + ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), + ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) + end). + + +should_succeed_on_design_docs_with_queries_keys(Url) -> + ?_test(begin + [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\", + \"_design/ddoc8\"]}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ + "/_design_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) + end). + + +should_succeed_on_design_docs_with_queries_limit_skip(Url) -> + ?_test(begin + [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ + "/_design_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)), + ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) + end). + + +should_succeed_on_design_docs_with_multiple_queries(Url) -> + ?_test(begin + [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\", + \"_design/ddoc8\"]}, {\"limit\": 5, \"skip\": 2}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ + "/_design_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson1} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), + {InnerJson2} = lists:nth(2, ResultJsonBody), + ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), + ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) + end). 
+ + +should_succeed_on_local_docs_with_queries_keys(Url) -> + ?_test(begin + [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"keys\": + [ \"_local/doc3\", \"_local/doc8\"]}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ "/_local_docs/queries/", + [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) + end). + + +should_succeed_on_local_docs_with_queries_limit_skip(Url) -> + ?_test(begin + [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ + "/_local_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson} = lists:nth(1, ResultJsonBody), + ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) + end). + + +should_succeed_on_local_docs_with_multiple_queries(Url) -> + ?_test(begin + [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"keys\": [ \"_local/doc3\", + \"_local/doc8\"]}, {\"limit\": 5, \"skip\": 2}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ + "/_local_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson1} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), + {InnerJson2} = lists:nth(2, ResultJsonBody), + ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) + end). diff --git a/src/chttpd/test/eunit/chttpd_dbs_info_test.erl b/src/chttpd/test/eunit/chttpd_dbs_info_test.erl new file mode 100644 index 000000000..5b61d8831 --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_dbs_info_test.erl @@ -0,0 +1,169 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_dbs_info_test). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(USER, "chttpd_db_test_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(CONTENT_JSON, {"Content-Type", "application/json"}). + + +setup() -> + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + Url = lists:concat(["http://", Addr, ":", Port, "/"]), + Db1Url = lists:concat([Url, "db1"]), + create_db(Db1Url), + Db2Url = lists:concat([Url, "db2"]), + create_db(Db2Url), + Url. 
+ +teardown(Url) -> + Db1Url = lists:concat([Url, "db1"]), + Db2Url = lists:concat([Url, "db2"]), + delete_db(Db1Url), + delete_db(Db2Url), + ok = config:delete("admins", ?USER, _Persist=false). + +create_db(Url) -> + {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), + ?assert(Status =:= 201 orelse Status =:= 202). + +delete_db(Url) -> + {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). + +dbs_info_test_() -> + { + "chttpd dbs info tests", + { + setup, + fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_return_error_for_get_db_info/1, + fun should_return_dbs_info_for_single_db/1, + fun should_return_dbs_info_for_multiple_dbs/1, + fun should_return_error_for_exceeded_keys/1, + fun should_return_error_for_missing_keys/1, + fun should_return_dbs_info_for_dbs_with_mixed_state/1 + ] + } + } + }. + + +should_return_error_for_get_db_info(Url) -> + ?_test(begin + {ok, Code, _, ResultBody} = test_request:get(Url ++ "/_dbs_info?" + ++ "keys=[\"db1\"]", [?CONTENT_JSON, ?AUTH]), + {Body} = jiffy:decode(ResultBody), + [ + ?assertEqual(<<"method_not_allowed">>, + couch_util:get_value(<<"error">>, Body)), + ?assertEqual(405, Code) + ] + end). + + +should_return_dbs_info_for_single_db(Url) -> + ?_test(begin + NewDoc = "{\"keys\": [\"db1\"]}", + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", + [?CONTENT_JSON, ?AUTH], NewDoc), + BodyJson = jiffy:decode(ResultBody), + {Db1Data} = lists:nth(1, BodyJson), + [ + ?assertEqual(<<"db1">>, + couch_util:get_value(<<"key">>, Db1Data)), + ?assertNotEqual(undefined, + couch_util:get_value(<<"info">>, Db1Data)) + ] + end). + + +should_return_dbs_info_for_multiple_dbs(Url) -> + ?_test(begin + NewDoc = "{\"keys\": [\"db1\", \"db2\"]}", + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", + [?CONTENT_JSON, ?AUTH], NewDoc), + BodyJson = jiffy:decode(ResultBody), + {Db1Data} = lists:nth(1, BodyJson), + {Db2Data} = lists:nth(2, BodyJson), + [ + ?assertEqual(<<"db1">>, + couch_util:get_value(<<"key">>, Db1Data)), + ?assertNotEqual(undefined, + couch_util:get_value(<<"info">>, Db1Data)), + ?assertEqual(<<"db2">>, + couch_util:get_value(<<"key">>, Db2Data)), + ?assertNotEqual(undefined, + couch_util:get_value(<<"info">>, Db2Data)) + ] + end). + + +should_return_error_for_exceeded_keys(Url) -> + ?_test(begin + NewDoc = "{\"keys\": [\"db1\", \"db2\"]}", + ok = config:set("chttpd", "max_db_number_for_dbs_info_req", "1"), + {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", + [?CONTENT_JSON, ?AUTH], NewDoc), + {Body} = jiffy:decode(ResultBody), + ok = config:delete("chttpd", "max_db_number_for_dbs_info_req"), + [ + ?assertEqual(<<"bad_request">>, + couch_util:get_value(<<"error">>, Body)), + ?assertEqual(400, Code) + ] + end). + + +should_return_error_for_missing_keys(Url) -> + ?_test(begin + NewDoc = "{\"missingkeys\": [\"db1\", \"db2\"]}", + {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", + [?CONTENT_JSON, ?AUTH], NewDoc), + {Body} = jiffy:decode(ResultBody), + [ + ?assertEqual(<<"bad_request">>, + couch_util:get_value(<<"error">>, Body)), + ?assertEqual(400, Code) + ] + end). 
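+
+
+% _dbs_info returns one entry per requested key, in request order; a
+% database that does not exist still gets an entry for its key, just
+% without an "info" member, which the mixed-state test below verifies.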
+ + +should_return_dbs_info_for_dbs_with_mixed_state(Url) -> + ?_test(begin + NewDoc = "{\"keys\": [\"db1\", \"noexisteddb\"]}", + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", + [?CONTENT_JSON, ?AUTH], NewDoc), + Json = jiffy:decode(ResultBody), + {Db1Data} = lists:nth(1, Json), + {Db2Data} = lists:nth(2, Json), + [ + ?assertEqual( + <<"db1">>, couch_util:get_value(<<"key">>, Db1Data)), + ?assertNotEqual(undefined, + couch_util:get_value(<<"info">>, Db1Data)), + ?assertEqual( + <<"noexisteddb">>, couch_util:get_value(<<"key">>, Db2Data)), + ?assertEqual(undefined, couch_util:get_value(<<"info">>, Db2Data)) + ] + end). diff --git a/src/chttpd/test/eunit/chttpd_error_info_tests.erl b/src/chttpd/test/eunit/chttpd_error_info_tests.erl new file mode 100644 index 000000000..fdb015c08 --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_error_info_tests.erl @@ -0,0 +1,168 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_error_info_tests). + +-include_lib("eunit/include/eunit.hrl"). + + +error_info_test() -> + Error = <<"error">>, + Reason = <<"reason">>, + ArgResult = [ + { + bad_request, + {400, <<"bad_request">>, <<>>} + }, + { + {bad_request, Reason}, + {400, <<"bad_request">>, Reason} + }, + { + {bad_request, "error", "reason"}, + {400, Error, Reason} + }, + { + {query_parse_error, Reason}, + {400, <<"query_parse_error">>, Reason} + }, + { + database_does_not_exist, + {404, <<"not_found">>, <<"Database does not exist.">>} + }, + { + not_found, + {404, <<"not_found">>, <<"missing">>} + }, + { + {not_found, Reason}, + {404, <<"not_found">>, Reason} + }, + { + {not_acceptable, Reason}, + {406, <<"not_acceptable">>, Reason} + }, + { + conflict, + {409, <<"conflict">>, <<"Document update conflict.">>} + }, + { + {conflict, Reason}, + %% yes, the reason is ignored + {409, <<"conflict">>, <<"Document update conflict.">>} + }, + { + {forbidden, Reason}, + {403, <<"forbidden">>, Reason} + }, + { + {forbidden, Error, Reason}, + {403, Error, Reason} + }, + { + {unauthorized, Reason}, + {401, <<"unauthorized">>, Reason} + }, + { + file_exists, + {412, <<"file_exists">>, + <<"The database could not be created, the file already exists.">>} + }, + { + {error, {nodedown, Reason}}, {412, <<"nodedown">>, Reason} + }, + { + {maintenance_mode, Reason}, + {412, <<"nodedown">>, Reason} + }, + { + {maintenance_mode, nil, Reason}, + {412, <<"nodedown">>, Reason} + }, + { + {w_quorum_not_met, Reason}, + {500, <<"write_quorum_not_met">>, Reason} + }, + { + request_uri_too_long, + {414, <<"too_long">>, <<"the request uri is too long">>} + }, + { + {bad_ctype, Reason}, + {415, <<"bad_content_type">>, Reason} + }, + { + requested_range_not_satisfiable, + {416, <<"requested_range_not_satisfiable">>, + <<"Requested range not satisfiable">>} + }, + { + {error, {illegal_database_name, <<"foo">>}}, + {400, <<"illegal_database_name">>, + <<"Name: 'foo'. Only lowercase characters (a-z), digits (0-9), and any of" + " the characters _, $, (, ), +, -, and / are allowed." 
+ " Must begin with a letter.">>} + }, + { + {Error, {illegal_docid,1}}, + {400, <<"illegal_docid">>, 1} + }, + { + {missing_stub, Reason}, + {412, <<"missing_stub">>, Reason} + }, + { + request_entity_too_large, + {413, <<"too_large">>, <<"the request entity is too large">>} + }, + { + not_implemented, + {501, <<"not_implemented">>, + <<"this feature is not yet implemented">>} + }, + { + timeout, + {500, <<"timeout">>, + <<"The request could not be processed in a reasonable" + " amount of time.">>} + }, + { + {timeout, Error}, + {500, <<"timeout">>, + <<"The request could not be processed in a reasonable" + " amount of time.">>} + }, + { + {Error, null}, + {500, <<"unknown_error">>, Error} + }, + { + {Error, Reason}, + {500, Error, Reason} + }, + { + {Error, nil, [{}]}, + {500, <<"unknown_error">>, Error} + }, + { + {Error, Reason, [{}]}, + {500, Error, Reason} + }, + { + Error, + {500, <<"unknown_error">>, Error} + } + ], + + lists:foreach(fun({Arg, Result}) -> + ?assertEqual(Result, chttpd:error_info(Arg)) + end, ArgResult). diff --git a/src/chttpd/test/eunit/chttpd_handlers_tests.erl b/src/chttpd/test/eunit/chttpd_handlers_tests.erl new file mode 100644 index 000000000..f3e8f5dcd --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_handlers_tests.erl @@ -0,0 +1,87 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_handlers_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + + +setup() -> + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + BaseUrl = lists:concat(["http://", Addr, ":", Port]), + BaseUrl. + +teardown(_Url) -> + ok. + + +replicate_test_() -> + { + "_replicate", + { + setup, + fun chttpd_test_util:start_couch/0, + fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_escape_dbname_on_replicate/1 + ] + } + } + }. + + +should_escape_dbname_on_replicate(Url) -> + ?_test( + begin + UrlBin = ?l2b(Url), + Request = couch_util:json_encode({[ + {<<"source">>, <>}, + {<<"target">>, <<"bar/baz">>}, + {<<"create_target">>, true} + ]}), + {ok, 200, _, Body} = request_replicate(Url ++ "/_replicate", Request), + JSON = couch_util:json_decode(Body), + + Source = json_value(JSON, [<<"source">>]), + Target = json_value(JSON, [<<"target">>, <<"url">>]), + ?assertEqual(<>, Source), + ?assertEqual(<>, Target) + end). + + +json_value(JSON, Keys) -> + couch_util:get_nested_json_value(JSON, Keys). + +request_replicate(Url, Body) -> + Headers = [{"Content-Type", "application/json"}], + Handler = {chttpd_misc, handle_replicate_req}, + request(post, Url, Headers, Body, Handler, fun(Req) -> + chttpd:send_json(Req, 200, get(post_body)) + end). + +request(Method, Url, Headers, Body, {M, F}, MockFun) -> + meck:new(M, [passthrough, non_strict]), + try + meck:expect(M, F, MockFun), + Result = test_request:Method(Url, Headers, Body), + ?assert(meck:validate(M)), + Result + catch Kind:Reason -> + {Kind, Reason} + after + meck:unload(M) + end. 
diff --git a/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl b/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl new file mode 100644 index 000000000..d53d370f8 --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl @@ -0,0 +1,112 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_open_revs_error_test). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(USER, "chttpd_db_test_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(CONTENT_JSON, {"Content-Type", "application/json"}). +-define(CONTENT_MULTI_FORM, {"Content-Type", + "multipart/form-data;boundary=\"bound\""}). + +setup() -> + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + TmpDb = ?tempdb(), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), + mock(fabric), + create_db(Url), + Url. + +teardown(Url) -> + delete_db(Url), + (catch meck:unload(fabric)), + ok = config:delete("admins", ?USER, _Persist=false). + +create_db(Url) -> + {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), + ?assert(Status =:= 201 orelse Status =:= 202). + + +create_doc(Url, Id) -> + test_request:put(Url ++ "/" ++ Id, + [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}"). + +delete_db(Url) -> + {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). + +open_revs_error_test_() -> + { + "open revs error tests", + { + setup, + fun chttpd_test_util:start_couch/0, + fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_return_503_error_for_open_revs_get/1, + fun should_return_503_error_for_open_revs_post_form/1 + ] + } + } + }. + +should_return_503_error_for_open_revs_get(Url) -> + {ok, _, _, Body} = create_doc(Url, "testdoc"), + {Json} = ?JSON_DECODE(Body), + Ref = couch_util:get_value(<<"rev">>, Json, undefined), + mock_open_revs({error, all_workers_died}), + {ok, Code, _, _} = test_request:get(Url ++ + "/testdoc?rev=" ++ ?b2l(Ref), [?AUTH]), + ?_assertEqual(503, Code). 
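+
+% The next test drives the legacy HTML form based document update: a
+% multipart/form-data body carrying _doc, _rev and _attachments fields.
+% With fabric:open_revs mocked to {error, all_workers_died}, the handler
+% should answer 503. Note that the asserted reason string, "service
+% unvailable", deliberately matches the (misspelled) error reason that
+% chttpd emits for this case.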
+ +should_return_503_error_for_open_revs_post_form(Url) -> + Port = mochiweb_socket_server:get(chttpd, port), + Host = lists:concat([ "http://127.0.0.1:", Port]), + Referer = {"Referer", Host}, + Body1 = "{\"body\":\"This is a body.\"}", + DocBeg = "--bound\r\nContent-Disposition: form-data; name=\"_doc\"\r\n\r\n", + DocRev = "--bound\r\nContent-Disposition: form-data; name=\"_rev\"\r\n\r\n", + DocRest = "\r\n--bound\r\nContent-Disposition:" + "form-data; name=\"_attachments\"; filename=\"file.txt\"\r\n" + "Content-Type: text/plain\r\n\r\ncontents of file.txt\r\n\r\n" + "--bound--", + Doc1 = lists:concat([DocBeg, Body1, DocRest]), + {ok, _, _, ResultBody} = test_request:post(Url ++ "/" ++ "RevDoc", + [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc1), + {Json} = ?JSON_DECODE(ResultBody), + Ref = couch_util:get_value(<<"rev">>, Json, undefined), + Doc2 = lists:concat([DocRev, ?b2l(Ref) , DocRest]), + + mock_open_revs({error, all_workers_died}), + {ok, Code, _, ResultBody1} = test_request:post(Url ++ "/" ++ "RevDoc", + [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc2), + {Json1} = ?JSON_DECODE(ResultBody1), + ErrorMessage = couch_util:get_value(<<"error">>, Json1), + [ + ?_assertEqual(503, Code), + ?_assertEqual(<<"service unvailable">>, ErrorMessage) + ]. + +mock_open_revs(RevsResp) -> + ok = meck:expect(fabric, open_revs, fun(_, _, _, _) -> RevsResp end). + +mock(fabric) -> + ok = meck:new(fabric, [passthrough]). diff --git a/src/chttpd/test/eunit/chttpd_plugin_tests.erl b/src/chttpd/test/eunit/chttpd_plugin_tests.erl new file mode 100644 index 000000000..36572a419 --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_plugin_tests.erl @@ -0,0 +1,187 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_plugin_tests). + +-export([ + before_request/1, + after_request/2, + handle_error/1, + before_response/4, + before_serve_file/5 +]). + +-export([ %% couch_epi_plugin behaviour + app/0, + providers/0, + services/0, + data_providers/0, + data_subscriptions/0, + processes/0, + notify/3 +]). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +%% couch_epi_plugin behaviour + +app() -> test_app. +providers() -> [{chttpd, ?MODULE}]. +services() -> []. +data_providers() -> []. +data_subscriptions() -> []. +processes() -> []. +notify(_, _, _) -> ok. + + +setup() -> + couch_tests:setup([ + couch_epi_dispatch:dispatch(chttpd, ?MODULE) + ]). + +teardown(Ctx) -> + couch_tests:teardown(Ctx). + +before_request({true, Id}) -> [{true, [{before_request, Id}]}]; +before_request({false, Id}) -> [{false, Id}]; +before_request({fail, Id}) -> throw({before_request, Id}). + +after_request({true, Id}, A) -> [{true, [{after_request, Id}]}, A]; +after_request({false, Id}, A) -> [{false, Id}, A]; +after_request({fail, Id}, _A) -> throw({after_request, Id}). + +handle_error({true, Id}) -> [{true, [{handle_error, Id}]}]; +handle_error({false, Id}) -> [{false, Id}]; +handle_error({fail, Id}) -> throw({handle_error, Id}). 
+ +before_response({true, Id}, A, B, C) -> + [{true, [{before_response, Id}]}, A, B, C]; +before_response({false, Id}, A, B, C) -> + [{false, Id}, A, B, C]; +before_response({fail, Id}, _A, _B, _C) -> + throw({before_response, Id}). + +before_serve_file({true, Id}, A, B, C, D) -> + [{true, [{before_serve_file, Id}]}, A, B, C, D]; +before_serve_file({false, Id}, A, B, C, D) -> + [{false, Id}, A, B, C, D]; +before_serve_file({fail, _Id}, _A, _B, _C, _D) -> + throw(before_serve_file). + +callback_test_() -> + { + "callback tests", + { + setup, fun setup/0, fun teardown/1, + [ + fun before_request_match/0, + fun before_request_no_match/0, + fun before_request_throw/0, + + fun after_request_match/0, + fun after_request_no_match/0, + fun after_request_throw/0, + + fun handle_error_match/0, + fun handle_error_no_match/0, + fun handle_error_throw/0, + + fun before_response_match/0, + fun before_response_no_match/0, + fun before_response_throw/0, + + fun before_serve_file_match/0, + fun before_serve_file_no_match/0, + fun before_serve_file_throw/0 + ] + } + }. + + +before_request_match() -> + ?assertEqual( + {ok, {true, [{before_request, foo}]}}, + chttpd_plugin:before_request({true, foo})). + +before_request_no_match() -> + ?assertEqual( + {ok, {false, foo}}, + chttpd_plugin:before_request({false, foo})). + +before_request_throw() -> + ?assertThrow( + {before_request, foo}, + chttpd_plugin:before_request({fail, foo})). + + +after_request_match() -> + ?assertEqual( + {ok, bar}, + chttpd_plugin:after_request({true, foo}, bar)). + +after_request_no_match() -> + ?assertEqual( + {ok, bar}, + chttpd_plugin:after_request({false, foo}, bar)). + +after_request_throw() -> + ?assertThrow( + {after_request, foo}, + chttpd_plugin:after_request({fail, foo}, bar)). + + +handle_error_match() -> + ?assertEqual( + {true, [{handle_error, foo}]}, + chttpd_plugin:handle_error({true, foo})). + +handle_error_no_match() -> + ?assertEqual( + {false, foo}, + chttpd_plugin:handle_error({false, foo})). + +handle_error_throw() -> + ?assertThrow( + {handle_error, foo}, + chttpd_plugin:handle_error({fail, foo})). + +before_response_match() -> + ?assertEqual( + {ok, {{true, [{before_response, foo}]}, 1, 2, 3}}, + chttpd_plugin:before_response({true, foo}, 1, 2, 3)). + +before_response_no_match() -> + ?assertEqual( + {ok, {{false, foo}, 1, 2, 3}}, + chttpd_plugin:before_response({false, foo}, 1, 2, 3)). + +before_response_throw() -> + ?assertThrow( + {before_response, foo}, + chttpd_plugin:before_response({fail, foo}, 1, 2, 3)). + + +before_serve_file_match() -> + ?assertEqual( + {ok, {{true, [{before_serve_file, foo}]}, 1, 2, 3, 4}}, + chttpd_plugin:before_serve_file({true, foo}, 1, 2, 3, 4)). + +before_serve_file_no_match() -> + ?assertEqual( + {ok, {{false, foo}, 1, 2, 3, 4}}, + chttpd_plugin:before_serve_file({false, foo}, 1, 2, 3, 4)). + +before_serve_file_throw() -> + ?assertThrow( + before_serve_file, + chttpd_plugin:before_serve_file({fail, foo}, 1, 2, 3, 4)). diff --git a/src/chttpd/test/eunit/chttpd_prefer_header_test.erl b/src/chttpd/test/eunit/chttpd_prefer_header_test.erl new file mode 100644 index 000000000..0f43ba437 --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_prefer_header_test.erl @@ -0,0 +1,112 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_prefer_header_test). + +-compile(tuple_calls). + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +mock_request(ExcludeHeader) -> + Headers = mochiweb_headers:make(ExcludeHeader), + MochiReq = mochiweb_request:new(nil, 'GET', "/", {1, 1}, Headers), + MochiReq:cleanup(), + #httpd{mochi_req = MochiReq}. + + +default_headers() -> + [ + {"Cache-Control","must-revalidate"}, + {"Content-Type","application/json"}, + {"Content-Length", "100"}, + {"ETag","\"12343\""}, + {"X-Couch-Request-ID","7bd1adab86"}, + {"X-CouchDB-Body-Time","0"}, + {"Vary", "Accept-Encoding"}, + {"Server","CouchDB/2.1.0-f1a1d7f1c (Erlang OTP/19)"} + ]. + + +minimal_options_headers() -> + [ + {"Cache-Control","must-revalidate"}, + {"Content-Type","application/json"}, + {"Content-Length", "100"}, + {"ETag","\"12343\""}, + {"Vary", "Accept-Encoding"}, + {"Server","CouchDB/2.1.0-f1a1d7f1c (Erlang OTP/19)"} + ]. + + +default_no_exclude_header_test() -> + Headers = chttpd_prefer_header:maybe_return_minimal( + mock_request([]), + default_headers() + ), + ?assertEqual(default_headers(), Headers). + + +unsupported_exclude_header_test() -> + Req = mock_request([{"prefer", "Wrong"}]), + Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()), + ?assertEqual(default_headers(), Headers). + + +empty_header_test() -> + Req = mock_request([{"prefer", ""}]), + Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()), + ?assertEqual(default_headers(), Headers). + +setup() -> + ok = meck:new(config), + ok = meck:expect(config, get, fun("chttpd", "prefer_minimal", _) -> + "Cache-Control, Content-Length, Content-Type, ETag, Server, Vary" + end), + ok. + + +teardown(_) -> + meck:unload(config). + + +exclude_headers_test_() -> + { + "Test Prefer headers", + { + foreach, fun setup/0, fun teardown/1, + [ + fun minimal_options/1, + fun minimal_options_check_header_case/1, + fun minimal_options_check_header_value_case/1 + ] + } + }. + + +minimal_options(_) -> + Req = mock_request([{"Prefer", "return=minimal"}]), + Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()), + ?_assertEqual(minimal_options_headers(), Headers). + + +minimal_options_check_header_case(_) -> + Req = mock_request([{"prefer", "return=minimal"}]), + Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()), + ?_assertEqual(minimal_options_headers(), Headers). + + +minimal_options_check_header_value_case(_) -> + Req = mock_request([{"prefer", "RETURN=MINIMAL"}]), + Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()), + ?_assertEqual(minimal_options_headers(), Headers). \ No newline at end of file diff --git a/src/chttpd/test/eunit/chttpd_purge_tests.erl b/src/chttpd/test/eunit/chttpd_purge_tests.erl new file mode 100644 index 000000000..dbd73de1f --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_purge_tests.erl @@ -0,0 +1,406 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_purge_tests). + + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + + +-define(USER, "chttpd_db_test_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(CONTENT_JSON, {"Content-Type", "application/json"}). + + +setup() -> + ok = config:set("admins", ?USER, ?PASS, _Persist=false), + TmpDb = ?tempdb(), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), + create_db(Url), + Url. + + +teardown(Url) -> + delete_db(Url), + ok = config:delete("admins", ?USER, _Persist=false). + + +create_db(Url) -> + {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), + ?assert(Status =:= 201 orelse Status =:= 202). + + +create_doc(Url, Id) -> + test_request:put(Url ++ "/" ++ Id, + [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}"). + +create_doc(Url, Id, Content) -> + test_request:put(Url ++ "/" ++ Id, + [?CONTENT_JSON, ?AUTH], "{\"mr\": \"" ++ Content ++ "\"}"). + + +create_docs(Url, Docs) -> + test_request:post(Url ++ "/_bulk_docs", + [?CONTENT_JSON, ?AUTH], ?JSON_ENCODE({[{docs, Docs}]})). + + +delete_db(Url) -> + {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). + + +purge_test_() -> + { + "chttpd db tests", + { + setup, + fun chttpd_test_util:start_couch/0, + fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, + fun teardown/1, + [ + fun test_empty_purge_request/1, + fun test_ok_purge_request/1, + fun test_ok_purge_request_with_101_docid/1, + fun test_accepted_purge_request/1, + fun test_partial_purge_request/1, + fun test_mixed_purge_request/1, + fun test_overmany_ids_or_revs_purge_request/1, + fun test_exceed_limits_on_purge_infos/1, + fun should_error_set_purged_docs_limit_to0/1, + fun test_timeout_set_purged_infos_limit/1 + ] + } + } + }. + + +test_empty_purge_request(Url) -> + ?_test(begin + IdsRevs = "{}", + {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], IdsRevs), + ResultJson = ?JSON_DECODE(ResultBody), + ?assert(Status =:= 201 orelse Status =:= 202), + ?assertEqual( + {[ + {<<"purge_seq">>, null}, + {<<"purged">>,{[]}} + ]}, + ResultJson + ) + end). 
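+
+% A _purge request body maps each document id to the list of revisions
+% to remove, e.g. {"doc1": ["1-abc"]}; the response echoes, per id, the
+% revisions that were actually purged, as the tests below demonstrate.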
+ + +test_ok_purge_request(Url) -> + ?_test(begin + {ok, _, _, Body} = create_doc(Url, "doc1"), + {Json} = ?JSON_DECODE(Body), + Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), + {ok, _, _, Body2} = create_doc(Url, "doc2"), + {Json2} = ?JSON_DECODE(Body2), + Rev2 = couch_util:get_value(<<"rev">>, Json2, undefined), + {ok, _, _, Body3} = create_doc(Url, "doc3"), + {Json3} = ?JSON_DECODE(Body3), + Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined), + + IdsRevsEJson = {[ + {<<"doc1">>, [Rev1]}, + {<<"doc2">>, [Rev2]}, + {<<"doc3">>, [Rev3]} + ]}, + IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), + + {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], IdsRevs), + ResultJson = ?JSON_DECODE(ResultBody), + ?assert(Status =:= 201 orelse Status =:= 202), + ?assertEqual( + {[ + {<<"purge_seq">>, null}, + {<<"purged">>, {[ + {<<"doc1">>, [Rev1]}, + {<<"doc2">>, [Rev2]}, + {<<"doc3">>, [Rev3]} + ]}} + ]}, + ResultJson + ) + end). + + +test_ok_purge_request_with_101_docid(Url) -> + ?_test(begin + PurgedDocsNum = 101, + Docs = lists:foldl(fun(I, Acc) -> + Id = list_to_binary(integer_to_list(I)), + Doc = {[{<<"_id">>, Id}, {value, I}]}, + [Doc | Acc] + end, [], lists:seq(1, PurgedDocsNum)), + + {ok, _, _, Body} = create_docs(Url, Docs), + BodyJson = ?JSON_DECODE(Body), + + PurgeBody = lists:map(fun({DocResp}) -> + Id = couch_util:get_value(<<"id">>, DocResp, undefined), + Rev = couch_util:get_value(<<"rev">>, DocResp, undefined), + {Id, [Rev]} + end, BodyJson), + + ok = config:set("purge", "max_document_id_number", "101"), + try + {ok, Status, _, _} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], ?JSON_ENCODE({PurgeBody})), + ?assert(Status =:= 201 orelse Status =:= 202) + after + ok = config:delete("purge", "max_document_id_number") + end + end). + + +test_accepted_purge_request(Url) -> + ?_test(begin + {ok, _, _, Body} = create_doc(Url, "doc1"), + {Json} = ?JSON_DECODE(Body), + Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), + IdsRevsEJson = {[ + {<<"doc1">>, [Rev1]} + ]}, + IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), + meck:new(fabric, [passthrough]), + meck:expect(fabric, purge_docs, + fun(_, _, _) -> {accepted,[{accepted,[{1, + <<57,27,64,134,152,18,73,243,40,1,141,214,135,104,79,188>>}]}]} + end + ), + {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], IdsRevs), + ResultJson = ?JSON_DECODE(ResultBody), + meck:unload(fabric), + ?assert(Status =:= 202), + ?assertEqual( + {[ + {<<"purge_seq">>, null}, + {<<"purged">>, {[ + {<<"doc1">>, [Rev1]} + ]}} + ]}, + ResultJson + ) + end). 
+ + +test_partial_purge_request(Url) -> + ?_test(begin + {ok, _, _, Body} = create_doc(Url, "doc1"), + {Json} = ?JSON_DECODE(Body), + Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), + + NewDoc = "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\", + \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]}, + \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}", + {ok, _, _, _} = test_request:post(Url ++ "/_bulk_docs/", + [?CONTENT_JSON, ?AUTH], NewDoc), + + IdsRevsEJson = {[{<<"doc1">>, [Rev1]}]}, + IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), + {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], IdsRevs), + ResultJson = ?JSON_DECODE(ResultBody), + ?assert(Status =:= 201 orelse Status =:= 202), + ?assertEqual( + {[ + {<<"purge_seq">>, null}, + {<<"purged">>, {[ + {<<"doc1">>, [Rev1]} + ]}} + ]}, + ResultJson + ), + {ok, Status2, _, ResultBody2} = test_request:get(Url + ++ "/doc1/", [?AUTH]), + {Json2} = ?JSON_DECODE(ResultBody2), + Content = couch_util:get_value(<<"content">>, Json2, undefined), + ?assertEqual(<<"updated">>, Content), + ?assert(Status2 =:= 200) + end). + + +test_mixed_purge_request(Url) -> + ?_test(begin + {ok, _, _, Body} = create_doc(Url, "doc1"), + {Json} = ?JSON_DECODE(Body), + Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), + + NewDoc = "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\", + \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]}, + \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}", + {ok, _, _, _} = test_request:post(Url ++ "/_bulk_docs/", + [?CONTENT_JSON, ?AUTH], NewDoc), + + {ok, _, _, _Body2} = create_doc(Url, "doc2", "content2"), + {ok, _, _, Body3} = create_doc(Url, "doc3", "content3"), + {Json3} = ?JSON_DECODE(Body3), + Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined), + + + IdsRevsEJson = {[ + {<<"doc1">>, [Rev1]}, % partial purge + {<<"doc2">>, [Rev3, Rev1]}, % correct format, but invalid rev + {<<"doc3">>, [Rev3]} % correct format and rev + ]}, + IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), + {ok, Status, _, Body4} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], IdsRevs), + ResultJson = ?JSON_DECODE(Body4), + ?assert(Status =:= 201 orelse Status =:= 202), + ?assertEqual( + {[ + {<<"purge_seq">>, null}, + {<<"purged">>, {[ + {<<"doc1">>, [Rev1]}, + {<<"doc2">>, []}, + {<<"doc3">>, [Rev3]} + ]}} + ]}, + ResultJson + ), + {ok, Status2, _, Body5} = test_request:get(Url + ++ "/doc1/", [?AUTH]), + {Json5} = ?JSON_DECODE(Body5), + Content = couch_util:get_value(<<"content">>, Json5, undefined), + ?assertEqual(<<"updated">>, Content), + ?assert(Status2 =:= 200) + end). 
+ + +test_overmany_ids_or_revs_purge_request(Url) -> + ?_test(begin + {ok, _, _, Body} = create_doc(Url, "doc1"), + {Json} = ?JSON_DECODE(Body), + Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), + + NewDoc = "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\", + \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]}, + \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}", + {ok, _, _, _} = test_request:post(Url ++ "/_bulk_docs/", + [?CONTENT_JSON, ?AUTH], NewDoc), + + {ok, _, _, _Body2} = create_doc(Url, "doc2", "content2"), + {ok, _, _, Body3} = create_doc(Url, "doc3", "content3"), + {Json3} = ?JSON_DECODE(Body3), + Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined), + + IdsRevsEJson = {[ + {<<"doc1">>, [Rev1]}, % partial purge + {<<"doc2">>, [Rev3, Rev1]}, % correct format, but invalid rev + {<<"doc3">>, [Rev3]} % correct format and rev + ]}, + IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), + + % Ids larger than expected + config:set("purge", "max_document_id_number", "1"), + {ok, Status, _, Body4} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], IdsRevs), + config:delete("purge", "max_document_id_number"), + ResultJson = ?JSON_DECODE(Body4), + ?assertEqual(400, Status), + ?assertMatch({[ + {<<"error">>,<<"bad_request">>}, + {<<"reason">>,<<"Exceeded maximum number of documents.">>}]}, + ResultJson), + + % Revs larger than expected + config:set("purge", "max_revisions_number", "1"), + {ok, Status2, _, Body5} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], IdsRevs), + config:delete("purge", "max_revisions_number"), + ResultJson2 = ?JSON_DECODE(Body5), + ?assertEqual(400, Status2), + ?assertMatch({[ + {<<"error">>,<<"bad_request">>}, + {<<"reason">>,<<"Exceeded maximum number of revisions.">>}]}, + ResultJson2) + end). + + +test_exceed_limits_on_purge_infos(Url) -> + ?_test(begin + {ok, Status1, _, _} = test_request:put(Url ++ "/_purged_infos_limit/", + [?CONTENT_JSON, ?AUTH], "2"), + ?assert(Status1 =:= 200), + + {ok, _, _, Body} = create_doc(Url, "doc1"), + {Json} = ?JSON_DECODE(Body), + Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), + {ok, _, _, Body2} = create_doc(Url, "doc2"), + {Json2} = ?JSON_DECODE(Body2), + Rev2 = couch_util:get_value(<<"rev">>, Json2, undefined), + {ok, _, _, Body3} = create_doc(Url, "doc3"), + {Json3} = ?JSON_DECODE(Body3), + Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined), + + IdsRevsEJson = {[ + {<<"doc1">>, [Rev1]}, + {<<"doc2">>, [Rev2]}, + {<<"doc3">>, [Rev3]} + ]}, + IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), + + {ok, Status2, _, ResultBody} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], IdsRevs), + + ResultJson = ?JSON_DECODE(ResultBody), + ?assert(Status2 =:= 201 orelse Status2 =:= 202), + ?assertEqual( + {[ + {<<"purge_seq">>, null}, + {<<"purged">>, {[ + {<<"doc1">>, [Rev1]}, + {<<"doc2">>, [Rev2]}, + {<<"doc3">>, [Rev3]} + ]}} + ]}, + ResultJson + ) + + end). + + +should_error_set_purged_docs_limit_to0(Url) -> + ?_test(begin + {ok, Status, _, _} = test_request:put(Url ++ "/_purged_infos_limit/", + [?CONTENT_JSON, ?AUTH], "0"), + ?assert(Status =:= 400) + end). 
+
+
+test_timeout_set_purged_infos_limit(Url) ->
+    ?_test(begin
+        meck:new(fabric, [passthrough]),
+        meck:expect(fabric, set_purge_infos_limit, fun(_, _, _) ->
+            {error, timeout} end),
+        {ok, Status, _, ResultBody} = test_request:put(Url
+            ++ "/_purged_infos_limit/", [?CONTENT_JSON, ?AUTH], "2"),
+        meck:unload(fabric),
+        ResultJson = ?JSON_DECODE(ResultBody),
+        ?assert(Status =:= 500),
+        ?assertMatch({[
+            {<<"error">>,<<"error">>},
+            {<<"reason">>,<<"timeout">>}]},
+            ResultJson)
+    end).
diff --git a/src/chttpd/test/eunit/chttpd_security_tests.erl b/src/chttpd/test/eunit/chttpd_security_tests.erl
new file mode 100644
index 000000000..955b4ff01
--- /dev/null
+++ b/src/chttpd/test/eunit/chttpd_security_tests.erl
@@ -0,0 +1,384 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_security_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(USER, "chttpd_db_test_admin").
+-define(PASS, "pass").
+-define(AUTH, {basic_auth, {?USER, ?PASS}}).
+
+-define(TEST_MEMBER, "test_member").
+-define(TEST_MEMBER_PASS, "test_member_pass").
+-define(TEST_MEMBER_AUTH, {basic_auth, {?TEST_MEMBER, ?TEST_MEMBER_PASS}}).
+
+-define(TEST_ADMIN, "test_admin").
+-define(TEST_ADMIN_PASS, "test_admin_pass").
+-define(TEST_ADMIN_AUTH, {basic_auth, {?TEST_ADMIN, ?TEST_ADMIN_PASS}}).
+
+-define(CONTENT_JSON, {"Content-Type", "application/json"}).
+-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
+
+setup() ->
+    Hashed = couch_passwords:hash_admin_password(?PASS),
+    Persist = false,
+    ok = config:set("admins", ?USER, ?b2l(Hashed), Persist),
+    UserDb = ?tempdb(),
+    TmpDb = ?tempdb(),
+    ok = config:set("chttpd_auth", "authentication_db", ?b2l(UserDb), Persist),
+
+    Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+    Port = mochiweb_socket_server:get(chttpd, port),
+    BaseUrl = lists:concat(["http://", Addr, ":", Port, "/"]),
+    Url = lists:concat([BaseUrl, ?b2l(TmpDb)]),
+    UsersUrl = lists:concat([BaseUrl, ?b2l(UserDb)]),
+    create_db(UsersUrl),
+    create_db(Url),
+    create_design_doc(Url),
+    create_user(UsersUrl, ?TEST_MEMBER, ?TEST_MEMBER_PASS, [<<?TEST_MEMBER>>]),
+    create_user(UsersUrl, ?TEST_ADMIN, ?TEST_ADMIN_PASS, [<<?TEST_ADMIN>>]),
+    set_security(Url),
+    [Url, UsersUrl].
+
+teardown([Url, UsersUrl]) ->
+    delete_db(Url),
+    delete_db(UsersUrl),
+    ok = config:delete("admins", ?USER, _Persist=false).
+
+create_db(Url) ->
+    {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
+    ?assert(Status =:= 201 orelse Status =:= 202).
+
+create_design_doc(Url) ->
+    {ok, Status, _, _} = test_request:put(lists:concat([Url, '/_design/test']),
+        [?CONTENT_JSON, ?AUTH], "{\"id\":\"_design/test\"}"),
+    ?assert(Status =:= 201 orelse Status =:= 202).
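+
+% set_security/1 below grants db admin rights to the "test_admin" role
+% and membership to the "test_member" role, matching the roles assigned
+% to the two users created in setup/0.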
+
+set_security(Url) ->
+    SecurityUrl = lists:concat([Url, "/_security"]),
+    SecurityProperties = [
+        {<<"admins">>,{[{<<"roles">>,[<<?TEST_ADMIN>>]}]}},
+        {<<"members">>,{[{<<"roles">>,[<<?TEST_MEMBER>>]}]}}
+    ],
+
+    Body = jiffy:encode({SecurityProperties}),
+    {ok, Status, _, _} = test_request:put(SecurityUrl, [?CONTENT_JSON, ?AUTH], Body),
+    ?assert(Status =:= 200).
+
+delete_db(Url) ->
+    {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
+
+create_user(UsersUrl, Name, Password, Roles) ->
+    Body = "{\"name\":\"" ++ Name ++
+        "\",\"type\":\"user\",\"roles\":" ++
+        erlang:binary_to_list(jiffy:encode(Roles)) ++
+        ",\"password\":\"" ++ Password ++ "\"}",
+    Url = lists:concat([UsersUrl, "/org.couchdb.user:", Name]),
+    {ok, 201, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], Body).
+
+
+all_test_() ->
+    {
+        "chttpd security tests",
+        {
+            setup,
+            fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun should_allow_admin_db_compaction/1,
+                    fun should_allow_valid_password_to_create_user/1,
+                    fun should_disallow_invalid_password_to_create_user/1,
+                    fun should_disallow_anonymous_db_compaction/1,
+                    fun should_disallow_db_member_db_compaction/1,
+                    fun should_allow_db_admin_db_compaction/1,
+                    fun should_allow_admin_view_compaction/1,
+                    fun should_disallow_anonymous_view_compaction/1,
+                    fun should_allow_admin_db_view_cleanup/1,
+                    fun should_disallow_anonymous_db_view_cleanup/1,
+                    fun should_allow_admin_purge/1,
+                    fun should_disallow_anonymous_purge/1,
+                    fun should_disallow_db_member_purge/1,
+                    fun should_allow_admin_purged_infos_limit/1,
+                    fun should_disallow_anonymous_purged_infos_limit/1,
+                    fun should_disallow_db_member_purged_infos_limit/1
+                ]
+            }
+        }
+    }.
+
+security_object_validate_test_() ->
+    {
+        "chttpd security object validate tests",
+        {
+            setup,
+            fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun should_return_ok_for_sec_obj_with_roles/1,
+                    fun should_return_ok_for_sec_obj_with_names/1,
+                    fun should_return_ok_for_sec_obj_with_roles_and_names/1,
+                    fun should_return_error_for_sec_obj_with_incorrect_roles_and_names/1,
+                    fun should_return_error_for_sec_obj_with_incorrect_roles/1,
+                    fun should_return_error_for_sec_obj_with_incorrect_names/1
+                ]
+            }
+        }
+    }.
+
+should_allow_admin_db_compaction([Url, _UsersUrl]) ->
+    ?_assertEqual(true,
+        begin
+            {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact",
+                [?CONTENT_JSON, ?AUTH], ""),
+            ResultJson = ?JSON_DECODE(ResultBody),
+            {InnerJson} = ResultJson,
+            couch_util:get_value(<<"ok">>, InnerJson, undefined)
+        end).
+
+should_allow_valid_password_to_create_user([_Url, UsersUrl]) ->
+    UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",
+        \"type\": \"user\", \"roles\": [], \"password\": \"bar\"}",
+    {ok, _, _, ResultBody} = test_request:post(UsersUrl,
+        [?CONTENT_JSON, ?AUTH], UserDoc),
+    ResultJson = ?JSON_DECODE(ResultBody),
+    {InnerJson} = ResultJson,
+    ?_assertEqual(true, couch_util:get_value(<<"ok">>, InnerJson)).
+
+should_disallow_invalid_password_to_create_user([_Url, UsersUrl]) ->
+    UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",
+        \"type\": \"user\", \"roles\": [], \"password\": 123}",
+    {ok, _, _, ResultBody} = test_request:post(UsersUrl,
+        [?CONTENT_JSON, ?AUTH], UserDoc),
+    ResultJson = ?JSON_DECODE(ResultBody),
+    {InnerJson} = ResultJson,
+    ErrType = couch_util:get_value(<<"error">>, InnerJson),
+    ?_assertEqual(<<"forbidden">>, ErrType).
+ +should_disallow_anonymous_db_compaction([Url,_UsersUrl]) -> + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact", + [?CONTENT_JSON], ""), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + ErrType = couch_util:get_value(<<"error">>, InnerJson), + ?_assertEqual(<<"unauthorized">>,ErrType). + +should_disallow_db_member_db_compaction([Url,_UsersUrl]) -> + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact", + [?CONTENT_JSON, ?TEST_MEMBER_AUTH], ""), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + ErrType = couch_util:get_value(<<"error">>, InnerJson), + ?_assertEqual(<<"unauthorized">>,ErrType). + +should_allow_db_admin_db_compaction([Url,_UsersUrl]) -> + ?_assertEqual(true, + begin + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact", + [?CONTENT_JSON, ?TEST_ADMIN_AUTH], ""), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + couch_util:get_value(<<"ok">>, InnerJson, undefined) + end). + +should_allow_admin_view_compaction([Url,_UsersUrl]) -> + ?_assertEqual(true, + begin + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact/test", + [?CONTENT_JSON, ?AUTH], ""), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + couch_util:get_value(<<"ok">>, InnerJson, undefined) + end). + +should_disallow_anonymous_view_compaction([Url,_UsersUrl]) -> + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact/test", + [?CONTENT_JSON], ""), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + ErrType = couch_util:get_value(<<"error">>, InnerJson), + ?_assertEqual(<<"unauthorized">>,ErrType). + +should_allow_admin_db_view_cleanup([Url,_UsersUrl]) -> + ?_assertEqual(true, + begin + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_view_cleanup", + [?CONTENT_JSON, ?AUTH], ""), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + couch_util:get_value(<<"ok">>, InnerJson, undefined) + end). + +should_disallow_anonymous_db_view_cleanup([Url,_UsersUrl]) -> + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_view_cleanup", + [?CONTENT_JSON], ""), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + ErrType = couch_util:get_value(<<"error">>, InnerJson), + ?_assertEqual(<<"unauthorized">>, ErrType). + +should_allow_admin_purge([Url,_UsersUrl]) -> + ?_assertEqual(null, + begin + IdsRevs = "{}", + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_purge", + [?CONTENT_JSON, ?AUTH], IdsRevs), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + couch_util:get_value(<<"purge_seq">>, InnerJson, undefined) + end). + +should_disallow_anonymous_purge([Url,_UsersUrl]) -> + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_purge", + [?CONTENT_JSON], ""), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + ErrType = couch_util:get_value(<<"error">>, InnerJson), + ?_assertEqual(<<"unauthorized">>, ErrType). + +should_disallow_db_member_purge([Url,_UsersUrl]) -> + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_purge", + [?CONTENT_JSON, ?TEST_MEMBER_AUTH], ""), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + ErrType = couch_util:get_value(<<"error">>, InnerJson), + ?_assertEqual(<<"unauthorized">>,ErrType). 
+
+should_allow_admin_purged_infos_limit([Url, _UsersUrl]) ->
+    ?_assertEqual(true,
+        begin
+            {ok, _, _, ResultBody} = test_request:put(Url
+                ++ "/_purged_infos_limit/", [?CONTENT_JSON, ?AUTH], "2"),
+            ResultJson = ?JSON_DECODE(ResultBody),
+            {InnerJson} = ResultJson,
+            couch_util:get_value(<<"ok">>, InnerJson, undefined)
+        end).
+
+should_disallow_anonymous_purged_infos_limit([Url, _UsersUrl]) ->
+    {ok, _, _, ResultBody} = test_request:put(Url ++ "/_purged_infos_limit/",
+        [?CONTENT_JSON], "2"),
+    ResultJson = ?JSON_DECODE(ResultBody),
+    {InnerJson} = ResultJson,
+    ErrType = couch_util:get_value(<<"error">>, InnerJson),
+    ?_assertEqual(<<"unauthorized">>, ErrType).
+
+should_disallow_db_member_purged_infos_limit([Url, _UsersUrl]) ->
+    {ok, _, _, ResultBody} = test_request:put(Url ++ "/_purged_infos_limit/",
+        [?CONTENT_JSON, ?TEST_MEMBER_AUTH], "2"),
+    ResultJson = ?JSON_DECODE(ResultBody),
+    {InnerJson} = ResultJson,
+    ErrType = couch_util:get_value(<<"error">>, InnerJson),
+    ?_assertEqual(<<"unauthorized">>, ErrType).
+
+should_return_ok_for_sec_obj_with_roles([Url, _UsersUrl]) ->
+    SecurityUrl = lists:concat([Url, "/_security"]),
+    SecurityProperties = [
+        {<<"admins">>,{[{<<"roles">>,[<<?TEST_ADMIN>>]}]}},
+        {<<"members">>,{[{<<"roles">>,[<<?TEST_MEMBER>>]}]}}
+    ],
+
+    Body = jiffy:encode({SecurityProperties}),
+    {ok, Status, _, _} = test_request:put(SecurityUrl,
+        [?CONTENT_JSON, ?AUTH], Body),
+    ?_assertEqual(200, Status).
+
+should_return_ok_for_sec_obj_with_names([Url, _UsersUrl]) ->
+    SecurityUrl = lists:concat([Url, "/_security"]),
+    SecurityProperties = [
+        {<<"admins">>,{[{<<"names">>,[<<?TEST_ADMIN>>]}]}},
+        {<<"members">>,{[{<<"names">>,[<<?TEST_MEMBER>>]}]}}
+    ],
+
+    Body = jiffy:encode({SecurityProperties}),
+    {ok, Status, _, _} = test_request:put(SecurityUrl,
+        [?CONTENT_JSON, ?AUTH], Body),
+    ?_assertEqual(200, Status).
+
+should_return_ok_for_sec_obj_with_roles_and_names([Url, _UsersUrl]) ->
+    SecurityUrl = lists:concat([Url, "/_security"]),
+    SecurityProperties = [
+        {<<"admins">>, {[{<<"names">>,[<<?TEST_ADMIN>>]},
+            {<<"roles">>,[<<?TEST_ADMIN>>]}]}},
+        {<<"members">>,{[{<<"names">>,[<<?TEST_MEMBER>>]},
+            {<<"roles">>,[<<?TEST_MEMBER>>]}]}}
+    ],
+
+    Body = jiffy:encode({SecurityProperties}),
+    {ok, Status, _, _} = test_request:put(SecurityUrl,
+        [?CONTENT_JSON, ?AUTH], Body),
+    ?_assertEqual(200, Status).
+
+should_return_error_for_sec_obj_with_incorrect_roles_and_names(
+        [Url, _UsersUrl]) ->
+    SecurityUrl = lists:concat([Url, "/_security"]),
+    SecurityProperties = [
+        {<<"admins">>,{[{<<"names">>,[123]}]}},
+        {<<"members">>,{[{<<"roles">>,["foo"]}]}}
+    ],
+
+    Body = jiffy:encode({SecurityProperties}),
+    {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
+        [?CONTENT_JSON, ?AUTH], Body),
+    ResultJson = ?JSON_DECODE(RespBody),
+    [
+        ?_assertEqual(500, Status),
+        ?_assertEqual({[
+            {<<"error">>,<<"error">>},
+            {<<"reason">>,<<"no_majority">>}
+        ]}, ResultJson)
+    ].
+
+should_return_error_for_sec_obj_with_incorrect_roles([Url, _UsersUrl]) ->
+    SecurityUrl = lists:concat([Url, "/_security"]),
+    SecurityProperties = [
+        {<<"admins">>,{[{<<"roles">>,[?TEST_ADMIN]}]}},
+        {<<"members">>,{[{<<"roles">>,[<<?TEST_MEMBER>>]}]}}
+    ],
+
+    Body = jiffy:encode({SecurityProperties}),
+    {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
+        [?CONTENT_JSON, ?AUTH], Body),
+    ResultJson = ?JSON_DECODE(RespBody),
+    [
+        ?_assertEqual(500, Status),
+        ?_assertEqual({[
+            {<<"error">>,<<"error">>},
+            {<<"reason">>,<<"no_majority">>}
+        ]}, ResultJson)
+    ].
+
+should_return_error_for_sec_obj_with_incorrect_names([Url, _UsersUrl]) ->
+    SecurityUrl = lists:concat([Url, "/_security"]),
+    SecurityProperties = [
+        {<<"admins">>,{[{<<"names">>,[<<?TEST_ADMIN>>]}]}},
+        {<<"members">>,{[{<<"names">>,[?TEST_MEMBER]}]}}
+    ],
+
+    Body = jiffy:encode({SecurityProperties}),
+    {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
+        [?CONTENT_JSON, ?AUTH], Body),
+    ResultJson = ?JSON_DECODE(RespBody),
+    [
+        ?_assertEqual(500, Status),
+        ?_assertEqual({[
+            {<<"error">>,<<"error">>},
+            {<<"reason">>,<<"no_majority">>}
+        ]}, ResultJson)
+    ].
diff --git a/src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl b/src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl
new file mode 100644
index 000000000..937880621
--- /dev/null
+++ b/src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl
@@ -0,0 +1,127 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_socket_buffer_size_test).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(USER, "chttpd_db_socket_buffer_size_test_admin").
+-define(PASS, "pass").
+-define(AUTH, {basic_auth, {?USER, ?PASS}}).
+-define(CONTENT_JSON, {"Content-Type", "application/json"}).
+
+
+setup(SocketOpts) ->
+    StartCtx = start_couch_with_cfg(SocketOpts),
+    Db = ?tempdb(),
+    create_db(url(Db)),
+    {StartCtx, Db}.
+
+
+teardown(_, {StartCtx, Db}) ->
+    delete_db(url(Db)),
+    ok = config:delete("admins", ?USER, _Persist=false),
+    test_util:stop_couch(StartCtx).
+
+
+socket_buffer_size_test_() ->
+    {
+        "chttpd socket_buffer_size_test",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [
+                {"[{recbuf, undefined}]", fun default_buffer/2},
+                {"[{recbuf, 1024}]", fun small_recbuf/2},
+                {"[{buffer, 1024}]", fun small_buffer/2}
+            ]
+        }
+    }.
+
+
+small_recbuf(_, {_, Db}) ->
+    {timeout, 30, ?_test(begin
+        Id = data(2048),
+        Response = put_req(url(Db) ++ "/" ++ Id, "{}"),
+        ?assert(Response =:= 400 orelse Response =:= request_failed)
+    end)}.
+
+
+small_buffer(_, {_, Db}) ->
+    {timeout, 30, ?_test(begin
+        Id = data(2048),
+        Response = put_req(url(Db) ++ "/" ++ Id, "{}"),
+        ?assert(Response =:= 400 orelse Response =:= request_failed)
+    end)}.
+
+
+default_buffer(_, {_, Db}) ->
+    {timeout, 30, ?_test(begin
+        Id = data(7000),
+        Headers = [{"Blah", data(7000)}],
+        Status = put_req(url(Db) ++ "/" ++ Id, Headers, "{}"),
+        ?assert(Status =:= 201 orelse Status =:= 202)
+    end)}.
+
+
+% Helper functions
+
+url() ->
+    Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+    Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
+    "http://" ++ Addr ++ ":" ++ Port.
+
+
+url(Db) ->
+    url() ++ "/" ++ ?b2l(Db).
+
+
+create_db(Url) ->
+    Status = put_req(Url ++ "?q=1&n=1", "{}"),
+    ?assert(Status =:= 201 orelse Status =:= 202).
+
+
+delete_db(Url) ->
+    {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
+
+
+put_req(Url, Body) ->
+    put_req(Url, [], Body).
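+
+% put_req/3 (below) normalizes transport failures: an oversized request
+% against a small socket buffer may surface either as an HTTP 400 or as
+% a client-side error term (e.g. request_failed), and both are returned
+% as plain values so the buffer tests above can assert on either.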
+ + +put_req(Url, Headers, Body) -> + AllHeaders = Headers ++ [?CONTENT_JSON, ?AUTH], + case test_request:put(Url, AllHeaders, Body) of + {ok, Status, _, _} -> Status; + {error, Error} -> Error + end. + + +data(Size) -> + string:copies("x", Size). + + +append_to_cfg_chain(Cfg) -> + CfgDir = filename:dirname(lists:last(?CONFIG_CHAIN)), + CfgFile = filename:join([CfgDir, "chttpd_socket_buffer_extra_cfg.ini"]), + CfgSect = io_lib:format("[chttpd]~nserver_options = ~s~n", [Cfg]), + ok = file:write_file(CfgFile, CfgSect), + ?CONFIG_CHAIN ++ [CfgFile]. + + +start_couch_with_cfg(Cfg) -> + CfgChain = append_to_cfg_chain(Cfg), + StartCtx = test_util:start_couch(CfgChain, [chttpd]), + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + StartCtx. diff --git a/src/chttpd/test/eunit/chttpd_view_test.erl b/src/chttpd/test/eunit/chttpd_view_test.erl new file mode 100644 index 000000000..3457c6f30 --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_view_test.erl @@ -0,0 +1,123 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_view_test). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(USER, "chttpd_view_test_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(CONTENT_JSON, {"Content-Type", "application/json"}). +-define(DDOC, "{\"_id\": \"_design/bar\", \"views\": {\"baz\": + {\"map\": \"function(doc) {emit(doc._id, doc._id);}\"}}}"). + +-define(FIXTURE_TXT, ?ABS_PATH(?FILE)). +-define(i2l(I), integer_to_list(I)). + +setup() -> + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + TmpDb = ?tempdb(), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), + create_db(Url), + Url. + +teardown(Url) -> + delete_db(Url), + ok = config:delete("admins", ?USER, _Persist=false). + +create_db(Url) -> + {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), + ?assert(Status =:= 201 orelse Status =:= 202). + + +create_doc(Url, Id) -> + test_request:put(Url ++ "/" ++ Id, + [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}"). + +delete_db(Url) -> + {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). + +all_view_test_() -> + { + "chttpd view tests", + { + setup, + fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_succeed_on_view_with_queries_keys/1, + fun should_succeed_on_view_with_queries_limit_skip/1, + fun should_succeed_on_view_with_multiple_queries/1 + ] + } + } + }. 
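+
+% Each test below POSTs to the view "queries" endpoint, which runs every
+% object in the "queries" array as an independent view request and
+% returns one entry per query under the top-level "results" key.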
+ + +should_succeed_on_view_with_queries_keys(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + {ok, _, _, _} = test_request:put(Url ++ "/_design/bar", + [?CONTENT_JSON, ?AUTH], ?DDOC), + QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", + \"testdoc8\"]}]}", + {ok, _, _, RespBody} = test_request:post(Url ++ "/_design/bar/" + ++ "_view/baz/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) + end). + + +should_succeed_on_view_with_queries_limit_skip(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + {ok, _, _, _} = test_request:put(Url ++ "/_design/bar", + [?CONTENT_JSON, ?AUTH], ?DDOC), + QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ "/_design/bar/" + ++ "_view/baz/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)), + ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) + end). + + +should_succeed_on_view_with_multiple_queries(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + {ok, _, _, _} = test_request:put(Url ++ "/_design/bar", + [?CONTENT_JSON, ?AUTH], ?DDOC), + QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", + \"testdoc8\"]}, {\"limit\": 5, \"skip\": 2}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ "/_design/bar/" + ++ "_view/baz/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson1} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), + {InnerJson2} = lists:nth(2, ResultJsonBody), + ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), + ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) + end). diff --git a/src/chttpd/test/eunit/chttpd_welcome_test.erl b/src/chttpd/test/eunit/chttpd_welcome_test.erl new file mode 100644 index 000000000..e427f4dff --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_welcome_test.erl @@ -0,0 +1,104 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_welcome_test). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(USER, "chttpd_db_test_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(CONTENT_JSON, {"Content-Type", "application/json"}). 
+ + +setup() -> + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + Url = lists:concat(["http://", Addr, ":", Port, "/"]), + Url. + + +teardown(_Url) -> + ok = config:delete("admins", ?USER, _Persist=false). + + +welcome_test_() -> + { + "chttpd welcome endpoint tests", + { + setup, + fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_have_version/1, + fun should_have_features/1, + fun should_have_uuid/1 + ] + } + } + }. + +should_have_uuid(Url) -> + ?_test(begin + {ok, Status, _, Body} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]), + ?assertEqual(200, Status), + {Json} = ?JSON_DECODE(Body), + CouchDB = couch_util:get_value(<<"couchdb">>, Json, undefined), + Uuid = couch_util:get_value(<<"uuid">>, Json, undefined), + Features = couch_util:get_value(<<"features">>, Json, undefined), + Sha = couch_util:get_value(<<"git_sha">>, Json, undefined), + ?assertNotEqual(Sha, undefined), + ?assertEqual(<<"Welcome">>, CouchDB), + RealUuid = couch_server:get_uuid(), + + ?assertEqual(RealUuid, Uuid), + ?assert(is_list(Features)) + end). + + +should_have_version(Url) -> + ?_test(begin + {ok, Status, _, Body} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]), + ?assertEqual(200, Status), + {Json} = ?JSON_DECODE(Body), + Version = couch_util:get_value(<<"version">>, Json, undefined), + CouchDB = couch_util:get_value(<<"couchdb">>, Json, undefined), + Features = couch_util:get_value(<<"features">>, Json, undefined), + Sha = couch_util:get_value(<<"git_sha">>, Json, undefined), + ?assertNotEqual(Sha, undefined), + ?assertEqual(<<"Welcome">>, CouchDB), + RealVersion = list_to_binary(couch_server:get_version()), + ?assertEqual(RealVersion, Version), + ?assert(is_list(Features)) + end). + + +should_have_features(Url) -> + ?_test(begin + config:enable_feature(snek), + {ok, 200, _, Body1} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]), + {Json1} = ?JSON_DECODE(Body1), + Features1 = couch_util:get_value(<<"features">>, Json1, undefined), + ?assert(is_list(Features1)), + ?assert(lists:member(<<"snek">>, Features1)), + config:disable_feature(snek), + {ok, 200, _, Body2} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]), + {Json2} = ?JSON_DECODE(Body2), + Features2 = couch_util:get_value(<<"features">>, Json2, undefined), + ?assert(is_list(Features2)), + ?assertNot(lists:member(<<"snek">>, Features2)) + end). diff --git a/src/chttpd/test/eunit/chttpd_xframe_test.erl b/src/chttpd/test/eunit/chttpd_xframe_test.erl new file mode 100644 index 000000000..1272c198c --- /dev/null +++ b/src/chttpd/test/eunit/chttpd_xframe_test.erl @@ -0,0 +1,84 @@ +-module(chttpd_xframe_test). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +setup() -> + ok = meck:new(config), + ok = meck:expect(config, get, fun(_, _, _) -> "X-Forwarded-Host" end), + ok. + +teardown(_) -> + meck:unload(config). + +mock_request() -> + Headers = mochiweb_headers:make([{"Host", "examples.com"}]), + MochiReq = mochiweb_request:new(nil, 'GET', '/', {1, 1}, Headers), + #httpd{mochi_req = MochiReq}. + +config_disabled() -> + [ + {enabled, false} + ]. + +config_sameorigin() -> + [ + {enabled, true}, + {same_origin, true} + ]. + +config_wildcard() -> + [ + {enabled, true}, + {same_origin, false}, + {hosts, ["*"]} + ]. 
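+
+% Each config_*/0 helper models one x_frame_options configuration; the
+% tests feed these directly into chttpd_xframe_options:header/3 and
+% check the resulting X-Frame-Options response header.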
+
+config_specific_hosts() ->
+    [
+        {enabled, true},
+        {same_origin, false},
+        {hosts, ["http://couchdb.org", "http://examples.com"]}
+    ].
+
+config_different_specific_hosts() ->
+    [
+        {enabled, true},
+        {same_origin, false},
+        {hosts, ["http://couchdb.org"]}
+    ].
+
+no_header_if_xframe_disabled_test() ->
+    Headers = chttpd_xframe_options:header(mock_request(), [], config_disabled()),
+    ?assertEqual(Headers, []).
+
+enabled_with_same_origin_test() ->
+    Headers = chttpd_xframe_options:header(mock_request(), [], config_sameorigin()),
+    ?assertEqual(Headers, [{"X-Frame-Options", "SAMEORIGIN"}]).
+
+
+xframe_host_test_() ->
+    {
+        "xframe host tests",
+        {
+            foreach, fun setup/0, fun teardown/1,
+            [
+                fun allow_with_wildcard_host/1,
+                fun allow_with_specific_host/1,
+                fun deny_with_different_host/1
+            ]
+        }
+    }.
+
+allow_with_wildcard_host(_) ->
+    Headers = chttpd_xframe_options:header(mock_request(), [], config_wildcard()),
+    ?_assertEqual([{"X-Frame-Options", "ALLOW-FROM http://examples.com"}], Headers).
+
+allow_with_specific_host(_) ->
+    Headers = chttpd_xframe_options:header(mock_request(), [], config_specific_hosts()),
+    ?_assertEqual([{"X-Frame-Options", "ALLOW-FROM http://examples.com"}], Headers).
+
+deny_with_different_host(_) ->
+    Headers = chttpd_xframe_options:header(mock_request(), [], config_different_specific_hosts()),
+    ?_assertEqual([{"X-Frame-Options", "DENY"}], Headers).
diff --git a/src/couch/include/couch_eunit.hrl b/src/couch/include/couch_eunit.hrl
index f4617e1d3..d3611c88b 100644
--- a/src/couch/include/couch_eunit.hrl
+++ b/src/couch/include/couch_eunit.hrl
@@ -28,7 +28,7 @@
     filename:join([?BUILDDIR(), "tmp", "etc", "local_eunit.ini"]),
     filename:join([?BUILDDIR(), "tmp", "etc", "eunit.ini"])]).
 -define(FIXTURESDIR,
-    filename:join([?BUILDDIR(), "src", "couch", "test", "fixtures"])).
+    filename:join([?BUILDDIR(), "src", "couch", "test", "eunit", "fixtures"])).
 -define(TEMPDIR,
     filename:join([?BUILDDIR(), "tmp", "tmp_data"])).
diff --git a/src/couch/test/chttpd_endpoints_tests.erl b/src/couch/test/chttpd_endpoints_tests.erl
deleted file mode 100644
index 9b7430823..000000000
--- a/src/couch/test/chttpd_endpoints_tests.erl
+++ /dev/null
@@ -1,184 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_endpoints_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-setup("mocked") ->
-    fun setup_mocked/1;
-setup("not_mocked") ->
-    fun setup_not_mocked/1.
-
-setup_mocked({Endpoint, {_Path, Module, Function}}) ->
-    catch meck:unload(Module),
-    meck:new(Module, [passthrough, non_strict]),
-    Expected = mock_handler(Endpoint, Module, Function),
-    Expected.
-
-setup_not_mocked({_Endpoint, {_Path, Module, _Function}}) ->
-    catch meck:unload(Module),
-    meck:new(Module, [non_strict]),
-    ok.
-
-teardown({_Endpoint, {Module, _F, _A}}, _) ->
-    catch meck:unload(Module),
-    ok.
- -handlers(url_handler) -> - [ - {<<"">>, chttpd_misc, handle_welcome_req}, - {<<"favicon.ico">>, chttpd_misc, handle_favicon_req}, - {<<"_utils">>, chttpd_misc, handle_utils_dir_req}, - {<<"_all_dbs">>, chttpd_misc, handle_all_dbs_req}, - {<<"_dbs_info">>, chttpd_misc, handle_dbs_info_req}, - {<<"_active_tasks">>, chttpd_misc, handle_task_status_req}, - {<<"_node">>, chttpd_misc, handle_node_req}, - {<<"_reload_query_servers">>, chttpd_misc, handle_reload_query_servers_req}, - {<<"_replicate">>, chttpd_misc, handle_replicate_req}, - {<<"_uuids">>, chttpd_misc, handle_uuids_req}, - {<<"_session">>, chttpd_auth, handle_session_req}, - {<<"_up">>, chttpd_misc, handle_up_req}, - {<<"_membership">>, mem3_httpd, handle_membership_req}, - {<<"_db_updates">>, global_changes_httpd, handle_global_changes_req}, - {<<"_cluster_setup">>, setup_httpd, handle_setup_req}, - {<<"anything">>, chttpd_db, handle_request} - ]; -handlers(db_handler) -> - [ - {<<"_view_cleanup">>, chttpd_db, handle_view_cleanup_req}, - {<<"_compact">>, chttpd_db, handle_compact_req}, - {<<"_design">>, chttpd_db, handle_design_req}, - {<<"_temp_view">>, chttpd_view, handle_temp_view_req}, - {<<"_changes">>, chttpd_db, handle_changes_req}, - {<<"_shards">>, mem3_httpd, handle_shards_req}, - {<<"_index">>, mango_httpd, handle_req}, - {<<"_explain">>, mango_httpd, handle_req}, - {<<"_find">>, mango_httpd, handle_req} - ]; -handlers(design_handler) -> - [ - {<<"_view">>, chttpd_view, handle_view_req}, - {<<"_show">>, chttpd_show, handle_doc_show_req}, - {<<"_list">>, chttpd_show, handle_view_list_req}, - {<<"_update">>, chttpd_show, handle_doc_update_req}, - {<<"_info">>, chttpd_db, handle_design_info_req}, - {<<"_rewrite">>, chttpd_rewrite, handle_rewrite_req} - ]. - -endpoints_test_() -> - { - "Checking dynamic endpoints", - { - setup, - fun() -> test_util:start_couch([chttpd, mem3, global_changes, mango, setup]) end, - fun test_util:stop/1, - [ - check_dynamic_endpoints( - "mocked", url_handler, fun ensure_called/2), - check_dynamic_endpoints( - "mocked", db_handler, fun ensure_called/2), - check_dynamic_endpoints( - "mocked", design_handler, fun ensure_called/2), - check_dynamic_endpoints( - "not_mocked", url_handler, fun verify_we_fail_if_missing/2), - check_dynamic_endpoints( - "not_mocked", db_handler, fun verify_we_fail_if_missing/2), - check_dynamic_endpoints( - "not_mocked", design_handler, fun verify_we_fail_if_missing/2) - ] - } - }. - -check_dynamic_endpoints(Setup, EndpointType, TestFun) -> - { - "Checking '" - ++ atom_to_list(EndpointType) - ++ "' [" ++ Setup ++ "] dynamic endpoints", - [ - make_test_case(Setup, EndpointType, Spec, TestFun) - || Spec <- handlers(EndpointType) - ] - }. - -make_test_case(Setup, EndpointType, {Path, Module, Function}, TestFun) -> - { - lists:flatten(io_lib:format("~s -- \"~s\"", [EndpointType, ?b2l(Path)])), - { - foreachx, setup(Setup), fun teardown/2, - [ - {{EndpointType, {Path, Module, Function}}, TestFun} - ] - } - }. - - -mock_handler(url_handler = Endpoint, M, F) -> - meck:expect(M, F, fun(X) -> {return, Endpoint, X} end), - fun M:F/1; -mock_handler(db_handler = Endpoint, M, F) -> - meck:expect(M, F, fun(X, Y) -> {return, Endpoint, X, Y} end), - fun M:F/2; -mock_handler(design_handler = Endpoint, M, F) -> - meck:expect(M, F, fun(X, Y, Z) -> {return, Endpoint, X, Y, Z} end), - fun M:F/3. 
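% Editor's note: mock_handler/3 leans on two properties worth making
% explicit: meck:expect/3 (re)loads Module with the stubbed export, and an
% external fun written as `fun M:F/A` afterwards compares equal to any
% other `fun M:F/A`, which is what lets ensure_called/2 below assert on
% the fun returned by the handler registry. A self-contained sketch of
% the same trick (the module name fake_mod is hypothetical):
%
%     ok = meck:new(fake_mod, [non_strict]),
%     ok = meck:expect(fake_mod, handle, fun(Req) -> {mocked, Req} end),
%     Handler = fun fake_mod:handle/1,
%     {mocked, req} = Handler(req),
%     ok = meck:unload(fake_mod).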
- -ensure_called({url_handler = Endpoint, {Path, _M, _Fun}}, ExpectedFun) -> - HandlerFun = handler(Endpoint, Path), - ?_test(begin - ?assertEqual(ExpectedFun, HandlerFun), - ?assertMatch({return, Endpoint, x}, HandlerFun(x)) - end); -ensure_called({db_handler = Endpoint, {Path, _M, _Fun}}, ExpectedFun) -> - HandlerFun = handler(Endpoint, Path), - ?_test(begin - ?assertEqual(ExpectedFun, HandlerFun), - ?assertMatch({return, Endpoint, x, y}, HandlerFun(x, y)) - end); -ensure_called({design_handler = Endpoint, {Path, _M, _Fun}}, ExpectedFun) -> - HandlerFun = handler(Endpoint, Path), - ?_test(begin - ?assertEqual(ExpectedFun, HandlerFun), - ?assertMatch({return, Endpoint, x, y, z}, HandlerFun(x, y, z)) - end). - -%% Test the test: when the final target function is missing, -%% the Fun call must fail. -verify_we_fail_if_missing({url_handler = Endpoint, {Path, _M, _Fun}}, _) -> - HandlerFun = handler(Endpoint, Path), - ?_test(begin - ?assert(is_function(HandlerFun)), - ?assertError(undef, HandlerFun(x)) - end); -verify_we_fail_if_missing({db_handler = Endpoint, {Path, _M, _Fun}}, _) -> - HandlerFun = handler(Endpoint, Path), - ?_test(begin - ?assert(is_function(HandlerFun)), - ?assertError(undef, HandlerFun(x, y)) - end); -verify_we_fail_if_missing({design_handler = Endpoint, {Path, _M, _Fun}}, _) -> - HandlerFun = handler(Endpoint, Path), - ?_test(begin - ?assert(is_function(HandlerFun)), - ?assertError(undef, HandlerFun(x, y, z)) - end). - -handler(url_handler, HandlerKey) -> - chttpd_handlers:url_handler(HandlerKey, fun chttpd_db:handle_request/1); -handler(db_handler, HandlerKey) -> - chttpd_handlers:db_handler(HandlerKey, fun chttpd_db:db_req/2); -handler(design_handler, HandlerKey) -> - chttpd_handlers:design_handler(HandlerKey, fun dummy/3). - -dummy(_, _, _) -> - throw(error). diff --git a/src/couch/test/couch_auth_cache_tests.erl b/src/couch/test/couch_auth_cache_tests.erl deleted file mode 100644 index 706c0cee9..000000000 --- a/src/couch/test/couch_auth_cache_tests.erl +++ /dev/null @@ -1,384 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_auth_cache_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(SALT, <<"SALT">>). --define(DB_TIMEOUT, 15000). - -start() -> - test_util:start_couch([ioq]). - - -setup() -> - DbName = ?tempdb(), - config:set("couch_httpd_auth", "authentication_db", - ?b2l(DbName), false), - DbName. - -teardown(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok. 
- - -couch_auth_cache_test_() -> - { - "CouchDB auth cache tests", - { - setup, - fun start/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_get_nil_on_missed_cache/1, - fun should_get_right_password_hash/1, - fun should_ensure_doc_hash_equals_cached_one/1, - fun should_update_password/1, - fun should_cleanup_cache_after_userdoc_deletion/1, - fun should_restore_cache_after_userdoc_recreation/1, - fun should_drop_cache_on_auth_db_change/1, - fun should_restore_cache_on_auth_db_change/1, - fun should_recover_cache_after_shutdown/1, - fun should_close_old_db_on_auth_db_change/1, - fun should_get_admin_from_config/1 - ] - } - } - }. - -auth_vdu_test_() -> - Cases = [ - %% Old , New , Result - %% [Roles, Type] , [Roles, Type] , - - %% Updating valid user doc with valid one - {[custom, user], [custom, user], "ok"}, - - %% Updating invalid doc (missing type or roles field) with valid one - {[missing, missing], [custom, user], "ok"}, - {[missing, user], [custom, user], "ok"}, - {[custom, missing], [custom, user], "ok"}, - - %% Updating invalid doc (wrong type) with valid one - {[missing, other], [custom, user], "ok"}, - {[custom, other], [custom, user], "ok"}, - - %% Updating valid document with invalid one - {[custom, user], [missing, missing], "doc.type must be user"}, - {[custom, user], [missing, user], "doc.roles must exist"}, - {[custom, user], [custom, missing], "doc.type must be user"}, - {[custom, user], [missing, other], "doc.type must be user"}, - {[custom, user], [custom, other], "doc.type must be user"}, - - %% Updating invalid doc with invalid one - {[missing, missing], [missing, missing], "doc.type must be user"}, - {[missing, missing], [missing, user], "doc.roles must exist"}, - {[missing, missing], [custom, missing], "doc.type must be user"}, - {[missing, missing], [missing, other], "doc.type must be user"}, - {[missing, missing], [custom, other], "doc.type must be user"}, - - {[missing, user], [missing, missing], "doc.type must be user"}, - {[missing, user], [missing, user], "doc.roles must exist"}, - {[missing, user], [custom, missing], "doc.type must be user"}, - {[missing, user], [missing, other], "doc.type must be user"}, - {[missing, user], [custom, other], "doc.type must be user"}, - - {[missing, other], [missing, missing], "doc.type must be user"}, - {[missing, other], [missing, user], "doc.roles must exist"}, - {[missing, other], [custom, missing], "doc.type must be user"}, - {[missing, other], [missing, other], "doc.type must be user"}, - {[missing, other], [custom, other], "doc.type must be user"}, - - {[custom, missing], [missing, missing], "doc.type must be user"}, - {[custom, missing], [missing, user], "doc.roles must exist"}, - {[custom, missing], [custom, missing], "doc.type must be user"}, - {[custom, missing], [missing, other], "doc.type must be user"}, - {[custom, missing], [custom, other], "doc.type must be user"}, - - {[custom, other], [missing, missing], "doc.type must be user"}, - {[custom, other], [missing, user], "doc.roles must exist"}, - {[custom, other], [custom, missing], "doc.type must be user"}, - {[custom, other], [missing, other], "doc.type must be user"}, - {[custom, other], [custom, other], "doc.type must be user"} - ], - - %% Make sure we covered all combinations - AllPossibleDocs = couch_tests_combinatorics:product([ - [missing, custom], - [missing, user, other] - ]), - AllPossibleCases = couch_tests_combinatorics:product( - [AllPossibleDocs, AllPossibleDocs]), - ?assertEqual([], AllPossibleCases -- 
[[A, B] || {A, B, _} <- Cases]), - - { - "Check User doc validation", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - [ - make_validate_test(Case) || Case <- Cases - ] - } - }. - -should_get_nil_on_missed_cache(_) -> - ?_assertEqual(nil, couch_auth_cache:get_user_creds("joe")). - -should_get_right_password_hash(DbName) -> - ?_test(begin - PasswordHash = hash_password("pass1"), - {ok, _} = update_user_doc(DbName, "joe", "pass1"), - {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), - ?assertEqual(PasswordHash, - couch_util:get_value(<<"password_sha">>, Creds)) - end). - -should_ensure_doc_hash_equals_cached_one(DbName) -> - ?_test(begin - {ok, _} = update_user_doc(DbName, "joe", "pass1"), - {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), - - CachedHash = couch_util:get_value(<<"password_sha">>, Creds), - StoredHash = get_user_doc_password_sha(DbName, "joe"), - ?assertEqual(StoredHash, CachedHash) - end). - -should_update_password(DbName) -> - ?_test(begin - PasswordHash = hash_password("pass2"), - {ok, Rev} = update_user_doc(DbName, "joe", "pass1"), - {ok, _} = update_user_doc(DbName, "joe", "pass2", Rev), - {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), - ?assertEqual(PasswordHash, - couch_util:get_value(<<"password_sha">>, Creds)) - end). - -should_cleanup_cache_after_userdoc_deletion(DbName) -> - ?_test(begin - {ok, _} = update_user_doc(DbName, "joe", "pass1"), - delete_user_doc(DbName, "joe"), - ?assertEqual(nil, couch_auth_cache:get_user_creds("joe")) - end). - -should_restore_cache_after_userdoc_recreation(DbName) -> - ?_test(begin - PasswordHash = hash_password("pass5"), - {ok, _} = update_user_doc(DbName, "joe", "pass1"), - delete_user_doc(DbName, "joe"), - ?assertEqual(nil, couch_auth_cache:get_user_creds("joe")), - - {ok, _} = update_user_doc(DbName, "joe", "pass5"), - {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), - - ?assertEqual(PasswordHash, - couch_util:get_value(<<"password_sha">>, Creds)) - end). - -should_drop_cache_on_auth_db_change(DbName) -> - ?_test(begin - {ok, _} = update_user_doc(DbName, "joe", "pass1"), - full_commit(DbName), - config:set("couch_httpd_auth", "authentication_db", - ?b2l(?tempdb()), false), - ?assertEqual(nil, couch_auth_cache:get_user_creds("joe")) - end). - -should_restore_cache_on_auth_db_change(DbName) -> - ?_test(begin - PasswordHash = hash_password("pass1"), - {ok, _} = update_user_doc(DbName, "joe", "pass1"), - {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), - full_commit(DbName), - - DbName1 = ?tempdb(), - config:set("couch_httpd_auth", "authentication_db", - ?b2l(DbName1), false), - - {ok, _} = update_user_doc(DbName1, "joe", "pass5"), - full_commit(DbName1), - - config:set("couch_httpd_auth", "authentication_db", - ?b2l(DbName), false), - - {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), - ?assertEqual(PasswordHash, - couch_util:get_value(<<"password_sha">>, Creds)) - end). - -should_recover_cache_after_shutdown(DbName) -> - ?_test(begin - PasswordHash = hash_password("pass2"), - {ok, Rev0} = update_user_doc(DbName, "joe", "pass1"), - {ok, Rev1} = update_user_doc(DbName, "joe", "pass2", Rev0), - full_commit(DbName), - shutdown_db(DbName), - {ok, Rev1} = get_doc_rev(DbName, "joe"), - ?assertEqual(PasswordHash, get_user_doc_password_sha(DbName, "joe")) - end). 
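% Editor's note on the coverage check at the top of auth_vdu_test_/0:
% couch_tests_combinatorics:product/1 takes a list of lists and returns
% their cartesian product, e.g. product([[a, b], [x, y]]) enumerates
% [a, x], [a, y], [b, x] and [b, y] (result order does not matter here).
% AllPossibleDocs is therefore the 2 x 3 = 6 possible [Roles, Type]
% shapes, AllPossibleCases pairs every old shape with every new one
% (6 x 6 = 36), and the `AllPossibleCases -- [[A, B] || {A, B, _} <- Cases]`
% subtraction makes the suite fail if any of the 36 transitions is
% missing from the Cases table.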
- -should_close_old_db_on_auth_db_change(DbName) -> - {timeout, ?DB_TIMEOUT, ?_test(begin - ?assertEqual(ok, wait_db(DbName, fun is_opened/1)), - config:set("couch_httpd_auth", "authentication_db", - ?b2l(?tempdb()), false), - ?assertEqual(ok, wait_db(DbName, fun is_closed/1)) - end)}. - -should_get_admin_from_config(_DbName) -> - ?_test(begin - config:set("admins", "testadmin", "password", false), - Creds = test_util:wait(fun() -> - case couch_auth_cache:get_user_creds("testadmin") of - {ok, Creds0, _} -> Creds0; - nil -> wait - end - end), - Roles = couch_util:get_value(<<"roles">>, Creds), - ?assertEqual([<<"_admin">>], Roles) - end). - -update_user_doc(DbName, UserName, Password) -> - update_user_doc(DbName, UserName, Password, nil). - -update_user_doc(DbName, UserName, Password, Rev) -> - User = iolist_to_binary(UserName), - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"org.couchdb.user:", User/binary>>}, - {<<"name">>, User}, - {<<"type">>, <<"user">>}, - {<<"salt">>, ?SALT}, - {<<"password_sha">>, hash_password(Password)}, - {<<"roles">>, []} - ] ++ case Rev of - nil -> []; - _ -> [{<<"_rev">>, Rev}] - end - }), - {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), - {ok, NewRev} = couch_db:update_doc(AuthDb, Doc, []), - ok = couch_db:close(AuthDb), - {ok, couch_doc:rev_to_str(NewRev)}. - -wait_db(Db, DbFun) -> - test_util:wait(fun() -> - case DbFun(Db) of - true -> - ok; - false -> - wait - end - end, ?DB_TIMEOUT, 500). - - -hash_password(Password) -> - ?l2b(couch_util:to_hex(crypto:hash(sha, iolist_to_binary([Password, ?SALT])))). - -shutdown_db(DbName) -> - {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), - ok = couch_db:close(AuthDb), - couch_util:shutdown_sync(couch_db:get_pid(AuthDb)), - ok = timer:sleep(1000). - -get_doc_rev(DbName, UserName) -> - DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]), - {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), - UpdateRev = - case couch_db:open_doc(AuthDb, DocId, []) of - {ok, Doc} -> - {Props} = couch_doc:to_json_obj(Doc, []), - couch_util:get_value(<<"_rev">>, Props); - {not_found, missing} -> - nil - end, - ok = couch_db:close(AuthDb), - {ok, UpdateRev}. - -get_user_doc_password_sha(DbName, UserName) -> - DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]), - {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), - {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []), - ok = couch_db:close(AuthDb), - {Props} = couch_doc:to_json_obj(Doc, []), - couch_util:get_value(<<"password_sha">>, Props). - -delete_user_doc(DbName, UserName) -> - DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]), - {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), - {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []), - {Props} = couch_doc:to_json_obj(Doc, []), - DeletedDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DocId}, - {<<"_rev">>, couch_util:get_value(<<"_rev">>, Props)}, - {<<"_deleted">>, true} - ]}), - {ok, _} = couch_db:update_doc(AuthDb, DeletedDoc, []), - ok = couch_db:close(AuthDb). - -full_commit(DbName) -> - {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), - {ok, _} = couch_db:ensure_full_commit(AuthDb), - ok = couch_db:close(AuthDb). - -is_opened(DbName) -> - {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), - Monitors = couch_db:monitored_by(AuthDb) -- [self()], - ok = couch_db:close(AuthDb), - Monitors /= []. - -is_closed(DbName) -> - not is_opened(DbName). 
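% Editor's note: is_opened/1 works because opening a database registers
% the calling process among the db's monitors; subtracting self() from
% couch_db:monitored_by/1 asks whether anyone *else* still holds the db
% open. should_close_old_db_on_auth_db_change/1 uses that to observe the
% auth cache letting go of the old database once authentication_db is
% repointed.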
- -make_validate_test({Old, New, "ok"} = Case) -> - {test_id(Case), ?_assertEqual(ok, validate(doc(Old), doc(New)))}; -make_validate_test({Old, New, Reason} = Case) -> - Failure = ?l2b(Reason), - {test_id(Case), ?_assertThrow({forbidden, Failure}, validate(doc(Old), doc(New)))}. - -test_id({[OldRoles, OldType], [NewRoles, NewType], Result}) -> - lists:flatten(io_lib:format( - "(roles: ~w, type: ~w) -> (roles: ~w, type: ~w) ==> \"~s\"", - [OldRoles, OldType, NewRoles, NewType, Result])). - -doc([Roles, Type]) -> - couch_doc:from_json_obj({[ - {<<"_id">>,<<"org.couchdb.user:foo">>}, - {<<"_rev">>,<<"1-281c81adb1bf10927a6160f246dc0468">>}, - {<<"name">>,<<"foo">>}, - {<<"password_scheme">>,<<"simple">>}, - {<<"salt">>,<<"00000000000000000000000000000000">>}, - {<<"password_sha">>, <<"111111111111111111111111111111111111">>}] - ++ type(Type) ++ roles(Roles)}). - -roles(custom) -> [{<<"roles">>, [<<"custom">>]}]; -roles(missing) -> []. - -type(user) -> [{<<"type">>, <<"user">>}]; -type(other) -> [{<<"type">>, <<"other">>}]; -type(missing) -> []. - -validate(DiskDoc, NewDoc) -> - JSONCtx = {[ - {<<"db">>, <<"foo/bar">>}, - {<<"name">>, <<"foo">>}, - {<<"roles">>, [<<"_admin">>]} - ]}, - validate(DiskDoc, NewDoc, JSONCtx). - -validate(DiskDoc, NewDoc, JSONCtx) -> - {ok, DDoc0} = couch_auth_cache:auth_design_doc(<<"_design/anything">>), - DDoc = DDoc0#doc{revs = {1, [<<>>]}}, - couch_query_servers:validate_doc_update(DDoc, NewDoc, DiskDoc, JSONCtx, []). diff --git a/src/couch/test/couch_base32_tests.erl b/src/couch/test/couch_base32_tests.erl deleted file mode 100644 index 7e4d59a09..000000000 --- a/src/couch/test/couch_base32_tests.erl +++ /dev/null @@ -1,28 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_base32_tests). - --include_lib("eunit/include/eunit.hrl"). - -base32_test() -> - roundtrip(<<"">>, <<"">>), - roundtrip(<<"f">>, <<"MY======">>), - roundtrip(<<"fo">>, <<"MZXQ====">>), - roundtrip(<<"foo">>, <<"MZXW6===">>), - roundtrip(<<"foob">>, <<"MZXW6YQ=">>), - roundtrip(<<"fooba">>, <<"MZXW6YTB">>), - roundtrip(<<"foobar">>, <<"MZXW6YTBOI======">>). - -roundtrip(Plain, Encoded) -> - ?assertEqual(Plain, couch_base32:decode(Encoded)), - ?assertEqual(Encoded, couch_base32:encode(Plain)). diff --git a/src/couch/test/couch_bt_engine_compactor_tests.erl b/src/couch/test/couch_bt_engine_compactor_tests.erl deleted file mode 100644 index 6c99ceb73..000000000 --- a/src/couch/test/couch_bt_engine_compactor_tests.erl +++ /dev/null @@ -1,130 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_bt_engine_compactor_tests). - - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - - --define(DELAY, 100). --define(WAIT_DELAY_COUNT, 50). - - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - create_docs(DbName), - DbName. - - -teardown(DbName) when is_binary(DbName) -> - couch_server:delete(DbName, [?ADMIN_CTX]), - ok. - - -compaction_resume_test_() -> - { - setup, - fun test_util:start_couch/0, - fun test_util:stop_couch/1, - { - foreach, - fun setup/0, - fun teardown/1, - [ - fun compaction_resume/1 - ] - } - }. - - -compaction_resume(DbName) -> - ?_test(begin - check_db_validity(DbName), - compact_db(DbName), - check_db_validity(DbName), - - % Force an error when copying document ids - with_mecked_emsort(fun() -> - compact_db(DbName) - end), - - check_db_validity(DbName), - compact_db(DbName), - check_db_validity(DbName) - end). - - -check_db_validity(DbName) -> - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), - ?assertEqual(3, couch_db:count_changes_since(Db, 0)) - end). - - -with_mecked_emsort(Fun) -> - meck:new(couch_emsort, [passthrough]), - meck:expect(couch_emsort, iter, fun(_) -> erlang:error(kaboom) end), - try - Fun() - after - meck:unload() - end. - - -create_docs(DbName) -> - couch_util:with_db(DbName, fun(Db) -> - Doc1 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc1">>}, - {<<"value">>, 1} - - ]}), - Doc2 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc2">>}, - {<<"value">>, 2} - - ]}), - Doc3 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc3">>}, - {<<"value">>, 3} - - ]}), - {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]), - couch_db:ensure_full_commit(Db) - end). - - -compact_db(DbName) -> - couch_util:with_db(DbName, fun(Db) -> - {ok, _} = couch_db:start_compact(Db) - end), - wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT). - - -wait_db_compact_done(_DbName, 0) -> - Failure = [ - {module, ?MODULE}, - {line, ?LINE}, - {reason, "DB compaction failed to finish"} - ], - erlang:error({assertion_failed, Failure}); -wait_db_compact_done(DbName, N) -> - IsDone = couch_util:with_db(DbName, fun(Db) -> - not is_pid(couch_db:get_compactor_pid(Db)) - end), - if IsDone -> ok; true -> - timer:sleep(?DELAY), - wait_db_compact_done(DbName, N - 1) - end. diff --git a/src/couch/test/couch_bt_engine_tests.erl b/src/couch/test/couch_bt_engine_tests.erl deleted file mode 100644 index 3e3ecbf25..000000000 --- a/src/couch/test/couch_bt_engine_tests.erl +++ /dev/null @@ -1,20 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_bt_engine_tests). - - --include_lib("eunit/include/eunit.hrl"). - - -couch_bt_engine_test_()-> - cpse_util:create_tests(couch, couch_bt_engine, "couch"). 
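Editor's note: wait_db_compact_done/2 in the compactor test above is a
bounded poll rather than a blind sleep: with ?DELAY = 100 ms and
?WAIT_DELAY_COUNT = 50 retries, compaction gets roughly a five-second
budget before the test fails with an explicit "DB compaction failed to
finish" assertion instead of an opaque EUnit timeout.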
diff --git a/src/couch/test/couch_bt_engine_upgrade_tests.erl b/src/couch/test/couch_bt_engine_upgrade_tests.erl deleted file mode 100644 index 3a516f8f7..000000000 --- a/src/couch/test/couch_bt_engine_upgrade_tests.erl +++ /dev/null @@ -1,243 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_bt_engine_upgrade_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - - -setup(_) -> - Ctx = test_util:start_couch(), - DbDir = config:get("couchdb", "database_dir"), - DbFileNames = [ - "db_v6_without_purge_req.couch", - "db_v6_with_1_purge_req.couch", - "db_v6_with_2_purge_req.couch", - "db_v6_with_1_purge_req_for_2_docs.couch", - "db_v7_without_purge_req.couch", - "db_v7_with_1_purge_req.couch", - "db_v7_with_2_purge_req.couch", - "db_v7_with_1_purge_req_for_2_docs.couch" - ], - NewPaths = lists:map(fun(DbFileName) -> - OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]), - NewDbFilePath = filename:join([DbDir, DbFileName]), - ok = filelib:ensure_dir(NewDbFilePath), - file:delete(NewDbFilePath), - {ok, _} = file:copy(OldDbFilePath, NewDbFilePath), - NewDbFilePath - end, DbFileNames), - {Ctx, NewPaths}. - - -teardown(_, {Ctx, Paths}) -> - test_util:stop_couch(Ctx), - lists:foreach(fun(Path) -> - file:delete(Path) - end, Paths). - - -upgrade_test_() -> - From = [6, 7], - { - "Couch Bt Engine Upgrade tests", - { - foreachx, - fun setup/1, fun teardown/2, - [{F, fun t_upgrade_without_purge_req/2} || F <- From] ++ - [{F, fun t_upgrade_with_1_purge_req/2} || F <- From] ++ - [{F, fun t_upgrade_with_N_purge_req/2} || F <- From] ++ - [{F, fun t_upgrade_with_1_purge_req_for_2_docs/2} || F <- From] - } - }. - - -t_upgrade_without_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> - ?_test(begin - % There are three documents in the fixture - % db with zero purge entries - DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) - ++ "_without_purge_req"), - - ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), - {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> - ?assertEqual(0, couch_db:get_purge_seq(Db)), - couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, []) - end), - ?assertEqual([], UpgradedPurged), - ?assertEqual(8, get_disk_version_from_header(DbName)), - {ok, Rev} = save_doc( - DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]} - ), - {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 5}, couch_db:get_doc_count(Db)), - ?assertEqual(0, couch_db:get_purge_seq(Db)) - end), - - PurgeReqs = [ - {couch_uuids:random(), <<"doc4">>, [Rev]} - ], - - {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> - couch_db:purge_docs(Db, PurgeReqs) - end), - ?assertEqual(PRevs, [Rev]), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)), - ?assertEqual(1, couch_db:get_purge_seq(Db)) - end) - end). 
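% Editor's note: the ?assertEqual(8, get_disk_version_from_header(DbName))
% checks work because the upgrade is implicit -- the header still records
% VersionFrom until the database is first opened, and simply opening the
% v6/v7 file rewrites it at the current disk version, 8. No explicit
% upgrade call appears anywhere in these tests.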
- - -t_upgrade_with_1_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> - ?_test(begin - % There are two documents in the fixture database - % with a single purge entry - DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) - ++ "_with_1_purge_req"), - - ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), - {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> - ?assertEqual(1, couch_db:get_purge_seq(Db)), - couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, []) - end), - ?assertEqual(8, get_disk_version_from_header(DbName)), - ?assertEqual([{1, <<"doc1">>}], UpgradedPurged), - - {ok, Rev} = save_doc( - DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]} - ), - {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)), - ?assertEqual(1, couch_db:get_purge_seq(Db)) - end), - - PurgeReqs = [ - {couch_uuids:random(), <<"doc4">>, [Rev]} - ], - - {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> - couch_db:purge_docs(Db, PurgeReqs) - end), - ?assertEqual(PRevs, [Rev]), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), - ?assertEqual(2, couch_db:get_purge_seq(Db)) - end) - end). - - -t_upgrade_with_N_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> - ?_test(begin - % There is one document in the fixture database - % with two docs that have been purged - DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) - ++ "_with_2_purge_req"), - - ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), - {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> - ?assertEqual(2, couch_db:get_purge_seq(Db)), - couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, []) - end), - ?assertEqual(8, get_disk_version_from_header(DbName)), - ?assertEqual([{2, <<"doc2">>}], UpgradedPurged), - - {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}), - {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), - ?assertEqual(2, couch_db:get_purge_seq(Db)) - end), - - PurgeReqs = [ - {couch_uuids:random(), <<"doc4">>, [Rev]} - ], - - {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> - couch_db:purge_docs(Db, PurgeReqs) - end), - ?assertEqual(PRevs, [Rev]), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 2}, couch_db:get_doc_count(Db)), - ?assertEqual(3, couch_db:get_purge_seq(Db)) - end) - end). 
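% Editor's note: comparing t_upgrade_with_1_purge_req/2 (folds from 0 and
% sees [{1, <<"doc1">>}]) with t_upgrade_with_N_purge_req/2 above (folds
% from 1 and sees only [{2, <<"doc2">>}] even though two purge infos
% exist) shows that the StartSeq argument of couch_db:fold_purge_infos/4
% is exclusive: only infos with a purge seq strictly greater than
% StartSeq are visited.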
- - -t_upgrade_with_1_purge_req_for_2_docs(VersionFrom, {_Ctx, _NewPaths}) -> - ?_test(begin - % There are two documents (Doc4 and Doc5) in the fixture database - % with three docs (Doc1, Doc2 and Doc3) that have been purged, and - % with one purge req for Doc1 and another purge req for Doc 2 and Doc3 - DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) - ++ "_with_1_purge_req_for_2_docs"), - - ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), - {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> - ?assertEqual(3, couch_db:get_purge_seq(Db)), - couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, []) - end), - ?assertEqual(8, get_disk_version_from_header(DbName)), - ?assertEqual([{3,<<"doc2">>},{2,<<"doc3">>}], UpgradedPurged), - - {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc6">>}, {<<"v">>, 1}]}), - {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc7">>}, {<<"v">>, 2}]}), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)), - ?assertEqual(3, couch_db:get_purge_seq(Db)) - end), - - PurgeReqs = [ - {couch_uuids:random(), <<"doc6">>, [Rev]} - ], - - {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> - couch_db:purge_docs(Db, PurgeReqs) - end), - ?assertEqual(PRevs, [Rev]), - - couch_util:with_db(DbName, fun(Db) -> - ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), - ?assertEqual(4, couch_db:get_purge_seq(Db)) - end) - end). - - -save_doc(DbName, Json) -> - Doc = couch_doc:from_json_obj(Json), - couch_util:with_db(DbName, fun(Db) -> - couch_db:update_doc(Db, Doc, []) - end). - - -fold_fun({PSeq, _UUID, Id, _Revs}, Acc) -> - {ok, [{PSeq, Id} | Acc]}. - - -get_disk_version_from_header(DbFileName) -> - DbDir = config:get("couchdb", "database_dir"), - DbFilePath = filename:join([DbDir, ?l2b(?b2l(DbFileName) ++ ".couch")]), - {ok, Fd} = couch_file:open(DbFilePath, []), - {ok, Header} = couch_file:read_header(Fd), - DiskVerison = couch_bt_engine_header:disk_version(Header), - couch_file:close(Fd), - DiskVerison. diff --git a/src/couch/test/couch_btree_tests.erl b/src/couch/test/couch_btree_tests.erl deleted file mode 100644 index c9b791d2c..000000000 --- a/src/couch/test/couch_btree_tests.erl +++ /dev/null @@ -1,572 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_btree_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(ROWS, 1000). --define(TIMEOUT, 60). % seconds - - -setup() -> - {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]), - {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none}, - {reduce, fun reduce_fun/2}]), - {Fd, Btree}. - -setup_kvs(_) -> - setup(). - -setup_red() -> - {_, EvenOddKVs} = lists:foldl( - fun(Idx, {Key, Acc}) -> - case Key of - "even" -> {"odd", [{{Key, Idx}, 1} | Acc]}; - _ -> {"even", [{{Key, Idx}, 1} | Acc]} - end - end, {"odd", []}, lists:seq(1, ?ROWS)), - {Fd, Btree} = setup(), - {ok, Btree1} = couch_btree:add_remove(Btree, EvenOddKVs, []), - {Fd, Btree1}. -setup_red(_) -> - setup_red(). 
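% Editor's note: the foldl in setup_red/0 flips the tag on every step, so
% the keys come out as {"odd", 1}, {"even", 2}, {"odd", 3}, ... each with
% the constant value 1. With ?ROWS = 1000 that is 500 keys per parity
% group, which is where the recurring `?ROWS div 2` expectations in the
% reduction tests below come from.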
- -teardown(Fd) when is_pid(Fd) -> - ok = couch_file:close(Fd); -teardown({Fd, _}) -> - teardown(Fd). -teardown(_, {Fd, _}) -> - teardown(Fd). - - -kvs_test_funs() -> - [ - fun should_set_fd_correctly/2, - fun should_set_root_correctly/2, - fun should_create_zero_sized_btree/2, - fun should_set_reduce_option/2, - fun should_fold_over_empty_btree/2, - fun should_add_all_keys/2, - fun should_continuously_add_new_kv/2, - fun should_continuously_remove_keys/2, - fun should_insert_keys_in_reversed_order/2, - fun should_add_every_odd_key_remove_every_even/2, - fun should_add_every_even_key_remove_every_old/2 - ]. - -red_test_funs() -> - [ - fun should_reduce_whole_range/2, - fun should_reduce_first_half/2, - fun should_reduce_second_half/2 - ]. - - -btree_open_test_() -> - {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]), - {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none}]), - { - "Ensure that created btree is really a btree record", - ?_assert(is_record(Btree, btree)) - }. - -sorted_kvs_test_() -> - Funs = kvs_test_funs(), - Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)], - { - "BTree with sorted keys", - { - setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, - { - foreachx, - fun setup_kvs/1, fun teardown/2, - [{Sorted, Fun} || Fun <- Funs] - } - } - }. - -rsorted_kvs_test_() -> - Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)], - Funs = kvs_test_funs(), - Reversed = Sorted, - { - "BTree with backward sorted keys", - { - setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, - { - foreachx, - fun setup_kvs/1, fun teardown/2, - [{Reversed, Fun} || Fun <- Funs] - } - } - }. - -shuffled_kvs_test_() -> - Funs = kvs_test_funs(), - Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)], - Shuffled = shuffle(Sorted), - { - "BTree with shuffled keys", - { - setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, - { - foreachx, - fun setup_kvs/1, fun teardown/2, - [{Shuffled, Fun} || Fun <- Funs] - } - } - }. - -reductions_test_() -> - { - "BTree reductions", - { - setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, - [ - { - "Common tests", - { - foreach, - fun setup_red/0, fun teardown/1, - [ - fun should_reduce_without_specified_direction/1, - fun should_reduce_forward/1, - fun should_reduce_backward/1 - ] - } - }, - { - "Range requests", - [ - { - "Forward direction", - { - foreachx, - fun setup_red/1, fun teardown/2, - [{fwd, F} || F <- red_test_funs()] - } - }, - { - "Backward direction", - { - foreachx, - fun setup_red/1, fun teardown/2, - [{rev, F} || F <- red_test_funs()] - } - } - ] - } - ] - } - }. - - -should_set_fd_correctly(_, {Fd, Btree}) -> - ?_assertMatch(Fd, Btree#btree.fd). - -should_set_root_correctly(_, {_, Btree}) -> - ?_assertMatch(nil, Btree#btree.root). - -should_create_zero_sized_btree(_, {_, Btree}) -> - ?_assertMatch(0, couch_btree:size(Btree)). - -should_set_reduce_option(_, {_, Btree}) -> - ReduceFun = fun reduce_fun/2, - Btree1 = couch_btree:set_options(Btree, [{reduce, ReduceFun}]), - ?_assertMatch(ReduceFun, Btree1#btree.reduce). - -should_fold_over_empty_btree(_, {_, Btree}) -> - {ok, _, EmptyRes} = couch_btree:foldl(Btree, fun(_, X) -> {ok, X+1} end, 0), - ?_assertEqual(EmptyRes, 0). 
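% Editor's note: rsorted_kvs_test_/0 above binds `Reversed = Sorted`
% without ever calling lists:reverse/1, so the "backward sorted keys"
% suite currently feeds the btree the same ascending list as the sorted
% suite; `Reversed = lists:reverse(Sorted)` was presumably the intent.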
- -should_add_all_keys(KeyValues, {Fd, Btree}) -> - {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []), - [ - should_return_complete_btree_on_adding_all_keys(KeyValues, Btree1), - should_have_non_zero_size(Btree1), - should_have_lesser_size_than_file(Fd, Btree1), - should_keep_root_pointer_to_kp_node(Fd, Btree1), - should_remove_all_keys(KeyValues, Btree1) - ]. - -should_return_complete_btree_on_adding_all_keys(KeyValues, Btree) -> - ?_assert(test_btree(Btree, KeyValues)). - -should_have_non_zero_size(Btree) -> - ?_assert(couch_btree:size(Btree) > 0). - -should_have_lesser_size_than_file(Fd, Btree) -> - ?_assert((couch_btree:size(Btree) =< couch_file:bytes(Fd))). - -should_keep_root_pointer_to_kp_node(Fd, Btree) -> - ?_assertMatch({ok, {kp_node, _}}, - couch_file:pread_term(Fd, element(1, Btree#btree.root))). - -should_remove_all_keys(KeyValues, Btree) -> - Keys = keys(KeyValues), - {ok, Btree1} = couch_btree:add_remove(Btree, [], Keys), - { - "Should remove all the keys", - [ - should_produce_valid_btree(Btree1, []), - should_be_empty(Btree1) - ] - }. - -should_continuously_add_new_kv(KeyValues, {_, Btree}) -> - {Btree1, _} = lists:foldl( - fun(KV, {BtAcc, PrevSize}) -> - {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []), - ?assert(couch_btree:size(BtAcc2) > PrevSize), - {BtAcc2, couch_btree:size(BtAcc2)} - end, {Btree, couch_btree:size(Btree)}, KeyValues), - { - "Should continuously add key-values to btree", - [ - should_produce_valid_btree(Btree1, KeyValues), - should_not_be_empty(Btree1) - ] - }. - -should_continuously_remove_keys(KeyValues, {_, Btree}) -> - {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []), - {Btree2, _} = lists:foldl( - fun({K, _}, {BtAcc, PrevSize}) -> - {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]), - ?assert(couch_btree:size(BtAcc2) < PrevSize), - {BtAcc2, couch_btree:size(BtAcc2)} - end, {Btree1, couch_btree:size(Btree1)}, KeyValues), - { - "Should continuously remove keys from btree", - [ - should_produce_valid_btree(Btree2, []), - should_be_empty(Btree2) - ] - }. - -should_insert_keys_in_reversed_order(KeyValues, {_, Btree}) -> - KeyValuesRev = lists:reverse(KeyValues), - {Btree1, _} = lists:foldl( - fun(KV, {BtAcc, PrevSize}) -> - {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []), - ?assert(couch_btree:size(BtAcc2) > PrevSize), - {BtAcc2, couch_btree:size(BtAcc2)} - end, {Btree, couch_btree:size(Btree)}, KeyValuesRev), - should_produce_valid_btree(Btree1, KeyValues). - -should_add_every_odd_key_remove_every_even(KeyValues, {_, Btree}) -> - {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []), - {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) -> - case Count rem 2 == 0 of - true -> {Count + 1, [X | Left], Right}; - false -> {Count + 1, Left, [X | Right]} - end - end, {0, [], []}, KeyValues), - {timeout, ?TIMEOUT, - ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1)) - }. - -should_add_every_even_key_remove_every_old(KeyValues, {_, Btree}) -> - {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []), - {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) -> - case Count rem 2 == 0 of - true -> {Count + 1, [X | Left], Right}; - false -> {Count + 1, Left, [X | Right]} - end - end, {0, [], []}, KeyValues), - {timeout, ?TIMEOUT, - ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0)) - }. - - -should_reduce_without_specified_direction({_, Btree}) -> - ?_assertMatch( - {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]}, - fold_reduce(Btree, [])). 
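% Editor's note: the two even/odd add-remove cases above are the only
% ones wrapped in {timeout, ?TIMEOUT} (60 seconds): each drives thousands
% of individual couch_btree:add_remove/3 calls, which can overrun EUnit's
% default 5-second per-test timeout on slow hardware.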
- -should_reduce_forward({_, Btree}) -> - ?_assertMatch( - {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, fwd}])). - -should_reduce_backward({_, Btree}) -> - ?_assertMatch( - {ok, [{{"even", _}, ?ROWS div 2}, {{"odd", _}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, rev}])). - -should_reduce_whole_range(fwd, {_, Btree}) -> - {SK, EK} = {{"even", 0}, {"odd", ?ROWS - 1}}, - [ - { - "include endkey", - ?_assertMatch( - {ok, [{{"odd", 1}, ?ROWS div 2}, - {{"even", 2}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, fwd}, - {start_key, SK}, - {end_key, EK}])) - }, - { - "exclude endkey", - ?_assertMatch( - {ok, [{{"odd", 1}, (?ROWS div 2) - 1}, - {{"even", 2}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, fwd}, - {start_key, SK}, - {end_key_gt, EK}])) - } - ]; -should_reduce_whole_range(rev, {_, Btree}) -> - {SK, EK} = {{"odd", ?ROWS - 1}, {"even", 2}}, - [ - { - "include endkey", - ?_assertMatch( - {ok, [{{"even", ?ROWS}, ?ROWS div 2}, - {{"odd", ?ROWS - 1}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, rev}, - {start_key, SK}, - {end_key, EK}])) - }, - { - "exclude endkey", - ?_assertMatch( - {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1}, - {{"odd", ?ROWS - 1}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, rev}, - {start_key, SK}, - {end_key_gt, EK}])) - } - ]. - -should_reduce_first_half(fwd, {_, Btree}) -> - {SK, EK} = {{"even", 0}, {"odd", (?ROWS div 2) - 1}}, - [ - { - "include endkey", - ?_assertMatch( - {ok, [{{"odd", 1}, ?ROWS div 4}, - {{"even", 2}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, fwd}, - {start_key, SK}, {end_key, EK}])) - }, - { - "exclude endkey", - ?_assertMatch( - {ok, [{{"odd", 1}, (?ROWS div 4) - 1}, - {{"even", 2}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, fwd}, - {start_key, SK}, - {end_key_gt, EK}])) - } - ]; -should_reduce_first_half(rev, {_, Btree}) -> - {SK, EK} = {{"odd", ?ROWS - 1}, {"even", ?ROWS div 2}}, - [ - { - "include endkey", - ?_assertMatch( - {ok, [{{"even", ?ROWS}, (?ROWS div 4) + 1}, - {{"odd", ?ROWS - 1}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, rev}, - {start_key, SK}, - {end_key, EK}])) - }, - { - "exclude endkey", - ?_assertMatch( - {ok, [{{"even", ?ROWS}, ?ROWS div 4}, - {{"odd", ?ROWS - 1}, ?ROWS div 2}]}, - fold_reduce(Btree, [{dir, rev}, - {start_key, SK}, - {end_key_gt, EK}])) - } - ]. - -should_reduce_second_half(fwd, {_, Btree}) -> - {SK, EK} = {{"even", ?ROWS div 2}, {"odd", ?ROWS - 1}}, - [ - { - "include endkey", - ?_assertMatch( - {ok, [{{"odd", 1}, ?ROWS div 2}, - {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]}, - fold_reduce(Btree, [{dir, fwd}, - {start_key, SK}, - {end_key, EK}])) - }, - { - "exclude endkey", - ?_assertMatch( - {ok, [{{"odd", 1}, (?ROWS div 2) - 1}, - {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]}, - fold_reduce(Btree, [{dir, fwd}, - {start_key, SK}, - {end_key_gt, EK}])) - } - ]; -should_reduce_second_half(rev, {_, Btree}) -> - {SK, EK} = {{"odd", (?ROWS div 2) + 1}, {"even", 2}}, - [ - { - "include endkey", - ?_assertMatch( - {ok, [{{"even", ?ROWS}, ?ROWS div 2}, - {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]}, - fold_reduce(Btree, [{dir, rev}, - {start_key, SK}, - {end_key, EK}])) - }, - { - "exclude endkey", - ?_assertMatch( - {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1}, - {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]}, - fold_reduce(Btree, [{dir, rev}, - {start_key, SK}, - {end_key_gt, EK}])) - } - ]. - -should_produce_valid_btree(Btree, KeyValues) -> - ?_assert(test_btree(Btree, KeyValues)). - -should_be_empty(Btree) -> - ?_assertEqual(couch_btree:size(Btree), 0). 
- -should_not_be_empty(Btree) -> - ?_assert(couch_btree:size(Btree) > 0). - -fold_reduce(Btree, Opts) -> - GroupFun = fun({K1, _}, {K2, _}) -> - K1 == K2 - end, - FoldFun = fun(GroupedKey, Unreduced, Acc) -> - {ok, [{GroupedKey, couch_btree:final_reduce(Btree, Unreduced)} | Acc]} - end, - couch_btree:fold_reduce(Btree, FoldFun, [], - [{key_group_fun, GroupFun}] ++ Opts). - - -keys(KVs) -> - [K || {K, _} <- KVs]. - -reduce_fun(reduce, KVs) -> - length(KVs); -reduce_fun(rereduce, Reds) -> - lists:sum(Reds). - - -shuffle(List) -> - randomize(round(math:log(length(List)) + 0.5), List). - -randomize(1, List) -> - randomize(List); -randomize(T, List) -> - lists:foldl( - fun(_E, Acc) -> - randomize(Acc) - end, randomize(List), lists:seq(1, (T - 1))). - -randomize(List) -> - D = lists:map(fun(A) -> {couch_rand:uniform(), A} end, List), - {_, D1} = lists:unzip(lists:keysort(1, D)), - D1. - -test_btree(Btree, KeyValues) -> - ok = test_key_access(Btree, KeyValues), - ok = test_lookup_access(Btree, KeyValues), - ok = test_final_reductions(Btree, KeyValues), - ok = test_traversal_callbacks(Btree, KeyValues), - true. - -test_add_remove(Btree, OutKeyValues, RemainingKeyValues) -> - Btree2 = lists:foldl( - fun({K, _}, BtAcc) -> - {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]), - BtAcc2 - end, Btree, OutKeyValues), - true = test_btree(Btree2, RemainingKeyValues), - - Btree3 = lists:foldl( - fun(KV, BtAcc) -> - {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []), - BtAcc2 - end, Btree2, OutKeyValues), - true = test_btree(Btree3, OutKeyValues ++ RemainingKeyValues). - -test_key_access(Btree, List) -> - FoldFun = fun(Element, {[HAcc|TAcc], Count}) -> - case Element == HAcc of - true -> {ok, {TAcc, Count + 1}}; - _ -> {ok, {TAcc, Count + 1}} - end - end, - Length = length(List), - Sorted = lists:sort(List), - {ok, _, {[], Length}} = couch_btree:foldl(Btree, FoldFun, {Sorted, 0}), - {ok, _, {[], Length}} = couch_btree:fold(Btree, FoldFun, - {Sorted, 0}, [{dir, rev}]), - ok. - -test_lookup_access(Btree, KeyValues) -> - FoldFun = fun({Key, Value}, {Key, Value}) -> {stop, true} end, - lists:foreach( - fun({Key, Value}) -> - [{ok, {Key, Value}}] = couch_btree:lookup(Btree, [Key]), - {ok, _, true} = couch_btree:foldl(Btree, FoldFun, - {Key, Value}, [{start_key, Key}]) - end, KeyValues). - -test_final_reductions(Btree, KeyValues) -> - KVLen = length(KeyValues), - FoldLFun = fun(_X, LeadingReds, Acc) -> - CountToStart = KVLen div 3 + Acc, - CountToStart = couch_btree:final_reduce(Btree, LeadingReds), - {ok, Acc + 1} - end, - FoldRFun = fun(_X, LeadingReds, Acc) -> - CountToEnd = KVLen - KVLen div 3 + Acc, - CountToEnd = couch_btree:final_reduce(Btree, LeadingReds), - {ok, Acc + 1} - end, - {LStartKey, _} = case KVLen of - 0 -> {nil, nil}; - _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues)) - end, - {RStartKey, _} = case KVLen of - 0 -> {nil, nil}; - _ -> lists:nth(KVLen div 3, lists:sort(KeyValues)) - end, - {ok, _, FoldLRed} = couch_btree:foldl(Btree, FoldLFun, 0, - [{start_key, LStartKey}]), - {ok, _, FoldRRed} = couch_btree:fold(Btree, FoldRFun, 0, - [{dir, rev}, {start_key, RStartKey}]), - KVLen = FoldLRed + FoldRRed, - ok. - -test_traversal_callbacks(Btree, _KeyValues) -> - FoldFun = fun - (visit, _GroupedKey, _Unreduced, Acc) -> - {ok, Acc andalso false}; - (traverse, _LK, _Red, Acc) -> - {skip, Acc andalso true} - end, - % With 250 items the root is a kp. Always skipping should reduce to true. - {ok, _, true} = couch_btree:fold(Btree, FoldFun, true, [{dir, fwd}]), - ok. 
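Editor's note: the reduction tests above all hinge on reduce_fun/2 --
`reduce` returns the number of raw key-values under a node, `rereduce`
sums already-computed counts -- so couch_btree:final_reduce/2 over either
parity group always lands on ?ROWS div 2 = 500. For instance, a group
split across three nodes as 300 + 150 + 50 raw rows reduces to the
partials [300, 150, 50], which rereduce to 500.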
diff --git a/src/couch/test/couch_changes_tests.erl b/src/couch/test/couch_changes_tests.erl deleted file mode 100644 index 0c2f5f91f..000000000 --- a/src/couch/test/couch_changes_tests.erl +++ /dev/null @@ -1,1001 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_changes_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 6000). --define(TEST_TIMEOUT, 10000). - --record(row, { - id, - seq, - deleted = false, - doc = nil -}). - -setup() -> - DbName = ?tempdb(), - {ok, Db} = create_db(DbName), - Revs = [R || {ok, R} <- [ - save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}), - save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}), - save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}), - save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}), - save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}) - ]], - Rev = lists:nth(3, Revs), - couch_db:ensure_full_commit(Db), - {ok, Db1} = couch_db:reopen(Db), - - {ok, Rev1} = save_doc(Db1, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev}]}), - Revs1 = Revs ++ [Rev1], - Revs2 = Revs1 ++ [R || {ok, R} <- [ - save_doc(Db1, {[{<<"_id">>, <<"doc6">>}]}), - save_doc(Db1, {[{<<"_id">>, <<"_design/foo">>}]}), - save_doc(Db1, {[{<<"_id">>, <<"doc7">>}]}), - save_doc(Db1, {[{<<"_id">>, <<"doc8">>}]}) - ]], - config:set("native_query_servers", "erlang", "{couch_native_process, start_link, []}", _Persist=false), - {DbName, list_to_tuple(Revs2)}. - -teardown({DbName, _}) -> - config:delete("native_query_servers", "erlang", _Persist=false), - delete_db(DbName), - ok. - - -changes_test_() -> - { - "Changes feed", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - [ - filter_by_selector(), - filter_by_doc_id(), - filter_by_design(), - continuous_feed(), - %%filter_by_custom_function() - filter_by_filter_function(), - filter_by_view() - ] - } - }. - -filter_by_doc_id() -> - { - "Filter _doc_id", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_filter_by_specific_doc_ids/1, - fun should_filter_by_specific_doc_ids_descending/1, - fun should_filter_by_specific_doc_ids_with_since/1, - fun should_filter_by_specific_doc_ids_no_result/1, - fun should_handle_deleted_docs/1 - ] - } - }. - -filter_by_selector() -> - { - "Filter _selector", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_select_basic/1, - fun should_select_with_since/1, - fun should_select_when_no_result/1, - fun should_select_with_deleted_docs/1, - fun should_select_with_continuous/1, - fun should_stop_selector_when_db_deleted/1, - fun should_select_with_empty_fields/1, - fun should_select_with_fields/1 - ] - } - }. - - -filter_by_design() -> - { - "Filter _design", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_emit_only_design_documents/1 - ] - } - }. - -%% filter_by_custom_function() -> -%% { -%% "Filter function", -%% { -%% foreach, -%% fun setup/0, fun teardown/1, -%% [ -%% fun should_receive_heartbeats/1 -%% ] -%% } -%% }. 
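% Editor's note on the magic sequence numbers asserted below: setup/0
% writes doc1..doc5 (update seqs 1..5), then updates doc3, moving it to
% seq 6, then writes doc6, _design/foo, doc7 and doc8 (seqs 7..10). A
% change row always carries a document's latest seq, which is why doc4
% appears at seq 4 but doc3 at seq 6, and why a subsequent delete of
% doc3 lands at seq 11.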
- -filter_by_filter_function() -> - { - "Filter by filters", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_filter_by_doc_attribute/1, - fun should_filter_by_user_ctx/1 - ] - } - }. - -filter_by_view() -> - { - "Filter _view", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_filter_by_view/1, - fun should_filter_by_fast_view/1, - fun should_filter_by_erlang_view/1 - ] - } - }. - -continuous_feed() -> - { - "Continuous Feed", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_filter_continuous_feed_by_specific_doc_ids/1, - fun should_end_changes_when_db_deleted/1 - ] - } - }. - - -should_filter_by_specific_doc_ids({DbName, _}) -> - ?_test( - begin - ChArgs = #changes_args{ - filter = "_doc_ids" - }, - DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], - Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - - ?assertEqual(2, length(Rows)), - [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows, - ?assertEqual(<<"doc4">>, Id1), - ?assertEqual(4, Seq1), - ?assertEqual(<<"doc3">>, Id2), - ?assertEqual(6, Seq2), - ?assertEqual(UpSeq, LastSeq) - end). - -should_filter_by_specific_doc_ids_descending({DbName, _}) -> - ?_test( - begin - ChArgs = #changes_args{ - filter = "_doc_ids", - dir = rev - }, - DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], - Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, - {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req), - - ?assertEqual(2, length(Rows)), - [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows, - ?assertEqual(<<"doc3">>, Id1), - ?assertEqual(6, Seq1), - ?assertEqual(<<"doc4">>, Id2), - ?assertEqual(4, Seq2), - ?assertEqual(4, LastSeq) - end). - -should_filter_by_specific_doc_ids_with_since({DbName, _}) -> - ?_test( - begin - ChArgs = #changes_args{ - filter = "_doc_ids", - since = 5 - }, - DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], - Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - - ?assertEqual(1, length(Rows)), - [#row{seq = Seq1, id = Id1}] = Rows, - ?assertEqual(<<"doc3">>, Id1), - ?assertEqual(6, Seq1), - ?assertEqual(UpSeq, LastSeq) - end). - -should_filter_by_specific_doc_ids_no_result({DbName, _}) -> - ?_test( - begin - ChArgs = #changes_args{ - filter = "_doc_ids", - since = 6 - }, - DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], - Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - - ?assertEqual(0, length(Rows)), - ?assertEqual(UpSeq, LastSeq) - end). - -should_handle_deleted_docs({DbName, Revs}) -> - ?_test( - begin - Rev3_2 = element(6, Revs), - {ok, Db} = couch_db:open_int(DbName, []), - {ok, _} = save_doc( - Db, - {[{<<"_id">>, <<"doc3">>}, - {<<"_deleted">>, true}, - {<<"_rev">>, Rev3_2}]}), - - ChArgs = #changes_args{ - filter = "_doc_ids", - since = 9 - }, - DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], - Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, - {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req), - - ?assertEqual(1, length(Rows)), - ?assertMatch( - [#row{seq = LastSeq, id = <<"doc3">>, deleted = true}], - Rows - ), - ?assertEqual(11, LastSeq) - end). 
- -should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) -> - ?_test( - begin - {ok, Db} = couch_db:open_int(DbName, []), - ChangesArgs = #changes_args{ - filter = "_doc_ids", - feed = "continuous" - }, - DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], - Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, - reset_row_notifications(), - Consumer = spawn_consumer(DbName, ChangesArgs, Req), - ?assertEqual(ok, wait_row_notifications(2)), - ok = pause(Consumer), - - Rows = get_rows(Consumer), - ?assertEqual(2, length(Rows)), - [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows, - ?assertEqual(<<"doc4">>, Id1), - ?assertEqual(4, Seq1), - ?assertEqual(<<"doc3">>, Id2), - ?assertEqual(6, Seq2), - - clear_rows(Consumer), - {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}), - {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}), - ok = unpause(Consumer), - timer:sleep(100), - ok = pause(Consumer), - ?assertEqual([], get_rows(Consumer)), - - Rev4 = element(4, Revs), - Rev3_2 = element(6, Revs), - {ok, Rev4_2} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}, - {<<"_rev">>, Rev4}]}), - {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}), - {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}, - {<<"_rev">>, Rev4_2}]}), - {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}), - {ok, Rev3_3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, - {<<"_rev">>, Rev3_2}]}), - reset_row_notifications(), - ok = unpause(Consumer), - ?assertEqual(ok, wait_row_notifications(2)), - ok = pause(Consumer), - - NewRows = get_rows(Consumer), - ?assertEqual(2, length(NewRows)), - [Row14, Row16] = NewRows, - ?assertEqual(<<"doc4">>, Row14#row.id), - ?assertEqual(15, Row14#row.seq), - ?assertEqual(<<"doc3">>, Row16#row.id), - ?assertEqual(17, Row16#row.seq), - - clear_rows(Consumer), - {ok, _Rev3_4} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, - {<<"_rev">>, Rev3_3}]}), - reset_row_notifications(), - ok = unpause(Consumer), - ?assertEqual(ok, wait_row_notifications(1)), - ok = pause(Consumer), - - FinalRows = get_rows(Consumer), - - ok = unpause(Consumer), - stop_consumer(Consumer), - - ?assertMatch([#row{seq = 18, id = <<"doc3">>}], FinalRows) - end). - - -should_end_changes_when_db_deleted({DbName, _Revs}) -> - ?_test(begin - {ok, _Db} = couch_db:open_int(DbName, []), - ChangesArgs = #changes_args{ - filter = "_doc_ids", - feed = "continuous" - }, - DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], - Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, - Consumer = spawn_consumer(DbName, ChangesArgs, Req), - ok = pause(Consumer), - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok = unpause(Consumer), - {_Rows, _LastSeq} = wait_finished(Consumer), - stop_consumer(Consumer), - ok - end). - - -should_select_basic({DbName, _}) -> - ?_test( - begin - ChArgs = #changes_args{filter = "_selector"}, - Selector = {[{<<"_id">>, <<"doc3">>}]}, - Req = {json_req, {[{<<"selector">>, Selector}]}}, - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - ?assertEqual(1, length(Rows)), - [#row{seq = Seq, id = Id}] = Rows, - ?assertEqual(<<"doc3">>, Id), - ?assertEqual(6, Seq), - ?assertEqual(UpSeq, LastSeq) - end). 
- -should_select_with_since({DbName, _}) -> - ?_test( - begin - ChArgs = #changes_args{filter = "_selector", since = 9}, - GteDoc2 = {[{<<"$gte">>, <<"doc1">>}]}, - Selector = {[{<<"_id">>, GteDoc2}]}, - Req = {json_req, {[{<<"selector">>, Selector}]}}, - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - ?assertEqual(1, length(Rows)), - [#row{seq = Seq, id = Id}] = Rows, - ?assertEqual(<<"doc8">>, Id), - ?assertEqual(10, Seq), - ?assertEqual(UpSeq, LastSeq) - end). - -should_select_when_no_result({DbName, _}) -> - ?_test( - begin - ChArgs = #changes_args{filter = "_selector"}, - Selector = {[{<<"_id">>, <<"nopers">>}]}, - Req = {json_req, {[{<<"selector">>, Selector}]}}, - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - ?assertEqual(0, length(Rows)), - ?assertEqual(UpSeq, LastSeq) - end). - -should_select_with_deleted_docs({DbName, Revs}) -> - ?_test( - begin - Rev3_2 = element(6, Revs), - {ok, Db} = couch_db:open_int(DbName, []), - {ok, _} = save_doc( - Db, - {[{<<"_id">>, <<"doc3">>}, - {<<"_deleted">>, true}, - {<<"_rev">>, Rev3_2}]}), - ChArgs = #changes_args{filter = "_selector"}, - Selector = {[{<<"_id">>, <<"doc3">>}]}, - Req = {json_req, {[{<<"selector">>, Selector}]}}, - {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req), - ?assertMatch( - [#row{seq = LastSeq, id = <<"doc3">>, deleted = true}], - Rows - ), - ?assertEqual(11, LastSeq) - end). - -should_select_with_continuous({DbName, Revs}) -> - ?_test( - begin - {ok, Db} = couch_db:open_int(DbName, []), - ChArgs = #changes_args{filter = "_selector", feed = "continuous"}, - GteDoc8 = {[{<<"$gte">>, <<"doc8">>}]}, - Selector = {[{<<"_id">>, GteDoc8}]}, - Req = {json_req, {[{<<"selector">>, Selector}]}}, - reset_row_notifications(), - Consumer = spawn_consumer(DbName, ChArgs, Req), - ?assertEqual(ok, wait_row_notifications(1)), - ok = pause(Consumer), - Rows = get_rows(Consumer), - ?assertMatch( - [#row{seq = 10, id = <<"doc8">>, deleted = false}], - Rows - ), - clear_rows(Consumer), - {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc01">>}]}), - ok = unpause(Consumer), - timer:sleep(100), - ok = pause(Consumer), - ?assertEqual([], get_rows(Consumer)), - Rev4 = element(4, Revs), - Rev8 = element(10, Revs), - {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}, - {<<"_rev">>, Rev8}]}), - {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}, - {<<"_rev">>, Rev4}]}), - reset_row_notifications(), - ok = unpause(Consumer), - ?assertEqual(ok, wait_row_notifications(1)), - ok = pause(Consumer), - NewRows = get_rows(Consumer), - ?assertMatch( - [#row{seq = _, id = <<"doc8">>, deleted = false}], - NewRows - ) - end). - -should_stop_selector_when_db_deleted({DbName, _Revs}) -> - ?_test( - begin - {ok, _Db} = couch_db:open_int(DbName, []), - ChArgs = #changes_args{filter = "_selector", feed = "continuous"}, - Selector = {[{<<"_id">>, <<"doc3">>}]}, - Req = {json_req, {[{<<"selector">>, Selector}]}}, - Consumer = spawn_consumer(DbName, ChArgs, Req), - ok = pause(Consumer), - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok = unpause(Consumer), - {_Rows, _LastSeq} = wait_finished(Consumer), - stop_consumer(Consumer), - ok - end). 
- - -should_select_with_empty_fields({DbName, _}) -> - ?_test( - begin - ChArgs = #changes_args{filter = "_selector", include_docs=true}, - Selector = {[{<<"_id">>, <<"doc3">>}]}, - Req = {json_req, {[{<<"selector">>, Selector}, - {<<"fields">>, []}]}}, - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - ?assertEqual(1, length(Rows)), - [#row{seq = Seq, id = Id, doc = Doc}] = Rows, - ?assertEqual(<<"doc3">>, Id), - ?assertEqual(6, Seq), - ?assertEqual(UpSeq, LastSeq), - ?assertMatch({[{_K1, _V1}, {_K2, _V2}]}, Doc) - end). - -should_select_with_fields({DbName, _}) -> - ?_test( - begin - ChArgs = #changes_args{filter = "_selector", include_docs=true}, - Selector = {[{<<"_id">>, <<"doc3">>}]}, - Req = {json_req, {[{<<"selector">>, Selector}, - {<<"fields">>, [<<"_id">>, <<"nope">>]}]}}, - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - ?assertEqual(1, length(Rows)), - [#row{seq = Seq, id = Id, doc = Doc}] = Rows, - ?assertEqual(<<"doc3">>, Id), - ?assertEqual(6, Seq), - ?assertEqual(UpSeq, LastSeq), - ?assertMatch(Doc, {[{<<"_id">>, <<"doc3">>}]}) - end). - - -should_emit_only_design_documents({DbName, Revs}) -> - ?_test( - begin - ChArgs = #changes_args{ - filter = "_design" - }, - Req = {json_req, null}, - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - - ?assertEqual(1, length(Rows)), - ?assertEqual(UpSeq, LastSeq), - ?assertEqual([#row{seq = 8, id = <<"_design/foo">>}], Rows), - - - {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), - {ok, _} = save_doc(Db, {[{<<"_id">>, <<"_design/foo">>}, - {<<"_rev">>, element(8, Revs)}, - {<<"_deleted">>, true}]}), - - couch_db:close(Db), - {Rows2, LastSeq2, _} = run_changes_query(DbName, ChArgs, Req), - - UpSeq2 = UpSeq + 1, - - ?assertEqual(1, length(Rows2)), - ?assertEqual(UpSeq2, LastSeq2), - ?assertEqual([#row{seq = 11, - id = <<"_design/foo">>, - deleted = true}], - Rows2) - end). 
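For orientation while reading the selector cases above: the {json_req, ...} tuples handed to run_changes_query/3 stand in for the parsed body of a filtered changes request. Over HTTP the same query is a POST against the changes endpoint. A minimal sketch, assuming a running node and that test_request:post/3 behaves like the put/delete helpers used later in this series (the db URL and selector are illustrative):

    % Hypothetical helper: fetch changes filtered by a Mango selector.
    % The JSON body mirrors the {json_req, ...} tuples built in the tests.
    selector_changes(DbUrl) ->
        Body = "{\"selector\": {\"_id\": \"doc3\"}, \"fields\": [\"_id\"]}",
        {ok, 200, _Headers, RespBody} = test_request:post(
            DbUrl ++ "/_changes?filter=_selector",
            [{"Content-Type", "application/json"}],
            Body),
        ?JSON_DECODE(RespBody).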
- -%% should_receive_heartbeats(_) -> -%% {timeout, ?TEST_TIMEOUT div 1000, -%% ?_test( -%% begin -%% DbName = ?tempdb(), -%% Timeout = 100, -%% {ok, Db} = create_db(DbName), - -%% {ok, _} = save_doc(Db, {[ -%% {<<"_id">>, <<"_design/filtered">>}, -%% {<<"language">>, <<"javascript">>}, -%% {<<"filters">>, {[ -%% {<<"foo">>, <<"function(doc) { -%% return ['doc10', 'doc11', 'doc12'].indexOf(doc._id) != -1;}">> -%% }]}} -%% ]}), - -%% ChangesArgs = #changes_args{ -%% filter = "filtered/foo", -%% feed = "continuous", -%% timeout = 10000, -%% heartbeat = 1000 -%% }, -%% Consumer = spawn_consumer(DbName, ChangesArgs, {json_req, null}), - -%% {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}), -%% timer:sleep(Timeout), -%% {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}), -%% timer:sleep(Timeout), -%% {ok, _Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}), -%% timer:sleep(Timeout), -%% {ok, _Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}), -%% timer:sleep(Timeout), -%% {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}), -%% timer:sleep(Timeout), -%% {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}), -%% timer:sleep(Timeout), -%% {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}), -%% timer:sleep(Timeout), -%% {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}), -%% timer:sleep(Timeout), -%% {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}), - -%% Heartbeats = get_heartbeats(Consumer), -%% ?assert(Heartbeats > 0), - -%% {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}), -%% timer:sleep(Timeout), -%% {ok, _Rev11} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}), -%% timer:sleep(Timeout), -%% {ok, _Rev12} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}), - -%% Heartbeats2 = get_heartbeats(Consumer), -%% ?assert(Heartbeats2 > Heartbeats), - -%% Rows = get_rows(Consumer), -%% ?assertEqual(3, length(Rows)), - -%% {ok, _Rev13} = save_doc(Db, {[{<<"_id">>, <<"doc13">>}]}), -%% timer:sleep(Timeout), -%% {ok, _Rev14} = save_doc(Db, {[{<<"_id">>, <<"doc14">>}]}), -%% timer:sleep(Timeout), - -%% Heartbeats3 = get_heartbeats(Consumer), -%% ?assert(Heartbeats3 > Heartbeats2) -%% end)}. - -should_filter_by_doc_attribute({DbName, _}) -> - ?_test( - begin - DDocId = <<"_design/app">>, - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDocId}, - {<<"language">>, <<"javascript">>}, - {<<"filters">>, {[ - {<<"valid">>, <<"function(doc, req) {" - " if (doc._id == 'doc3') {" - " return true; " - "} }">>} - ]}} - ]}), - ChArgs = #changes_args{filter = "app/valid"}, - Req = {json_req, null}, - ok = update_ddoc(DbName, DDoc), - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - ?assertEqual(1, length(Rows)), - [#row{seq = Seq, id = Id}] = Rows, - ?assertEqual(<<"doc3">>, Id), - ?assertEqual(6, Seq), - ?assertEqual(UpSeq, LastSeq) - end). 
- -should_filter_by_user_ctx({DbName, _}) -> - ?_test( - begin - DDocId = <<"_design/app">>, - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDocId}, - {<<"language">>, <<"javascript">>}, - {<<"filters">>, {[ - {<<"valid">>, <<"function(doc, req) {" - " if (req.userCtx.name == doc._id) {" - " return true; " - "} }">>} - ]}} - ]}), - ChArgs = #changes_args{filter = "app/valid"}, - UserCtx = #user_ctx{name = <<"doc3">>, roles = []}, - {ok, DbRec} = couch_db:clustered_db(DbName, UserCtx), - Req = {json_req, {[{ - <<"userCtx">>, couch_util:json_user_ctx(DbRec) - }]}}, - ok = update_ddoc(DbName, DDoc), - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - ?assertEqual(1, length(Rows)), - [#row{seq = Seq, id = Id}] = Rows, - ?assertEqual(<<"doc3">>, Id), - ?assertEqual(6, Seq), - ?assertEqual(UpSeq, LastSeq) - end). - -should_filter_by_view({DbName, _}) -> - ?_test( - begin - DDocId = <<"_design/app">>, - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDocId}, - {<<"language">>, <<"javascript">>}, - {<<"views">>, {[ - {<<"valid">>, {[ - {<<"map">>, <<"function(doc) {" - " if (doc._id == 'doc3') {" - " emit(doc); " - "} }">>} - ]}} - ]}} - ]}), - ChArgs = #changes_args{filter = "_view"}, - Req = {json_req, {[{ - <<"query">>, {[ - {<<"view">>, <<"app/valid">>} - ]} - }]}}, - ok = update_ddoc(DbName, DDoc), - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - ?assertEqual(1, length(Rows)), - [#row{seq = Seq, id = Id}] = Rows, - ?assertEqual(<<"doc3">>, Id), - ?assertEqual(6, Seq), - ?assertEqual(UpSeq, LastSeq) - end). - -should_filter_by_fast_view({DbName, _}) -> - ?_test( - begin - DDocId = <<"_design/app">>, - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDocId}, - {<<"language">>, <<"javascript">>}, - {<<"options">>, {[{<<"seq_indexed">>, true}]}}, - {<<"views">>, {[ - {<<"valid">>, {[ - {<<"map">>, <<"function(doc) {" - " if (doc._id == 'doc3') {" - " emit(doc); " - "} }">>} - ]}} - ]}} - ]}), - ChArgs = #changes_args{filter = "_view"}, - Req = {json_req, {[{ - <<"query">>, {[ - {<<"view">>, <<"app/valid">>} - ]} - }]}}, - ok = update_ddoc(DbName, DDoc), - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - {ok, Db} = couch_db:open_int(DbName, []), - {ok, ViewInfo} = couch_mrview:get_view_info(Db, DDoc, <<"valid">>), - {update_seq, ViewUpSeq} = lists:keyfind(update_seq, 1, ViewInfo), - couch_db:close(Db), - ?assertEqual(1, length(Rows)), - [#row{seq = Seq, id = Id}] = Rows, - ?assertEqual(<<"doc3">>, Id), - ?assertEqual(6, Seq), - ?assertEqual(LastSeq, Seq), - ?assertEqual(UpSeq, ViewUpSeq) - end). - -should_filter_by_erlang_view({DbName, _}) -> - ?_test( - begin - DDocId = <<"_design/app">>, - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDocId}, - {<<"language">>, <<"erlang">>}, - {<<"views">>, {[ - {<<"valid">>, {[ - {<<"map">>, <<"fun({Doc}) ->" - " case lists:keyfind(<<\"_id\">>, 1, Doc) of" - " {<<\"_id\">>, <<\"doc3\">>} -> Emit(Doc, null); " - " false -> ok" - " end " - "end.">>} - ]}} - ]}} - ]}), - ChArgs = #changes_args{filter = "_view"}, - Req = {json_req, {[{ - <<"query">>, {[ - {<<"view">>, <<"app/valid">>} - ]} - }]}}, - ok = update_ddoc(DbName, DDoc), - {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), - ?assertEqual(1, length(Rows)), - [#row{seq = Seq, id = Id}] = Rows, - ?assertEqual(<<"doc3">>, Id), - ?assertEqual(6, Seq), - ?assertEqual(UpSeq, LastSeq) - end). 
- -update_ddoc(DbName, DDoc) -> - {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), - {ok, _} = couch_db:update_doc(Db, DDoc, []), - couch_db:close(Db). - -run_changes_query(DbName, ChangesArgs, Opts) -> - Consumer = spawn_consumer(DbName, ChangesArgs, Opts), - {Rows, LastSeq} = wait_finished(Consumer), - {ok, Db} = couch_db:open_int(DbName, []), - UpSeq = couch_db:get_update_seq(Db), - couch_db:close(Db), - stop_consumer(Consumer), - {Rows, LastSeq, UpSeq}. - -save_doc(Db, Json) -> - Doc = couch_doc:from_json_obj(Json), - {ok, Rev} = couch_db:update_doc(Db, Doc, []), - {ok, couch_doc:rev_to_str(Rev)}. - -get_rows({Consumer, _}) -> - Ref = make_ref(), - Consumer ! {get_rows, Ref}, - Resp = receive - {rows, Ref, Rows} -> - Rows - after ?TIMEOUT -> - timeout - end, - ?assertNotEqual(timeout, Resp), - Resp. - -%% get_heartbeats({Consumer, _}) -> -%% Ref = make_ref(), -%% Consumer ! {get_heartbeats, Ref}, -%% Resp = receive -%% {hearthbeats, Ref, HeartBeats} -> -%% HeartBeats -%% after ?TIMEOUT -> -%% timeout -%% end, -%% ?assertNotEqual(timeout, Resp), -%% Resp. - -clear_rows({Consumer, _}) -> - Ref = make_ref(), - Consumer ! {reset, Ref}, - Resp = receive - {ok, Ref} -> - ok - after ?TIMEOUT -> - timeout - end, - ?assertNotEqual(timeout, Resp), - Resp. - -stop_consumer({Consumer, _}) -> - Ref = make_ref(), - Consumer ! {stop, Ref}, - Resp = receive - {ok, Ref} -> - ok - after ?TIMEOUT -> - timeout - end, - ?assertNotEqual(timeout, Resp), - Resp. - -pause({Consumer, _}) -> - Ref = make_ref(), - Consumer ! {pause, Ref}, - Resp = receive - {paused, Ref} -> - ok - after ?TIMEOUT -> - timeout - end, - ?assertNotEqual(timeout, Resp), - Resp. - -unpause({Consumer, _}) -> - Ref = make_ref(), - Consumer ! {continue, Ref}, - Resp = receive - {ok, Ref} -> - ok - after ?TIMEOUT -> - timeout - end, - ?assertNotEqual(timeout, Resp), - Resp. - -wait_finished({_, ConsumerRef}) -> - receive - {consumer_finished, Rows, LastSeq} -> - {Rows, LastSeq}; - {'DOWN', ConsumerRef, _, _, Msg} when Msg == normal; Msg == ok -> - ok; - {'DOWN', ConsumerRef, _, _, Msg} -> - erlang:error({consumer_died, [ - {module, ?MODULE}, - {line, ?LINE}, - {value, Msg} - ]}) - after ?TIMEOUT -> - erlang:error({consumer_died, [ - {module, ?MODULE}, - {line, ?LINE}, - {value, timeout} - ]}) - end. - - -reset_row_notifications() -> - receive - row -> - reset_row_notifications() - after 0 -> - ok - end. - - -wait_row_notifications(N) -> - receive - row when N == 1 -> - ok; - row when N > 1 -> - wait_row_notifications(N - 1) - after ?TIMEOUT -> - timeout - end. - - -spawn_consumer(DbName, ChangesArgs0, Req) -> - Parent = self(), - spawn_monitor(fun() -> - put(heartbeat_count, 0), - Callback = fun - ({change, {Change}, _}, _, Acc) -> - Id = couch_util:get_value(<<"id">>, Change), - Seq = couch_util:get_value(<<"seq">>, Change), - Del = couch_util:get_value(<<"deleted">>, Change, false), - Doc = couch_util:get_value(doc, Change, nil), - Parent ! row, - [#row{id = Id, seq = Seq, deleted = Del, doc = Doc} | Acc]; - ({stop, LastSeq}, _, Acc) -> - Parent ! 
{consumer_finished, lists:reverse(Acc), LastSeq}, - stop_loop(Parent, Acc); - (timeout, _, Acc) -> - put(heartbeat_count, get(heartbeat_count) + 1), - maybe_pause(Parent, Acc); - (_, _, Acc) -> - maybe_pause(Parent, Acc) - end, - {ok, Db} = couch_db:open_int(DbName, []), - ChangesArgs = case (ChangesArgs0#changes_args.timeout =:= undefined) - andalso (ChangesArgs0#changes_args.heartbeat =:= undefined) of - true -> - ChangesArgs0#changes_args{timeout = 1000, heartbeat = 100}; - false -> - ChangesArgs0 - end, - FeedFun = couch_changes:handle_db_changes(ChangesArgs, Req, Db), - try - FeedFun({Callback, []}) - catch - throw:{stop, _} -> ok; - _:Error -> exit(Error) - after - couch_db:close(Db) - end - end). - -maybe_pause(Parent, Acc) -> - receive - {get_rows, Ref} -> - Parent ! {rows, Ref, lists:reverse(Acc)}, - maybe_pause(Parent, Acc); - {get_heartbeats, Ref} -> - Parent ! {hearthbeats, Ref, get(heartbeat_count)}, - maybe_pause(Parent, Acc); - {reset, Ref} -> - Parent ! {ok, Ref}, - maybe_pause(Parent, []); - {pause, Ref} -> - Parent ! {paused, Ref}, - pause_loop(Parent, Acc); - {stop, Ref} -> - Parent ! {ok, Ref}, - throw({stop, Acc}); - V when V /= updated -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {value, V}, - {reason, "Received unexpected message"}]}) - after 0 -> - Acc - end. - -pause_loop(Parent, Acc) -> - receive - {stop, Ref} -> - Parent ! {ok, Ref}, - throw({stop, Acc}); - {reset, Ref} -> - Parent ! {ok, Ref}, - pause_loop(Parent, []); - {continue, Ref} -> - Parent ! {ok, Ref}, - Acc; - {get_rows, Ref} -> - Parent ! {rows, Ref, lists:reverse(Acc)}, - pause_loop(Parent, Acc) - end. - -stop_loop(Parent, Acc) -> - receive - {get_rows, Ref} -> - Parent ! {rows, Ref, lists:reverse(Acc)}, - stop_loop(Parent, Acc); - {stop, Ref} -> - Parent ! {ok, Ref}, - Acc - end. - -create_db(DbName) -> - couch_db:create(DbName, [?ADMIN_CTX, overwrite]). - -delete_db(DbName) -> - couch_server:delete(DbName, [?ADMIN_CTX]). diff --git a/src/couch/test/couch_compress_tests.erl b/src/couch/test/couch_compress_tests.erl deleted file mode 100644 index addb9a0e2..000000000 --- a/src/couch/test/couch_compress_tests.erl +++ /dev/null @@ -1,85 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_compress_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TERM, {[{a, 1}, {b, 2}, {c, 3}, {d, 4}, {e, 5}]}). --define(NONE, <<131,104,1,108,0,0,0,5,104,2,100,0,1,97,97,1, - 104,2,100,0,1,98,97,2,104,2,100,0,1,99,97,3,104,2,100,0, - 1,100,97,4,104,2,100,0,1,101,97,5,106>>). --define(DEFLATE, <<131,80,0,0,0,48,120,218,203,96,204,97,96, - 96,96,205,96,74,97,96,76,76,100,4,211,73,137,76,96,58,57, - 145,25,76,167,36,178,128,233,212,68,214,44,0,212,169,9,51>>). --define(SNAPPY, <<1,49,64,131,104,1,108,0,0,0,5,104,2,100,0, - 1,97,97,1,104,1,8,8,98,97,2,5,8,8,99,97,3,5,8,44,100,97, - 4,104,2,100,0,1,101,97,5,106>>). --define(CORRUPT, <<2,12,85,06>>). 
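For orientation: the binaries above are precomputed outputs of each compression scheme for ?TERM, and ?NONE is simply the uncompressed external term format (hence the leading 131 tag), while ?CORRUPT begins with an unknown tag byte. A sketch of how such fixtures could be regenerated, assuming a node with couch_compress loaded; for this float-free term, plain term_to_binary/1 matches the none scheme:

    % Illustrative only: rebuild the fixture binaries from the test term.
    regen_fixtures() ->
        Term = {[{a, 1}, {b, 2}, {c, 3}, {d, 4}, {e, 5}]},
        None = couch_compress:compress(Term, none),
        None = term_to_binary(Term),    % external term format, tag byte 131
        {None,
            couch_compress:compress(Term, snappy),
            couch_compress:compress(Term, {deflate, 9})}.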
- - -compress_test_() -> - [ - ?_assertEqual(?NONE, couch_compress:compress(?TERM, none)), - ?_assertEqual(?DEFLATE, couch_compress:compress(?TERM, {deflate, 9})), - ?_assertEqual(?SNAPPY, couch_compress:compress(?TERM, snappy)) - ]. - -decompress_test_() -> - [ - ?_assertEqual(?TERM, couch_compress:decompress(?NONE)), - ?_assertEqual(?TERM, couch_compress:decompress(?DEFLATE)), - ?_assertEqual(?TERM, couch_compress:decompress(?SNAPPY)), - ?_assertError(invalid_compression, couch_compress:decompress(?CORRUPT)) - ]. - -recompress_test_() -> - [ - ?_assertEqual(?DEFLATE, couch_compress:compress(?NONE, {deflate, 9})), - ?_assertEqual(?SNAPPY, couch_compress:compress(?NONE, snappy)), - ?_assertEqual(?NONE, couch_compress:compress(?DEFLATE, none)), - ?_assertEqual(?SNAPPY, couch_compress:compress(?DEFLATE, snappy)), - ?_assertEqual(?NONE, couch_compress:compress(?SNAPPY, none)), - ?_assertEqual(?DEFLATE, couch_compress:compress(?SNAPPY, {deflate, 9})) - ]. - -is_compressed_test_() -> - [ - ?_assert(couch_compress:is_compressed(?NONE, none)), - ?_assert(couch_compress:is_compressed(?DEFLATE, {deflate, 9})), - ?_assert(couch_compress:is_compressed(?SNAPPY, snappy)), - ?_assertNot(couch_compress:is_compressed(?NONE, {deflate, 0})), - ?_assertNot(couch_compress:is_compressed(?NONE, {deflate, 9})), - ?_assertNot(couch_compress:is_compressed(?NONE, snappy)), - ?_assertNot(couch_compress:is_compressed(?DEFLATE, none)), - ?_assertNot(couch_compress:is_compressed(?DEFLATE, snappy)), - ?_assertNot(couch_compress:is_compressed(?SNAPPY, none)), - ?_assertNot(couch_compress:is_compressed(?SNAPPY, {deflate, 9})), - ?_assertError(invalid_compression, - couch_compress:is_compressed(?CORRUPT, none)), - ?_assertError(invalid_compression, - couch_compress:is_compressed(?CORRUPT, {deflate, 9})), - ?_assertError(invalid_compression, - couch_compress:is_compressed(?CORRUPT, snappy)) - ]. - -uncompressed_size_test_() -> - [ - ?_assertEqual(49, couch_compress:uncompressed_size(?NONE)), - ?_assertEqual(49, couch_compress:uncompressed_size(?DEFLATE)), - ?_assertEqual(49, couch_compress:uncompressed_size(?SNAPPY)), - ?_assertEqual(5, couch_compress:uncompressed_size( - couch_compress:compress(x, {deflate, 9}))), - ?_assertError(invalid_compression, - couch_compress:uncompressed_size(?CORRUPT)) - ]. diff --git a/src/couch/test/couch_db_doc_tests.erl b/src/couch/test/couch_db_doc_tests.erl deleted file mode 100644 index cdcf81d15..000000000 --- a/src/couch/test/couch_db_doc_tests.erl +++ /dev/null @@ -1,113 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_db_doc_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - -start() -> - test_util:start_couch([ioq]). - - -setup() -> - DbName = ?tempdb(), - config:set("couchdb", "stem_interactive_updates", "false", false), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - couch_db:close(Db), - DbName. - - -teardown(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok. 
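The tests below exercise revision stemming: with revs_limit set to 5, a document that has accumulated eleven revisions keeps only the five newest rev ids in its path. A shell-style sketch of the knob itself, assuming an admin-opened handle (the db name is illustrative):

    {ok, Db} = couch_db:open_int(<<"mydb">>, [?ADMIN_CTX]),
    couch_db:set_revs_limit(Db, 5),
    % Later updates stem the path: open_doc_revs/4 on the newest rev
    % then yields #doc{revs = {Pos, Ids}} with length(Ids) =< 5.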
- - -couch_db_doc_test_() -> - { - "CouchDB doc tests", - { - setup, - fun start/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_truncate_number_of_revisions/1, - fun should_raise_bad_request_on_invalid_rev/1 - ] - } - } - }. - - -should_truncate_number_of_revisions(DbName) -> - DocId = <<"foo">>, - Db = open_db(DbName), - couch_db:set_revs_limit(Db, 5), - Rev = create_doc(Db, DocId), - Rev10 = add_revisions(Db, DocId, Rev, 10), - {ok, [{ok, #doc{revs = {11, Revs}}}]} = open_doc_rev(Db, DocId, Rev10), - ?_assertEqual(5, length(Revs)). - - -should_raise_bad_request_on_invalid_rev(DbName) -> - DocId = <<"foo">>, - InvalidRev1 = <<"foo">>, - InvalidRev2 = <<"a-foo">>, - InvalidRev3 = <<"1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>, - Expect = {bad_request, <<"Invalid rev format">>}, - Db = open_db(DbName), - create_doc(Db, DocId), - [ - {InvalidRev1, - ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev1, 1))}, - {InvalidRev2, - ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev2, 1))}, - {InvalidRev3, - ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev3, 1))} - ]. - - -open_db(DbName) -> - {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), - Db. - - -create_doc(Db, DocId) -> - add_revision(Db, DocId, undefined). - - -open_doc_rev(Db0, DocId, Rev) -> - {ok, Db} = couch_db:reopen(Db0), - couch_db:open_doc_revs(Db, DocId, [couch_doc:parse_rev(Rev)], []). - - -add_revision(Db, DocId, undefined) -> - add_revision(Db, DocId, []); -add_revision(Db, DocId, Rev) when is_binary(Rev) -> - add_revision(Db, DocId, [{<<"_rev">>, Rev}]); -add_revision(Db0, DocId, Rev) -> - {ok, Db} = couch_db:reopen(Db0), - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, DocId}, - {<<"value">>, DocId} - ] ++ Rev}), - {ok, NewRev} = couch_db:update_doc(Db, Doc, []), - {ok, _} = couch_db:ensure_full_commit(Db), - couch_doc:rev_to_str(NewRev). - - -add_revisions(Db, DocId, Rev, N) -> - lists:foldl(fun(_, OldRev) -> - add_revision(Db, DocId, OldRev) - end, Rev, lists:seq(1, N)). diff --git a/src/couch/test/couch_db_mpr_tests.erl b/src/couch/test/couch_db_mpr_tests.erl deleted file mode 100644 index bb97c66d7..000000000 --- a/src/couch/test/couch_db_mpr_tests.erl +++ /dev/null @@ -1,137 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_db_mpr_tests). - - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 30). - --define(USER, "couch_db_admin"). --define(PASS, "pass"). --define(AUTH, {basic_auth, {?USER, ?PASS}}). --define(CONTENT_JSON, {"Content-Type", "application/json"}). --define(JSON_BODY, "{\"foo\": \"bar\"}"). --define(CONTENT_MULTI_RELATED, - {"Content-Type", "multipart/related;boundary=\"bound\""}). 
- - -setup() -> - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - TmpDb = ?tempdb(), - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(couch_httpd, port), - Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), - Url. - - -teardown(Url) -> - catch delete_db(Url), - ok = config:delete("admins", ?USER, _Persist=false). - - -create_db(Url) -> - {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), - ?assert(Status =:= 201 orelse Status =:= 202). - - -delete_db(Url) -> - {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). - - -create_doc(Url, Id, Body, Type) -> - test_request:put(Url ++ "/" ++ Id, [Type, ?AUTH], Body). - - -delete_doc(Url, Id, Rev) -> - test_request:delete(Url ++ "/" ++ Id ++ "?rev=" ++ ?b2l(Rev)). - - -couch_db_mpr_test_() -> - { - "multi-part attachment tests", - { - setup, - fun test_util:start_couch/0, - fun test_util:stop_couch/1, - { - foreach, - fun setup/0, - fun teardown/1, - [ - fun recreate_with_mpr/1 - ] - } - } - }. - - -recreate_with_mpr(Url) -> - {timeout, ?TIMEOUT, ?_test(begin - DocId1 = "foo", - DocId2 = "bar", - - create_db(Url), - create_and_delete_doc(Url, DocId1), - Rev1 = create_with_mpr(Url, DocId1), - delete_db(Url), - - create_db(Url), - create_and_delete_doc(Url, DocId1), - % We create a second unrelated doc to change the - % position on disk where the attachment is written - % so that we can assert that the position on disk - % is not included when calculating a revision. - create_and_delete_doc(Url, DocId2), - Rev2 = create_with_mpr(Url, DocId1), - delete_db(Url), - - ?assertEqual(Rev1, Rev2) - end)}. - - -create_and_delete_doc(Url, DocId) -> - {ok, _, _, Resp} = create_doc(Url, DocId, ?JSON_BODY, ?CONTENT_JSON), - {Props} = ?JSON_DECODE(Resp), - Rev = couch_util:get_value(<<"rev">>, Props, undefined), - ?assert(is_binary(Rev)), - {ok, _, _, _} = delete_doc(Url, DocId, Rev). - - -create_with_mpr(Url, DocId) -> - {ok, _, _, Resp} = create_doc(Url, DocId, mpr(), ?CONTENT_MULTI_RELATED), - {Props} = ?JSON_DECODE(Resp), - Rev = couch_util:get_value(<<"rev">>, Props, undefined), - ?assert(is_binary(Rev)), - Rev. - - -mpr() -> - lists:concat([ - "--bound\r\n", - "Content-Type: application/json\r\n\r\n", - "{", - "\"body\":\"stuff\"," - "\"_attachments\":", - "{\"foo.txt\":{", - "\"follows\":true,", - "\"content_type\":\"text/plain\"," - "\"length\":21", - "}}" - "}", - "\r\n--bound\r\n\r\n", - "this is 21 chars long", - "\r\n--bound--epilogue" - ]). diff --git a/src/couch/test/couch_db_plugin_tests.erl b/src/couch/test/couch_db_plugin_tests.erl deleted file mode 100644 index 93551adbc..000000000 --- a/src/couch/test/couch_db_plugin_tests.erl +++ /dev/null @@ -1,205 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_db_plugin_tests). - --export([ - validate_dbname/2, - before_doc_update/3, - after_doc_read/2, - validate_docid/1, - check_is_admin/1, - on_delete/2 -]). 
- --export([ %% couch_epi_plugin behaviour - app/0, - providers/0, - services/0, - data_providers/0, - data_subscriptions/0, - processes/0, - notify/3 -]). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - -%% couch_epi_plugin behaviour - -app() -> test_app. -providers() -> [{couch_db, ?MODULE}]. -services() -> []. -data_providers() -> []. -data_subscriptions() -> []. -processes() -> []. -notify(_, _, _) -> ok. -fake_db() -> test_util:fake_db([]). - -setup() -> - couch_tests:setup([ - couch_epi_dispatch:dispatch(chttpd, ?MODULE) - ]). - -teardown(Ctx) -> - couch_tests:teardown(Ctx). - -validate_dbname({true, _Db}, _) -> {decided, true}; -validate_dbname({false, _Db}, _) -> {decided, false}; -validate_dbname({fail, _Db}, _) -> throw(validate_dbname); -validate_dbname({pass, _Db}, _) -> no_decision. - -before_doc_update({fail, _Doc}, _Db, interactive_edit) -> throw(before_doc_update); -before_doc_update({true, Doc}, Db, interactive_edit) -> [{true, [before_doc_update|Doc]}, Db, interactive_edit]; -before_doc_update({false, Doc}, Db, interactive_edit) -> [{false, Doc}, Db, interactive_edit]. - -after_doc_read({fail, _Doc}, _Db) -> throw(after_doc_read); -after_doc_read({true, Doc}, Db) -> [{true, [after_doc_read|Doc]}, Db]; -after_doc_read({false, Doc}, Db) -> [{false, Doc}, Db]. - -validate_docid({true, _Id}) -> true; -validate_docid({false, _Id}) -> false; -validate_docid({fail, _Id}) -> throw(validate_docid). - -check_is_admin({true, _Db}) -> true; -check_is_admin({false, _Db}) -> false; -check_is_admin({fail, _Db}) -> throw(check_is_admin). - -on_delete(true, _Opts) -> true; -on_delete(false, _Opts) -> false; -on_delete(fail, _Opts) -> throw(on_delete). - -callback_test_() -> - { - "callback tests", - { - setup, fun setup/0, fun teardown/1, - [ - {"validate_dbname_match", fun validate_dbname_match/0}, - {"validate_dbname_no_match", fun validate_dbname_no_match/0}, - {"validate_dbname_throw", fun validate_dbname_throw/0}, - {"validate_dbname_pass", fun validate_dbname_pass/0}, - - {"before_doc_update_match", fun before_doc_update_match/0}, - {"before_doc_update_no_match", fun before_doc_update_no_match/0}, - {"before_doc_update_throw", fun before_doc_update_throw/0}, - - {"after_doc_read_match", fun after_doc_read_match/0}, - {"after_doc_read_no_match", fun after_doc_read_no_match/0}, - {"after_doc_read_throw", fun after_doc_read_throw/0}, - - {"validate_docid_match", fun validate_docid_match/0}, - {"validate_docid_no_match", fun validate_docid_no_match/0}, - {"validate_docid_throw", fun validate_docid_throw/0}, - - {"check_is_admin_match", fun check_is_admin_match/0}, - {"check_is_admin_no_match", fun check_is_admin_no_match/0}, - {"check_is_admin_throw", fun check_is_admin_throw/0}, - - {"on_delete_match", fun on_delete_match/0}, - {"on_delete_no_match", fun on_delete_no_match/0}, - {"on_delete_throw", fun on_delete_throw/0} - ] - } - }. - - -validate_dbname_match() -> - ?assert(couch_db_plugin:validate_dbname( - {true, [db]}, db, fun(_, _) -> pass end)). - -validate_dbname_no_match() -> - ?assertNot(couch_db_plugin:validate_dbname( - {false, [db]}, db, fun(_, _) -> pass end)). - -validate_dbname_throw() -> - ?assertThrow( - validate_dbname, - couch_db_plugin:validate_dbname( - {fail, [db]}, db, fun(_, _) -> pass end)). - -validate_dbname_pass() -> - ?assertEqual(pass, couch_db_plugin:validate_dbname( - {pass, [db]}, db, fun(_, _) -> pass end)). 
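The four outcomes exercised above pin down the provider contract: couch_db_plugin asks each registered provider in turn, a provider either short-circuits with {decided, Bool} or defers with no_decision, and if every provider defers, the default fun (the fun(_, _) -> pass end above) is applied. A hypothetical standalone provider clause, with illustrative names:

    %% Accept db names under a private prefix, defer everything else.
    validate_dbname(<<"plugin_dbs/", _/binary>>, _Normalized) ->
        {decided, true};
    validate_dbname(_DbName, _Normalized) ->
        no_decision.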
- -before_doc_update_match() -> - ?assertMatch( - {true, [before_doc_update, doc]}, - couch_db_plugin:before_doc_update( - fake_db(), {true, [doc]}, interactive_edit)). - -before_doc_update_no_match() -> - ?assertMatch( - {false, [doc]}, - couch_db_plugin:before_doc_update( - fake_db(), {false, [doc]}, interactive_edit)). - -before_doc_update_throw() -> - ?assertThrow( - before_doc_update, - couch_db_plugin:before_doc_update( - fake_db(), {fail, [doc]}, interactive_edit)). - - -after_doc_read_match() -> - ?assertMatch( - {true, [after_doc_read, doc]}, - couch_db_plugin:after_doc_read(fake_db(), {true, [doc]})). - -after_doc_read_no_match() -> - ?assertMatch( - {false, [doc]}, - couch_db_plugin:after_doc_read(fake_db(), {false, [doc]})). - -after_doc_read_throw() -> - ?assertThrow( - after_doc_read, - couch_db_plugin:after_doc_read(fake_db(), {fail, [doc]})). - - -validate_docid_match() -> - ?assert(couch_db_plugin:validate_docid({true, [doc]})). - -validate_docid_no_match() -> - ?assertNot(couch_db_plugin:validate_docid({false, [doc]})). - -validate_docid_throw() -> - ?assertThrow( - validate_docid, - couch_db_plugin:validate_docid({fail, [doc]})). - - -check_is_admin_match() -> - ?assert(couch_db_plugin:check_is_admin({true, [db]})). - -check_is_admin_no_match() -> - ?assertNot(couch_db_plugin:check_is_admin({false, [db]})). - -check_is_admin_throw() -> - ?assertThrow( - check_is_admin, - couch_db_plugin:check_is_admin({fail, [db]})). - -on_delete_match() -> - ?assertMatch( - [true], - couch_db_plugin:on_delete(true, [])). - -on_delete_no_match() -> - ?assertMatch( - [false], - couch_db_plugin:on_delete(false, [])). - -on_delete_throw() -> - ?assertThrow( - on_delete, - couch_db_plugin:on_delete(fail, [])). diff --git a/src/couch/test/couch_db_props_upgrade_tests.erl b/src/couch/test/couch_db_props_upgrade_tests.erl deleted file mode 100644 index 40ad283cf..000000000 --- a/src/couch/test/couch_db_props_upgrade_tests.erl +++ /dev/null @@ -1,83 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_db_props_upgrade_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_mrview/include/couch_mrview.hrl"). - - -setup() -> - DbName = <<"test">>, - DbFileName = "test.couch", - OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]), - - DbDir = config:get("couchdb", "database_dir"), - NewDbFilePath = filename:join([DbDir, DbFileName]), - - file:delete(NewDbFilePath), - {ok, _} = file:copy(OldDbFilePath, NewDbFilePath), - - DbName. - - -teardown(DbName) when is_binary(DbName) -> - couch_server:delete(DbName, [?ADMIN_CTX]), - ok. - - -old_db_info_test_() -> - { - "Old database versions work", - { - setup, - fun test_util:start_couch/0, - fun test_util:stop_couch/1, - { - foreach, - fun setup/0, - fun teardown/1, - [ - fun can_get_props/1, - fun can_get_db_info/1, - fun can_compact_db/1 - ] - } - } - }. 
- - -can_get_props(DbName) -> - ?_test(begin - {ok, Db} = couch_db:open_int(DbName, []), - Props = couch_db_engine:get_props(Db), - ?assert(is_list(Props)) - end). - - -can_get_db_info(DbName) -> - ?_test(begin - {ok, Db} = couch_db:open_int(DbName, []), - {ok, Info} = couch_db:get_db_info(Db), - Props = couch_util:get_value(props, Info), - ?assertEqual({[]}, Props) - end). - - -can_compact_db(DbName) -> - ?_test(begin - couch_util:with_db(DbName, fun(Db) -> - couch_db:start_compact(Db), - couch_db:wait_for_compaction(Db) - end) - end). diff --git a/src/couch/test/couch_db_split_tests.erl b/src/couch/test/couch_db_split_tests.erl deleted file mode 100644 index 7d2bb4006..000000000 --- a/src/couch/test/couch_db_split_tests.erl +++ /dev/null @@ -1,302 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_db_split_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(RINGTOP, 2 bsl 31). - - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - DbName. - - -teardown(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - FilePath = couch_db:get_filepath(Db), - ok = couch_db:close(Db), - ok = file:delete(FilePath). - - -split_test_() -> - Cases = [ - {"Should split an empty shard", 0, 2}, - {"Should split shard in half", 100, 2}, - {"Should split shard in three", 99, 3}, - {"Should split shard in four", 100, 4} - ], - { - setup, - fun test_util:start_couch/0, fun test_util:stop/1, - [ - { - foreachx, - fun(_) -> setup() end, fun(_, St) -> teardown(St) end, - [{Case, fun should_split_shard/2} || Case <- Cases] - }, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_fail_on_missing_source/1, - fun should_fail_on_existing_target/1, - fun should_fail_on_invalid_target_name/1, - fun should_crash_on_invalid_tmap/1 - ] - } - ] - }. - - -should_split_shard({Desc, TotalDocs, Q}, DbName) -> - {ok, ExpectSeq} = create_docs(DbName, TotalDocs), - Ranges = make_ranges(Q), - TMap = make_targets(Ranges), - DocsPerRange = TotalDocs div Q, - PickFun = make_pickfun(DocsPerRange), - {Desc, ?_test(begin - {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun), - ?assertEqual(ExpectSeq, UpdateSeq), - maps:map(fun(Range, Name) -> - {ok, Db} = couch_db:open_int(Name, []), - FilePath = couch_db:get_filepath(Db), - %% target actually exists - ?assertMatch({ok, _}, file:read_file_info(FilePath)), - %% target's update seq is the same as source's update seq - USeq = couch_db:get_update_seq(Db), - ?assertEqual(ExpectSeq, USeq), - %% target shard has all the expected in its range docs - {ok, DocsInShard} = couch_db:fold_docs(Db, fun(FDI, Acc) -> - DocId = FDI#full_doc_info.id, - ExpectedRange = PickFun(DocId, Ranges, undefined), - ?assertEqual(ExpectedRange, Range), - {ok, Acc + 1} - end, 0), - ?assertEqual(DocsPerRange, DocsInShard), - ok = couch_db:close(Db), - ok = file:delete(FilePath) - end, TMap) - end)}. 
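For readers skimming the assertions above: couch_db_split:split/3 takes the source name, a map of Range => TargetName, and a pick function that routes each doc id to one of the ranges, returning {ok, UpdateSeq} of the source on success. A minimal sketch under those assumptions, mirroring the shape of fake_pickfun/3 defined further down:

    %% Illustrative: split a source shard into two targets, routing
    %% every document to the first range.
    split_in_two(Source) ->
        Ranges = [[0, 2147483647], [2147483648, 4294967295]],
        TMap = maps:from_list([{R, ?tempdb()} || R <- Ranges]),
        PickFun = fun(_DocId, [First | _], _HashFun) -> First end,
        {ok, _UpdateSeq} = couch_db_split:split(Source, TMap, PickFun).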
-
-
-should_fail_on_missing_source(_DbName) ->
-    DbName = ?tempdb(),
-    Ranges = make_ranges(2),
-    TMap = make_targets(Ranges),
-    Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
-    ?_assertEqual({error, missing_source}, Response).
-
-
-should_fail_on_existing_target(DbName) ->
-    Ranges = make_ranges(2),
-    TMap = maps:map(fun(_, _) -> DbName end, make_targets(Ranges)),
-    Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
-    ?_assertMatch({error, {target_create_error, DbName, eexist}}, Response).
-
-
-should_fail_on_invalid_target_name(DbName) ->
-    Ranges = make_ranges(2),
-    TMap = maps:map(fun([B, _], _) ->
-        iolist_to_binary(["_$", couch_util:to_hex(<<B:32/integer>>)])
-    end, make_targets(Ranges)),
-    Expect = {error, {target_create_error, <<"_$00000000">>,
-        {illegal_database_name, <<"_$00000000">>}}},
-    Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
-    ?_assertMatch(Expect, Response).
-
-
-should_crash_on_invalid_tmap(DbName) ->
-    Ranges = make_ranges(1),
-    TMap = make_targets(Ranges),
-    ?_assertError(function_clause,
-        couch_db_split:split(DbName, TMap, fun fake_pickfun/3)).
-
-
-copy_local_docs_test_() ->
-    Cases = [
-        {"Should work with no docs", 0, 2},
-        {"Should copy local docs after split in two", 100, 2},
-        {"Should copy local docs after split in three", 99, 3},
-        {"Should copy local docs after split in four", 100, 4}
-    ],
-    {
-        setup,
-        fun test_util:start_couch/0, fun test_util:stop/1,
-        [
-            {
-                foreachx,
-                fun(_) -> setup() end, fun(_, St) -> teardown(St) end,
-                [{Case, fun should_copy_local_docs/2} || Case <- Cases]
-            },
-            {"Should return error on missing source",
-                fun should_fail_copy_local_on_missing_source/0}
-        ]
-    }.
-
-
-should_copy_local_docs({Desc, TotalDocs, Q}, DbName) ->
-    {ok, ExpectSeq} = create_docs(DbName, TotalDocs),
-    Ranges = make_ranges(Q),
-    TMap = make_targets(Ranges),
-    DocsPerRange = TotalDocs div Q,
-    PickFun = make_pickfun(DocsPerRange),
-    {Desc, ?_test(begin
-        {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
-        ?assertEqual(ExpectSeq, UpdateSeq),
-        Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
-        ?assertEqual(ok, Response),
-        maps:map(fun(Range, Name) ->
-            {ok, Db} = couch_db:open_int(Name, []),
-            FilePath = couch_db:get_filepath(Db),
-            %% target shard has all the expected in its range docs
-            {ok, DocsInShard} = couch_db:fold_local_docs(Db, fun(Doc, Acc) ->
-                DocId = Doc#doc.id,
-                ExpectedRange = PickFun(DocId, Ranges, undefined),
-                ?assertEqual(ExpectedRange, Range),
-                {ok, Acc + 1}
-            end, 0, []),
-            ?assertEqual(DocsPerRange, DocsInShard),
-            ok = couch_db:close(Db),
-            ok = file:delete(FilePath)
-        end, TMap)
-    end)}.
-
-
-should_fail_copy_local_on_missing_source() ->
-    DbName = ?tempdb(),
-    Ranges = make_ranges(2),
-    TMap = make_targets(Ranges),
-    PickFun = fun fake_pickfun/3,
-    Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
-    ?assertEqual({error, missing_source}, Response).
-
-
-cleanup_target_test_() ->
-    {
-        setup,
-        fun test_util:start_couch/0, fun test_util:stop/1,
-        [
-            {
-                setup,
-                fun setup/0, fun teardown/1,
-                fun should_delete_existing_targets/1
-            },
-            {"Should return error on missing source",
-                fun should_fail_cleanup_target_on_missing_source/0}
-        ]
-    }.
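The ring arithmetic behind these cases lives in make_ranges/1 further down. Note that ?RINGTOP is 2 bsl 31, which is 2^32 = 4294967296, so the helper carves the full 32-bit hash ring into Q contiguous [Begin, End] pairs, clamping the last range to ?RINGTOP - 1. Worked through for Q = 2:

    %% Incr = (2 bsl 31) div 2          =  2147483648
    %% lists:seq(Incr, ?RINGTOP, Incr)  =  [2147483648, 4294967296]
    %% End 2147483648 <  ?RINGTOP - 1  ->  [0, 2147483647]
    %% End 4294967296 >= ?RINGTOP - 1  ->  [2147483648, 4294967295]
    %% make_ranges(2)                   =  [[0, 2147483647],
    %%                                      [2147483648, 4294967295]]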
- - -should_delete_existing_targets(SourceName) -> - {ok, ExpectSeq} = create_docs(SourceName, 100), - Ranges = make_ranges(2), - TMap = make_targets(Ranges), - PickFun = make_pickfun(50), - ?_test(begin - {ok, UpdateSeq} = couch_db_split:split(SourceName, TMap, PickFun), - ?assertEqual(ExpectSeq, UpdateSeq), - maps:map(fun(_Range, TargetName) -> - FilePath = couch_util:with_db(TargetName, fun(Db) -> - couch_db:get_filepath(Db) - end), - ?assertMatch({ok, _}, file:read_file_info(FilePath)), - Response = couch_db_split:cleanup_target(SourceName, TargetName), - ?assertEqual(ok, Response), - ?assertEqual({error, enoent}, file:read_file_info(FilePath)) - end, TMap) - end). - - -should_fail_cleanup_target_on_missing_source() -> - SourceName = ?tempdb(), - TargetName = ?tempdb(), - Response = couch_db_split:cleanup_target(SourceName, TargetName), - ?assertEqual({error, missing_source}, Response). - - -make_pickfun(DocsPerRange) -> - fun(DocId, Ranges, _HashFun) -> - Id = docid_to_integer(DocId), - case {Id div DocsPerRange, Id rem DocsPerRange} of - {N, 0} -> - lists:nth(N, Ranges); - {N, _} -> - lists:nth(N + 1, Ranges) - end - end. - - -fake_pickfun(_, Ranges, _) -> - hd(Ranges). - - -make_targets([]) -> - maps:new(); -make_targets(Ranges) -> - Targets = lists:map(fun(Range) -> - {Range, ?tempdb()} - end, Ranges), - maps:from_list(Targets). - - -make_ranges(Q) when Q > 0 -> - Incr = (2 bsl 31) div Q, - lists:map(fun - (End) when End >= ?RINGTOP - 1 -> - [End - Incr, ?RINGTOP - 1]; - (End) -> - [End - Incr, End - 1] - end, lists:seq(Incr, ?RINGTOP, Incr)); -make_ranges(_) -> - []. - - -create_docs(DbName, 0) -> - couch_util:with_db(DbName, fun(Db) -> - UpdateSeq = couch_db:get_update_seq(Db), - {ok, UpdateSeq} - end); -create_docs(DbName, DocNum) -> - Docs = lists:foldl(fun(I, Acc) -> - [create_doc(I), create_local_doc(I) | Acc] - end, [], lists:seq(DocNum, 1, -1)), - couch_util:with_db(DbName, fun(Db) -> - {ok, _Result} = couch_db:update_docs(Db, Docs), - {ok, _StartTime} = couch_db:ensure_full_commit(Db), - {ok, Db1} = couch_db:reopen(Db), - UpdateSeq = couch_db:get_update_seq(Db1), - {ok, UpdateSeq} - end). - - -create_doc(I) -> - create_prefix_id_doc(I, ""). - - -create_local_doc(I) -> - create_prefix_id_doc(I, "_local/"). - - -create_prefix_id_doc(I, Prefix) -> - Id = iolist_to_binary(io_lib:format(Prefix ++ "~3..0B", [I])), - couch_doc:from_json_obj({[{<<"_id">>, Id}, {<<"value">>, I}]}). - - -docid_to_integer(<<"_local/", DocId/binary>>) -> - docid_to_integer(DocId); -docid_to_integer(DocId) -> - list_to_integer(binary_to_list(DocId)). diff --git a/src/couch/test/couch_db_tests.erl b/src/couch/test/couch_db_tests.erl deleted file mode 100644 index d64f7c640..000000000 --- a/src/couch/test/couch_db_tests.erl +++ /dev/null @@ -1,171 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_db_tests). - --include_lib("couch/include/couch_eunit.hrl"). - --define(TIMEOUT, 120). 
-
-
-
-create_delete_db_test_()->
-    {
-        "Database create/delete tests",
-        {
-            setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
-            {
-                foreach,
-                fun() -> ?tempdb() end,
-                [
-                    fun should_create_db/1,
-                    fun should_delete_db/1
-                ]
-            }
-        }
-    }.
-
-create_delete_multiple_dbs_test_()->
-    {
-        "Multiple database create/delete tests",
-        {
-            setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
-            {
-                foreach,
-                fun() -> [?tempdb() || _ <- lists:seq(1, 6)] end,
-                [
-                    fun should_create_multiple_dbs/1,
-                    fun should_delete_multiple_dbs/1
-                ]
-            }
-        }
-    }.
-
-create_delete_database_continuously_test_() ->
-    {
-        "Continuous database create/delete tests",
-        {
-            setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
-            {
-                foreachx,
-                fun(_) -> ?tempdb() end,
-                [
-                    {10, fun should_create_delete_database_continuously/2},
-                    {100, fun should_create_delete_database_continuously/2}
-                ]
-            }
-        }
-    }.
-
-open_db_test_()->
-    {
-        "Database open tests",
-        {
-            setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
-            {
-                foreach,
-                fun() -> ?tempdb() end,
-                [
-                    fun should_create_db_if_missing/1,
-                    fun should_open_db_if_exists/1
-                ]
-            }
-        }
-    }.
-
-
-should_create_db(DbName) ->
-    ?_test(begin
-        {ok, Before} = couch_server:all_databases(),
-        ?assertNot(lists:member(DbName, Before)),
-        ?assert(create_db(DbName)),
-        {ok, After} = couch_server:all_databases(),
-        ?assert(lists:member(DbName, After))
-    end).
-
-should_delete_db(DbName) ->
-    ?_test(begin
-        ?assert(create_db(DbName)),
-        {ok, Before} = couch_server:all_databases(),
-        ?assert(lists:member(DbName, Before)),
-        couch_server:delete(DbName, []),
-        {ok, After} = couch_server:all_databases(),
-        ?assertNot(lists:member(DbName, After))
-    end).
-
-should_create_multiple_dbs(DbNames) ->
-    ?_test(begin
-        gen_server:call(couch_server, {set_max_dbs_open, 3}),
-        {ok, Before} = couch_server:all_databases(),
-        [?assertNot(lists:member(DbName, Before)) || DbName <- DbNames],
-        [?assert(create_db(DbName)) || DbName <- DbNames],
-        {ok, After} = couch_server:all_databases(),
-        [?assert(lists:member(DbName, After)) || DbName <- DbNames]
-    end).
-
-should_delete_multiple_dbs(DbNames) ->
-    ?_test(begin
-        [?assert(create_db(DbName)) || DbName <- DbNames],
-        {ok, Before} = couch_server:all_databases(),
-        [?assert(lists:member(DbName, Before)) || DbName <- DbNames],
-        [?assert(delete_db(DbName)) || DbName <- DbNames],
-        {ok, After} = couch_server:all_databases(),
-        [?assertNot(lists:member(DbName, After)) || DbName <- DbNames]
-    end).
-
-should_create_delete_database_continuously(Times, DbName) ->
-    {lists:flatten(io_lib:format("~b times", [Times])),
-    {timeout, ?TIMEOUT, ?_test(begin
-        ?assert(create_db(DbName)),
-        lists:foreach(fun(_) ->
-            ?assert(delete_db(DbName)),
-            ?assert(create_db(DbName))
-        end, lists:seq(1, Times))
-    end)}}.
-
-should_create_db_if_missing(DbName) ->
-    ?_test(begin
-        {ok, Before} = couch_server:all_databases(),
-        ?assertNot(lists:member(DbName, Before)),
-        {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
-        ok = couch_db:close(Db),
-        {ok, After} = couch_server:all_databases(),
-        ?assert(lists:member(DbName, After))
-    end).
-
-should_open_db_if_exists(DbName) ->
-    ?_test(begin
-        ?assert(create_db(DbName)),
-        {ok, Before} = couch_server:all_databases(),
-        ?assert(lists:member(DbName, Before)),
-        {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
-        ok = couch_db:close(Db),
-        {ok, After} = couch_server:all_databases(),
-        ?assert(lists:member(DbName, After))
-    end).
- - -create_db(DbName) -> - create_db(DbName, []). - -create_db(DbName, Opts) -> - {ok, Db} = couch_db:create(DbName, Opts), - ok = couch_db:close(Db), - true. - -delete_db(DbName) -> - ok = couch_server:delete(DbName, []), - true. diff --git a/src/couch/test/couch_doc_json_tests.erl b/src/couch/test/couch_doc_json_tests.erl deleted file mode 100644 index 51f228900..000000000 --- a/src/couch/test/couch_doc_json_tests.erl +++ /dev/null @@ -1,493 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_doc_json_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - - -setup() -> - mock(couch_log), - mock(config), - mock(couch_db_plugin), - ok. - -teardown(_) -> - meck:unload(couch_log), - meck:unload(config), - meck:unload(couch_db_plugin), - ok. - -mock(couch_db_plugin) -> - ok = meck:new(couch_db_plugin, [passthrough]), - ok = meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end), - ok; -mock(couch_log) -> - ok = meck:new(couch_log, [passthrough]), - ok = meck:expect(couch_log, debug, fun(_, _) -> ok end), - ok; -mock(config) -> - meck:new(config, [passthrough]), - meck:expect(config, get_integer, - fun("couchdb", "max_document_size", 4294967296) -> 1024 end), - meck:expect(config, get, fun(_, _) -> undefined end), - meck:expect(config, get, fun(_, _, Default) -> Default end), - ok. - - -json_doc_test_() -> - { - setup, - fun setup/0, fun teardown/1, - fun(_) -> - [{"Document from JSON", [ - from_json_with_dbname_error_cases(), - from_json_with_db_name_success_cases(), - from_json_success_cases(), - from_json_error_cases() - ]}, - {"Document to JSON", [ - to_json_success_cases() - ]}] - end - }. - -from_json_success_cases() -> - Cases = [ - { - {[]}, - #doc{}, - "Return an empty document for an empty JSON object." - }, - { - {[{<<"_id">>, <<"zing!">>}]}, - #doc{id = <<"zing!">>}, - "Parses document ids." - }, - { - {[{<<"_id">>, <<"_design/foo">>}]}, - #doc{id = <<"_design/foo">>}, - "_design/document ids." - }, - { - {[{<<"_id">>, <<"_local/bam">>}]}, - #doc{id = <<"_local/bam">>}, - "_local/document ids." - }, - { - {[{<<"_rev">>, <<"4-230234">>}]}, - #doc{revs = {4, [<<"230234">>]}}, - "_rev stored in revs." - }, - { - {[{<<"soap">>, 35}]}, - #doc{body = {[{<<"soap">>, 35}]}}, - "Non underscore prefixed fields stored in body." 
- }, - { - {[{<<"_attachments">>, {[ - {<<"my_attachment.fu">>, {[ - {<<"stub">>, true}, - {<<"content_type">>, <<"application/awesome">>}, - {<<"length">>, 45} - ]}}, - {<<"noahs_private_key.gpg">>, {[ - {<<"data">>, <<"SSBoYXZlIGEgcGV0IGZpc2gh">>}, - {<<"content_type">>, <<"application/pgp-signature">>} - ]}} - ]}}]}, - #doc{atts = [ - couch_att:new([ - {name, <<"my_attachment.fu">>}, - {data, stub}, - {type, <<"application/awesome">>}, - {att_len, 45}, - {disk_len, 45}, - {revpos, undefined} - ]), - couch_att:new([ - {name, <<"noahs_private_key.gpg">>}, - {data, <<"I have a pet fish!">>}, - {type, <<"application/pgp-signature">>}, - {att_len, 18}, - {disk_len, 18}, - {revpos, 0} - ]) - ]}, - "Attachments are parsed correctly." - }, - { - {[{<<"_deleted">>, true}]}, - #doc{deleted = true}, - "_deleted controls the deleted field." - }, - { - {[{<<"_deleted">>, false}]}, - #doc{}, - "{\"_deleted\": false} is ok." - }, - { - {[ - {<<"_revisions">>, - {[{<<"start">>, 4}, - {<<"ids">>, [<<"foo1">>, <<"phi3">>, <<"omega">>]}]}}, - {<<"_rev">>, <<"6-something">>} - ]}, - #doc{revs = {4, [<<"foo1">>, <<"phi3">>, <<"omega">>]}}, - "_revisions attribute are preferred to _rev." - }, - { - {[{<<"_revs_info">>, dropping}]}, - #doc{}, - "Drops _revs_info." - }, - { - {[{<<"_local_seq">>, dropping}]}, - #doc{}, - "Drops _local_seq." - }, - { - {[{<<"_conflicts">>, dropping}]}, - #doc{}, - "Drops _conflicts." - }, - { - {[{<<"_deleted_conflicts">>, dropping}]}, - #doc{}, - "Drops _deleted_conflicts." - } - ], - lists:map( - fun({EJson, Expect, Msg}) -> - {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson))} - end, - Cases). - -from_json_with_db_name_success_cases() -> - Cases = [ - { - {[]}, - <<"_dbs">>, - #doc{}, - "DbName _dbs is acceptable with no docid" - }, - { - {[{<<"_id">>, <<"zing!">>}]}, - <<"_dbs">>, - #doc{id = <<"zing!">>}, - "DbName _dbs is acceptable with a normal docid" - }, - { - {[{<<"_id">>, <<"_users">>}]}, - <<"_dbs">>, - #doc{id = <<"_users">>}, - "_dbs/_users is acceptable" - }, - { - {[{<<"_id">>, <<"_replicator">>}]}, - <<"_dbs">>, - #doc{id = <<"_replicator">>}, - "_dbs/_replicator is acceptable" - }, - { - {[{<<"_id">>, <<"_global_changes">>}]}, - <<"_dbs">>, - #doc{id = <<"_global_changes">>}, - "_dbs/_global_changes is acceptable" - } - ], - lists:map( - fun({EJson, DbName, Expect, Msg}) -> - {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson, DbName))} - end, - Cases). - -from_json_error_cases() -> - Cases = [ - { - [], - {bad_request, "Document must be a JSON object"}, - "arrays are invalid" - }, - { - 4, - {bad_request, "Document must be a JSON object"}, - "integers are invalid" - }, - { - true, - {bad_request, "Document must be a JSON object"}, - "literals are invalid" - }, - { - {[{<<"_id">>, {[{<<"foo">>, 5}]}}]}, - {illegal_docid, <<"Document id must be a string">>}, - "Document id must be a string." - }, - { - {[{<<"_id">>, <<"_random">>}]}, - {illegal_docid, - <<"Only reserved document ids may start with underscore.">>}, - "Disallow arbitrary underscore prefixed docids." - }, - { - {[{<<"_rev">>, 5}]}, - {bad_request, <<"Invalid rev format">>}, - "_rev must be a string" - }, - { - {[{<<"_rev">>, "foobar"}]}, - {bad_request, <<"Invalid rev format">>}, - "_rev must be %d-%s" - }, - { - {[{<<"_rev">>, "foo-bar"}]}, - "Error if _rev's integer expection is broken." - }, - { - {[{<<"_revisions">>, {[{<<"start">>, true}]}}]}, - {doc_validation, "_revisions.start isn't an integer."}, - "_revisions.start must be an integer." 
- }, - { - {[{<<"_revisions">>, {[{<<"start">>, 0}, {<<"ids">>, 5}]}}]}, - {doc_validation, "_revisions.ids isn't a array."}, - "_revions.ids must be a list." - }, - { - {[{<<"_revisions">>, {[{<<"start">>, 0}, {<<"ids">>, [5]}]}}]}, - {doc_validation, "RevId isn't a string"}, - "Revision ids must be strings." - }, - { - {[{<<"_revisions">>, {[{<<"start">>, 0}, - {<<"ids">>, [<<"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>]}]}}]}, - {doc_validation, "RevId isn't a valid hexadecimal"}, - "Revision ids must be a valid hex." - }, - { - {[{<<"_something">>, 5}]}, - {doc_validation, <<"Bad special document member: _something">>}, - "Underscore prefix fields are reserved." - }, - { - fun() -> - {[ - {<<"_id">>, <<"large_doc">>}, - {<<"x">> , << <<"x">> || _ <- lists:seq(1,1025) >>} - ]} - end, - {request_entity_too_large, <<"large_doc">>}, - "Document too large." - } - ], - - lists:map(fun - ({Fun, Expect, Msg}) when is_function(Fun, 0) -> - {Msg, - ?_assertThrow(Expect, couch_doc:from_json_obj_validate(Fun()))}; - ({EJson, Expect, Msg}) -> - {Msg, - ?_assertThrow(Expect, couch_doc:from_json_obj_validate(EJson))}; - ({EJson, Msg}) -> - {Msg, - ?_assertThrow(_, couch_doc:from_json_obj_validate(EJson))} - end, Cases). - -from_json_with_dbname_error_cases() -> - Cases = [ - { - {[{<<"_id">>, <<"_random">>}]}, - <<"_dbs">>, - {illegal_docid, - <<"Only reserved document ids may start with underscore.">>}, - "Disallow non-system-DB underscore prefixed docids in _dbs database." - }, - { - {[{<<"_id">>, <<"_random">>}]}, - <<"foobar">>, - {illegal_docid, - <<"Only reserved document ids may start with underscore.">>}, - "Disallow arbitrary underscore prefixed docids in regular database." - }, - { - {[{<<"_id">>, <<"_users">>}]}, - <<"foobar">>, - {illegal_docid, - <<"Only reserved document ids may start with underscore.">>}, - "Disallow system-DB docid _users in regular database." - } - ], - - lists:map( - fun({EJson, DbName, Expect, Msg}) -> - Error = (catch couch_doc:from_json_obj_validate(EJson, DbName)), - {Msg, ?_assertMatch(Expect, Error)} - end, - Cases). - -to_json_success_cases() -> - Cases = [ - { - #doc{}, - {[{<<"_id">>, <<"">>}]}, - "Empty docs are {\"_id\": \"\"}" - }, - { - #doc{id = <<"foo">>}, - {[{<<"_id">>, <<"foo">>}]}, - "_id is added." - }, - { - #doc{revs = {5, ["foo"]}}, - {[{<<"_id">>, <<>>}, {<<"_rev">>, <<"5-foo">>}]}, - "_rev is added." - }, - { - [revs], - #doc{revs = {5, [<<"first">>, <<"second">>]}}, - {[ - {<<"_id">>, <<>>}, - {<<"_rev">>, <<"5-first">>}, - {<<"_revisions">>, {[ - {<<"start">>, 5}, - {<<"ids">>, [<<"first">>, <<"second">>]} - ]}} - ]}, - "_revisions include with revs option" - }, - { - #doc{body = {[{<<"foo">>, <<"bar">>}]}}, - {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}]}, - "Arbitrary fields are added." - }, - { - #doc{deleted = true, body = {[{<<"foo">>, <<"bar">>}]}}, - {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}, {<<"_deleted">>, true}]}, - "Deleted docs no longer drop body members." - }, - { - #doc{meta = [ - {revs_info, 4, [{<<"fin">>, deleted}, {<<"zim">>, missing}]} - ]}, - {[ - {<<"_id">>, <<>>}, - {<<"_revs_info">>, [ - {[{<<"rev">>, <<"4-fin">>}, {<<"status">>, <<"deleted">>}]}, - {[{<<"rev">>, <<"3-zim">>}, {<<"status">>, <<"missing">>}]} - ]} - ]}, - "_revs_info field is added correctly." - }, - { - #doc{meta = [{local_seq, 5}]}, - {[{<<"_id">>, <<>>}, {<<"_local_seq">>, 5}]}, - "_local_seq is added as an integer." 
- }, - { - #doc{meta = [{conflicts, [{3, <<"yep">>}, {1, <<"snow">>}]}]}, - {[ - {<<"_id">>, <<>>}, - {<<"_conflicts">>, [<<"3-yep">>, <<"1-snow">>]} - ]}, - "_conflicts is added as an array of strings." - }, - { - #doc{meta = [{deleted_conflicts, [{10923, <<"big_cowboy_hat">>}]}]}, - {[ - {<<"_id">>, <<>>}, - {<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]} - ]}, - "_deleted_conflicsts is added as an array of strings." - }, - { - #doc{atts = [ - couch_att:new([ - {name, <<"big.xml">>}, - {type, <<"xml/sucks">>}, - {data, fun() -> ok end}, - {revpos, 1}, - {att_len, 400}, - {disk_len, 400} - ]), - couch_att:new([ - {name, <<"fast.json">>}, - {type, <<"json/ftw">>}, - {data, <<"{\"so\": \"there!\"}">>}, - {revpos, 1}, - {att_len, 16}, - {disk_len, 16} - ]) - ]}, - {[ - {<<"_id">>, <<>>}, - {<<"_attachments">>, {[ - {<<"big.xml">>, {[ - {<<"content_type">>, <<"xml/sucks">>}, - {<<"revpos">>, 1}, - {<<"length">>, 400}, - {<<"stub">>, true} - ]}}, - {<<"fast.json">>, {[ - {<<"content_type">>, <<"json/ftw">>}, - {<<"revpos">>, 1}, - {<<"length">>, 16}, - {<<"stub">>, true} - ]}} - ]}} - ]}, - "Attachments attached as stubs only include a length." - }, - { - [attachments], - #doc{atts = [ - couch_att:new([ - {name, <<"stuff.txt">>}, - {type, <<"text/plain">>}, - {data, fun() -> <<"diet pepsi">> end}, - {revpos, 1}, - {att_len, 10}, - {disk_len, 10} - ]), - couch_att:new([ - {name, <<"food.now">>}, - {type, <<"application/food">>}, - {revpos, 1}, - {data, <<"sammich">>} - ]) - ]}, - {[ - {<<"_id">>, <<>>}, - {<<"_attachments">>, {[ - {<<"stuff.txt">>, {[ - {<<"content_type">>, <<"text/plain">>}, - {<<"revpos">>, 1}, - {<<"data">>, <<"ZGlldCBwZXBzaQ==">>} - ]}}, - {<<"food.now">>, {[ - {<<"content_type">>, <<"application/food">>}, - {<<"revpos">>, 1}, - {<<"data">>, <<"c2FtbWljaA==">>} - ]}} - ]}} - ]}, - "Attachments included inline with attachments option." - } - ], - - lists:map(fun - ({Doc, EJson, Msg}) -> - {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, []))}; - ({Options, Doc, EJson, Msg}) -> - {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, Options))} - end, Cases). diff --git a/src/couch/test/couch_doc_tests.erl b/src/couch/test/couch_doc_tests.erl deleted file mode 100644 index cf41df61d..000000000 --- a/src/couch/test/couch_doc_tests.erl +++ /dev/null @@ -1,145 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_doc_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - - --define(REQUEST_FIXTURE, - filename:join([?FIXTURESDIR, "multipart.http"])). - -parse_rev_test() -> - ?assertEqual({1, <<"123">>}, couch_doc:parse_rev("1-123")), - ?assertEqual({1, <<"123">>}, couch_doc:parse_rev(<<"1-123">>)), - ?assertException(throw, {bad_request, _}, couch_doc:parse_rev("1f-123")), - ?assertException(throw, {bad_request, _}, couch_doc:parse_rev("bar")). 
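
%% parse_rev_test above pins down the "Pos-Id" revision string format. A
%% minimal sketch of such a parser, for illustration only (the actual
%% couch_doc:parse_rev/1 also validates the id portion of the revision):
parse_rev_sketch(Rev) when is_list(Rev) ->
    parse_rev_sketch(list_to_binary(Rev));
parse_rev_sketch(Rev) when is_binary(Rev) ->
    case binary:split(Rev, <<"-">>) of
        [PosBin, Id] when Id =/= <<>> ->
            try
                {binary_to_integer(PosBin), Id}
            catch
                error:badarg -> throw({bad_request, <<"Invalid rev format">>})
            end;
        _ ->
            throw({bad_request, <<"Invalid rev format">>})
    end.
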
- -doc_from_multi_part_stream_test() -> - ContentType = "multipart/related;boundary=multipart_related_boundary~~~~~~~~~~~~~~~~~~~~", - DataFun = fun() -> request(start) end, - - mock_config(), - {ok, #doc{id = <<"doc0">>, atts = [_]}, _Fun, _Parser} = - couch_doc:doc_from_multi_part_stream(ContentType, DataFun), - meck:unload(config), - ok. - -doc_to_multi_part_stream_test() -> - Boundary = <<"multipart_related_boundary~~~~~~~~~~~~~~~~~~~~">>, - JsonBytes = <<"{\n \"_id\": \"our document goes here\"\n}\n\n">>, - AttData = <<"Hello my important document">>, - AttLength = size(AttData), - Atts = [couch_att:new([ - {name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>}, - {att_len, AttLength}, {disk_len, AttLength}])], - couch_doc:doc_to_multi_part_stream(Boundary, JsonBytes, Atts, fun send/1, true), - AttLengthStr = integer_to_binary(AttLength), - BoundaryLen = size(Boundary), - [ - <<"--", Boundary/binary>>, - <<"Content-Type: application/json">>, - <<>>, - JsonBytes, - <<"--", Boundary/binary>>, - <<"Content-Disposition: attachment; filename=\"test\"">>, - <<"Content-Type: text/plain">>, - <<"Content-Length: ", AttLengthStr/binary>>, - <<>>, - AttData, - <<"--", Boundary:BoundaryLen/binary, "--">> - ] = collected(), - ok. - -len_doc_to_multi_part_stream_test() -> - Boundary = <<"simple_boundary">>, - JsonBytes = <<"{\n \"_id\": \"our document goes here\"\n}\n\n">>, - ContentType = <<"multipart/related; boundary=\"", Boundary/binary, "\"">>, - AttData = <<"Hello my important document">>, - AttLength = size(AttData), - Atts = [couch_att:new([ - {name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>}, - {att_len, AttLength}, {disk_len, AttLength}])], - {ContentType, 258} = %% 258 is expected size of the document - couch_doc:len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, true), - ok. - -validate_docid_test_() -> - {setup, - fun() -> - mock_config(), - ok = meck:new(couch_db_plugin, [passthrough]), - meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end) - end, - fun(_) -> - meck:unload(config), - meck:unload(couch_db_plugin) - end, - [ - ?_assertEqual(ok, couch_doc:validate_docid(<<"idx">>)), - ?_assertEqual(ok, couch_doc:validate_docid(<<"_design/idx">>)), - ?_assertEqual(ok, couch_doc:validate_docid(<<"_local/idx">>)), - ?_assertEqual(ok, couch_doc:validate_docid(large_id(1024))), - ?_assertEqual(ok, couch_doc:validate_docid(<<"_users">>, <<"_dbs">>)), - ?_assertEqual(ok, couch_doc:validate_docid(<<"_replicator">>, <<"_dbs">>)), - ?_assertEqual(ok, couch_doc:validate_docid(<<"_global_changes">>, <<"_dbs">>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<>>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<16#80>>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<"_idx">>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<"_">>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<"_design/">>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<"_local/">>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(large_id(1025))), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<"_users">>, <<"foo">>)), - ?_assertThrow({illegal_docid, _}, - couch_doc:validate_docid(<<"_weeee">>, <<"_dbs">>)) - ] - }. - -large_id(N) -> - << <<"x">> || _ <- lists:seq(1, N) >>. - -request(start) -> - {ok, Doc} = file:read_file(?REQUEST_FIXTURE), - {Doc, fun() -> request(stop) end}; -request(stop) -> - {"", fun() -> request(stop) end}. 
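
%% The request/1 fixture above fakes the streaming contract used by
%% doc_from_multi_part_stream: every call yields {Data, NextFun}. A
%% hypothetical helper sketching how such a stream is drained:
drain_stream(DataFun) ->
    drain_stream(DataFun, []).

drain_stream(DataFun, Acc) ->
    case DataFun() of
        {Data, _Next} when Data == ""; Data == <<>> ->
            iolist_to_binary(lists:reverse(Acc));
        {Data, Next} ->
            drain_stream(Next, [Data | Acc])
    end.
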
- -send(Data) -> - send(Data, get(data)). -send(Data, undefined) -> - send(Data, []); -send(Data, Acc) -> - put(data, [Acc|Data]). - -collected() -> - B = binary:replace(iolist_to_binary(get(data)), <<"\r\n">>, <<0>>, [global]), - binary:split(B, [<<0>>], [global]). - -mock_config() -> - ok = meck:new(config, [passthrough]), - meck:expect(config, get, - fun("couchdb", "max_document_id_length", "infinity") -> "1024"; - ("couchdb", "max_attachment_size", "infinity") -> "infinity"; - ("mem3", "shards_db", "_dbs") -> "_dbs"; - (Key, Val, Default) -> meck:passthrough([Key, Val, Default]) - end - ). diff --git a/src/couch/test/couch_ejson_size_tests.erl b/src/couch/test/couch_ejson_size_tests.erl deleted file mode 100644 index df9168ed1..000000000 --- a/src/couch/test/couch_ejson_size_tests.erl +++ /dev/null @@ -1,72 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_ejson_size_tests). - --include_lib("eunit/include/eunit.hrl"). - --define(HWAIR, $\x{10348}). % 4 byte utf8 encoding --define(EURO, $\x{20ac}). % 3 byte utf8 encoding --define(CENT, $\x{a2}). % 2 byte utf8 encoding - - -ejson_size_test_() -> - [?_assertEqual(R, couch_ejson_size:encoded_size(Input)) || {R, Input} <- [ - {1, 1}, {1, 1}, {2, -1}, {1, 9}, {2, 10}, {3, -10}, - {2, 11}, {2, 99}, {3, 100}, {3, 999}, {4, 1000}, {4, 9999}, - {5, 10000}, - - {3, 0.0}, {3, 0.1}, {3, 1.0}, {4, -1.0}, {3, 1.0e9}, - {4, 1.0e10}, {5, 1.0e-10}, {5, 1.0e-99}, {6, 1.0e-100}, {3, 1.0e-323}, - - {2, arr_nested(0)}, {22, arr_nested(10)}, {2002, arr_nested(1000)}, - {9, obj_nested(0)}, {69, obj_nested(10)}, {6009, obj_nested(1000)}, - - {4, null}, {4, true}, {5, false}, - - {3, str(1, $x)}, {4, str(1, ?CENT)}, {5, str(1, ?EURO)}, - {6, str(1, ?HWAIR)}, {3, str(1, $\x{1})}, {12, str(10, $x)}, - {22, str(10, ?CENT)}, {32, str(10, ?EURO)}, {42, str(10, ?HWAIR)}, - {12, str(10, $\x{1})} - ]]. - - -%% Helper functions - -arr_nested(MaxDepth) -> - arr_nested(MaxDepth, 0). - - -obj_nested(MaxDepth) -> - obj_nested(MaxDepth, 0). - - -obj(N, K, V) -> - {[{K, V} || _ <- lists:seq(1, N)]}. - - -str(N, C) -> - unicode:characters_to_binary([C || _ <- lists:seq(1, N)]). - - -arr_nested(MaxDepth, MaxDepth) -> - []; - -arr_nested(MaxDepth, Depth) -> - [arr_nested(MaxDepth, Depth + 1)]. - - -obj_nested(MaxDepth, MaxDepth) -> - obj(1, <<"k">>, <<"v">>); - -obj_nested(MaxDepth, Depth) -> - {[{<<"k">>, obj_nested(MaxDepth, Depth + 1)}]}. diff --git a/src/couch/test/couch_etag_tests.erl b/src/couch/test/couch_etag_tests.erl deleted file mode 100644 index 9d15e483f..000000000 --- a/src/couch/test/couch_etag_tests.erl +++ /dev/null @@ -1,30 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_etag_tests). - --include_lib("eunit/include/eunit.hrl"). - -local_with_empty_body_test() -> - Etag = couch_httpd:doc_etag(<<"_local/local-and-empty">>, {[]}, {0, <<"1">>}), - ?assertEqual(Etag, <<"\"5ZVXQYO7VLEOU0TL9VXDNP5PV\"">>). - - -local_with_body_test() -> - DocBody = {[{<<"hello">>,<<"world">>},{<<"relax">>,true}]}, - Etag = couch_httpd:doc_etag(<<"_local/local-with-body">>, DocBody, {0, <<"1">>}), - ?assertEqual(Etag, <<"\"CEFXP6WH8OKYIWO1GLGBHKCCA\"">>). - -normal_doc_uses_rev_test() -> - DocBody = {[{<<"hello">>,<<"world">>},{<<"relax">>,true}]}, - Etag = couch_httpd:doc_etag(<<"nomal-doc">>, DocBody, {1, <<"efda11e34e88ebe31a2f83e84a0435b6">>}), - ?assertEqual(Etag, <<"\"1-efda11e34e88ebe31a2f83e84a0435b6\"">>). diff --git a/src/couch/test/couch_file_tests.erl b/src/couch/test/couch_file_tests.erl deleted file mode 100644 index e9806c09a..000000000 --- a/src/couch/test/couch_file_tests.erl +++ /dev/null @@ -1,533 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_file_tests). - --include_lib("couch/include/couch_eunit.hrl"). - --define(BLOCK_SIZE, 4096). --define(setup(F), {setup, fun setup/0, fun teardown/1, F}). --define(foreach(Fs), {foreach, fun setup/0, fun teardown/1, Fs}). - - -setup() -> - {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]), - Fd. - -teardown(Fd) -> - case is_process_alive(Fd) of - true -> ok = couch_file:close(Fd); - false -> ok - end. - -open_close_test_() -> - { - "Test for proper file open and close", - { - setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, - [ - should_return_enoent_if_missed(), - should_ignore_invalid_flags_with_open(), - ?setup(fun should_return_pid_on_file_open/1), - should_close_file_properly(), - ?setup(fun should_create_empty_new_files/1) - ] - } - }. - -should_return_enoent_if_missed() -> - ?_assertEqual({error, enoent}, couch_file:open("not a real file")). - -should_ignore_invalid_flags_with_open() -> - ?_assertMatch({ok, _}, - couch_file:open(?tempfile(), [create, invalid_option])). - -should_return_pid_on_file_open(Fd) -> - ?_assert(is_pid(Fd)). - -should_close_file_properly() -> - {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]), - ok = couch_file:close(Fd), - ?_assert(true). - -should_create_empty_new_files(Fd) -> - ?_assertMatch({ok, 0}, couch_file:bytes(Fd)). 
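
%% The open/close tests above reduce to one lifecycle: couch_file:open/2
%% returns {ok, Pid}, where the pid is a gen_server wrapping the raw fd,
%% and close/1 releases it. A sketch of the bracket pattern the
%% setup/teardown fixtures implement (illustrative helper only):
with_couch_file(Path, Fun) ->
    {ok, Fd} = couch_file:open(Path, [create, overwrite]),
    try
        Fun(Fd)
    after
        ok = couch_file:close(Fd)
    end.
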
-
-
-read_write_test_() ->
-    {
-        "Common file read/write tests",
-        {
-            setup,
-            fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
-            ?foreach([
-                fun should_increase_file_size_on_write/1,
-                fun should_return_current_file_size_on_write/1,
-                fun should_write_and_read_term/1,
-                fun should_write_and_read_binary/1,
-                fun should_write_and_read_large_binary/1,
-                fun should_return_term_as_binary_for_reading_binary/1,
-                fun should_read_term_written_as_binary/1,
-                fun should_read_iolist/1,
-                fun should_fsync/1,
-                fun should_not_read_beyond_eof/1,
-                fun should_truncate/1
-            ])
-        }
-    }.
-
-
-should_increase_file_size_on_write(Fd) ->
-    {ok, 0, _} = couch_file:append_term(Fd, foo),
-    {ok, Size} = couch_file:bytes(Fd),
-    ?_assert(Size > 0).
-
-should_return_current_file_size_on_write(Fd) ->
-    {ok, 0, _} = couch_file:append_term(Fd, foo),
-    {ok, Size} = couch_file:bytes(Fd),
-    ?_assertMatch({ok, Size, _}, couch_file:append_term(Fd, bar)).
-
-should_write_and_read_term(Fd) ->
-    {ok, Pos, _} = couch_file:append_term(Fd, foo),
-    ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
-
-should_write_and_read_binary(Fd) ->
-    {ok, Pos, _} = couch_file:append_binary(Fd, <<"fancy!">>),
-    ?_assertMatch({ok, <<"fancy!">>}, couch_file:pread_binary(Fd, Pos)).
-
-should_return_term_as_binary_for_reading_binary(Fd) ->
-    {ok, Pos, _} = couch_file:append_term(Fd, foo),
-    Foo = couch_compress:compress(foo, snappy),
-    ?_assertMatch({ok, Foo}, couch_file:pread_binary(Fd, Pos)).
-
-should_read_term_written_as_binary(Fd) ->
-    {ok, Pos, _} = couch_file:append_binary(Fd, <<131,100,0,3,102,111,111>>),
-    ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
-
-should_write_and_read_large_binary(Fd) ->
-    BigBin = list_to_binary(lists:duplicate(100000, 0)),
-    {ok, Pos, _} = couch_file:append_binary(Fd, BigBin),
-    ?_assertMatch({ok, BigBin}, couch_file:pread_binary(Fd, Pos)).
-
-should_read_iolist(Fd) ->
-    %% append_binary == append_iolist?
-    %% Possible bug in pread_iolist or iolist() -> append_binary
-    {ok, Pos, _} = couch_file:append_binary(Fd, ["foo", $m, <<"bam">>]),
-    {ok, IoList} = couch_file:pread_iolist(Fd, Pos),
-    ?_assertMatch(<<"foombam">>, iolist_to_binary(IoList)).
-
-should_fsync(Fd) ->
-    {"How does one test fsync?", ?_assertMatch(ok, couch_file:sync(Fd))}.
-
-should_not_read_beyond_eof(Fd) ->
-    BigBin = list_to_binary(lists:duplicate(100000, 0)),
-    DoubleBin = round(byte_size(BigBin) * 2),
-    {ok, Pos, _Size} = couch_file:append_binary(Fd, BigBin),
-    {_, Filepath} = couch_file:process_info(Fd),
-    %% corrupt db file
-    {ok, Io} = file:open(Filepath, [read, write, binary]),
-    ok = file:pwrite(Io, Pos, <<0:1/integer, DoubleBin:31/integer>>),
-    file:close(Io),
-    unlink(Fd),
-    ExpectedError = {badmatch, {'EXIT', {bad_return_value,
-        {read_beyond_eof, Filepath}}}},
-    ?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)).
-
-should_truncate(Fd) ->
-    {ok, 0, _} = couch_file:append_term(Fd, foo),
-    {ok, Size} = couch_file:bytes(Fd),
-    BigBin = list_to_binary(lists:duplicate(100000, 0)),
-    {ok, _, _} = couch_file:append_binary(Fd, BigBin),
-    ok = couch_file:truncate(Fd, Size),
-    ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, 0)).
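
%% The invariant behind the read/write tests above: append_term/2 and
%% append_binary/2 return {ok, Pos, NumBytesWritten}, and Pos alone is
%% enough to read the value back. A compact restatement (illustrative
%% helper only):
roundtrips(Fd, Term) ->
    {ok, Pos, _NumBytes} = couch_file:append_term(Fd, Term),
    {ok, Read} = couch_file:pread_term(Fd, Pos),
    Read =:= Term.
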
- -pread_limit_test_() -> - { - "Read limit tests", - { - setup, - fun() -> - Ctx = test_util:start(?MODULE), - config:set("couchdb", "max_pread_size", "50000"), - Ctx - end, - fun(Ctx) -> - config:delete("couchdb", "max_pread_size"), - test_util:stop(Ctx) - end, - ?foreach([ - fun should_increase_file_size_on_write/1, - fun should_return_current_file_size_on_write/1, - fun should_write_and_read_term/1, - fun should_write_and_read_binary/1, - fun should_not_read_more_than_pread_limit/1 - ]) - } - }. - -should_not_read_more_than_pread_limit(Fd) -> - {_, Filepath} = couch_file:process_info(Fd), - BigBin = list_to_binary(lists:duplicate(100000, 0)), - {ok, Pos, _Size} = couch_file:append_binary(Fd, BigBin), - unlink(Fd), - ExpectedError = {badmatch, {'EXIT', {bad_return_value, - {exceed_pread_limit, Filepath, 50000}}}}, - ?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)). - - -header_test_() -> - { - "File header read/write tests", - { - setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, - [ - ?foreach([ - fun should_write_and_read_atom_header/1, - fun should_write_and_read_tuple_header/1, - fun should_write_and_read_second_header/1, - fun should_truncate_second_header/1, - fun should_produce_same_file_size_on_rewrite/1, - fun should_save_headers_larger_than_block_size/1 - ]), - should_recover_header_marker_corruption(), - should_recover_header_size_corruption(), - should_recover_header_md5sig_corruption(), - should_recover_header_data_corruption() - ] - } - }. - - -should_write_and_read_atom_header(Fd) -> - ok = couch_file:write_header(Fd, hello), - ?_assertMatch({ok, hello}, couch_file:read_header(Fd)). - -should_write_and_read_tuple_header(Fd) -> - ok = couch_file:write_header(Fd, {<<"some_data">>, 32}), - ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)). - -should_write_and_read_second_header(Fd) -> - ok = couch_file:write_header(Fd, {<<"some_data">>, 32}), - ok = couch_file:write_header(Fd, [foo, <<"more">>]), - ?_assertMatch({ok, [foo, <<"more">>]}, couch_file:read_header(Fd)). - -should_truncate_second_header(Fd) -> - ok = couch_file:write_header(Fd, {<<"some_data">>, 32}), - {ok, Size} = couch_file:bytes(Fd), - ok = couch_file:write_header(Fd, [foo, <<"more">>]), - ok = couch_file:truncate(Fd, Size), - ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)). - -should_produce_same_file_size_on_rewrite(Fd) -> - ok = couch_file:write_header(Fd, {<<"some_data">>, 32}), - {ok, Size1} = couch_file:bytes(Fd), - ok = couch_file:write_header(Fd, [foo, <<"more">>]), - {ok, Size2} = couch_file:bytes(Fd), - ok = couch_file:truncate(Fd, Size1), - ok = couch_file:write_header(Fd, [foo, <<"more">>]), - ?_assertMatch({ok, Size2}, couch_file:bytes(Fd)). - -should_save_headers_larger_than_block_size(Fd) -> - Header = erlang:make_tuple(5000, <<"CouchDB">>), - couch_file:write_header(Fd, Header), - {"COUCHDB-1319", ?_assertMatch({ok, Header}, couch_file:read_header(Fd))}. - - -should_recover_header_marker_corruption() -> - ?_assertMatch( - ok, - check_header_recovery( - fun(CouchFd, RawFd, Expect, HeaderPos) -> - ?assertNotMatch(Expect, couch_file:read_header(CouchFd)), - file:pwrite(RawFd, HeaderPos, <<0>>), - ?assertMatch(Expect, couch_file:read_header(CouchFd)) - end) - ). 
- -should_recover_header_size_corruption() -> - ?_assertMatch( - ok, - check_header_recovery( - fun(CouchFd, RawFd, Expect, HeaderPos) -> - ?assertNotMatch(Expect, couch_file:read_header(CouchFd)), - % +1 for 0x1 byte marker - file:pwrite(RawFd, HeaderPos + 1, <<10/integer>>), - ?assertMatch(Expect, couch_file:read_header(CouchFd)) - end) - ). - -should_recover_header_md5sig_corruption() -> - ?_assertMatch( - ok, - check_header_recovery( - fun(CouchFd, RawFd, Expect, HeaderPos) -> - ?assertNotMatch(Expect, couch_file:read_header(CouchFd)), - % +5 = +1 for 0x1 byte and +4 for term size. - file:pwrite(RawFd, HeaderPos + 5, <<"F01034F88D320B22">>), - ?assertMatch(Expect, couch_file:read_header(CouchFd)) - end) - ). - -should_recover_header_data_corruption() -> - ?_assertMatch( - ok, - check_header_recovery( - fun(CouchFd, RawFd, Expect, HeaderPos) -> - ?assertNotMatch(Expect, couch_file:read_header(CouchFd)), - % +21 = +1 for 0x1 byte, +4 for term size and +16 for MD5 sig - file:pwrite(RawFd, HeaderPos + 21, <<"some data goes here!">>), - ?assertMatch(Expect, couch_file:read_header(CouchFd)) - end) - ). - - -check_header_recovery(CheckFun) -> - Path = ?tempfile(), - {ok, Fd} = couch_file:open(Path, [create, overwrite]), - {ok, RawFd} = file:open(Path, [read, write, raw, binary]), - - {ok, _} = write_random_data(Fd), - ExpectHeader = {some_atom, <<"a binary">>, 756}, - ok = couch_file:write_header(Fd, ExpectHeader), - - {ok, HeaderPos} = write_random_data(Fd), - ok = couch_file:write_header(Fd, {2342, <<"corruption! greed!">>}), - - CheckFun(Fd, RawFd, {ok, ExpectHeader}, HeaderPos), - - ok = file:close(RawFd), - ok = couch_file:close(Fd), - ok. - -write_random_data(Fd) -> - write_random_data(Fd, 100 + couch_rand:uniform(1000)). - -write_random_data(Fd, 0) -> - {ok, Bytes} = couch_file:bytes(Fd), - {ok, (1 + Bytes div ?BLOCK_SIZE) * ?BLOCK_SIZE}; -write_random_data(Fd, N) -> - Choices = [foo, bar, <<"bizzingle">>, "bank", ["rough", stuff]], - Term = lists:nth(couch_rand:uniform(4) + 1, Choices), - {ok, _, _} = couch_file:append_term(Fd, Term), - write_random_data(Fd, N - 1). - - -delete_test_() -> - { - "File delete tests", - { - foreach, - fun() -> - meck:new(config, [passthrough]), - File = ?tempfile() ++ ".couch", - RootDir = filename:dirname(File), - ok = couch_file:init_delete_dir(RootDir), - ok = file:write_file(File, <<>>), - {RootDir, File} - end, - fun({_, File}) -> - meck:unload(config), - file:delete(File) - end, - [ - fun(Cfg) -> - {"enable_database_recovery = false, context = delete", - make_enable_recovery_test_case(Cfg, false, delete)} - end, - fun(Cfg) -> - {"enable_database_recovery = true, context = delete", - make_enable_recovery_test_case(Cfg, true, delete)} - end, - fun(Cfg) -> - {"enable_database_recovery = false, context = compaction", - make_enable_recovery_test_case(Cfg, false, compaction)} - end, - fun(Cfg) -> - {"enable_database_recovery = true, context = compaction", - make_enable_recovery_test_case(Cfg, true, compaction)} - end, - fun(Cfg) -> - {"delete_after_rename = true", - make_delete_after_rename_test_case(Cfg, true)} - end, - fun(Cfg) -> - {"delete_after_rename = false", - make_delete_after_rename_test_case(Cfg, false)} - end - ] - } - }. 
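
%% The header corruption tests earlier in this module poke bytes at fixed
%% offsets and expect read_header to fall back to the previous valid
%% header. The on-disk layout implied by those offsets, inferred from the
%% "+1/+5/+21" comments above (an assumption, not a documented format):
%%   <<1:8, TermSize:32, Md5:16/binary, HeaderTerm/binary>>
header_field_offset(marker) -> 0;
header_field_offset(size)   -> 1;          % past the 0x1 marker byte
header_field_offset(md5)    -> 1 + 4;      % past the 32-bit term size
header_field_offset(data)   -> 1 + 4 + 16. % past the 16-byte MD5 signature
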
- - -make_enable_recovery_test_case({RootDir, File}, EnableRecovery, Context) -> - meck:expect(config, get_boolean, fun - ("couchdb", "enable_database_recovery", _) -> EnableRecovery; - ("couchdb", "delete_after_rename", _) -> false - end), - FileExistsBefore = filelib:is_regular(File), - couch_file:delete(RootDir, File, [{context, Context}]), - FileExistsAfter = filelib:is_regular(File), - RenamedFiles = filelib:wildcard(filename:rootname(File) ++ "*.deleted.*"), - DeletedFiles = filelib:wildcard(RootDir ++ "/.delete/*"), - {ExpectRenamedCount, ExpectDeletedCount} = if - EnableRecovery andalso Context =:= delete -> {1, 0}; - true -> {0, 1} - end, - [ - ?_assert(FileExistsBefore), - ?_assertNot(FileExistsAfter), - ?_assertEqual(ExpectRenamedCount, length(RenamedFiles)), - ?_assertEqual(ExpectDeletedCount, length(DeletedFiles)) - ]. - -make_delete_after_rename_test_case({RootDir, File}, DeleteAfterRename) -> - meck:expect(config, get_boolean, fun - ("couchdb", "enable_database_recovery", _) -> false; - ("couchdb", "delete_after_rename", _) -> DeleteAfterRename - end), - FileExistsBefore = filelib:is_regular(File), - couch_file:delete(RootDir, File), - FileExistsAfter = filelib:is_regular(File), - RenamedFiles = filelib:wildcard(filename:join([RootDir, ".delete", "*"])), - ExpectRenamedCount = if DeleteAfterRename -> 0; true -> 1 end, - [ - ?_assert(FileExistsBefore), - ?_assertNot(FileExistsAfter), - ?_assertEqual(ExpectRenamedCount, length(RenamedFiles)) - ]. - - -nuke_dir_test_() -> - { - "Nuke directory tests", - { - foreach, - fun() -> - meck:new(config, [passthrough]), - File0 = ?tempfile() ++ ".couch", - RootDir = filename:dirname(File0), - BaseName = filename:basename(File0), - Seed = couch_rand:uniform(8999999999) + 999999999, - DDocDir = io_lib:format("db.~b_design", [Seed]), - ViewDir = filename:join([RootDir, DDocDir]), - file:make_dir(ViewDir), - File = filename:join([ViewDir, BaseName]), - file:rename(File0, File), - ok = couch_file:init_delete_dir(RootDir), - ok = file:write_file(File, <<>>), - {RootDir, ViewDir} - end, - fun({RootDir, ViewDir}) -> - meck:unload(config), - remove_dir(ViewDir), - Ext = filename:extension(ViewDir), - case filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext) of - [DelDir] -> remove_dir(DelDir); - _ -> ok - end - end, - [ - fun(Cfg) -> - {"enable_database_recovery = false", - make_rename_dir_test_case(Cfg, false)} - end, - fun(Cfg) -> - {"enable_database_recovery = true", - make_rename_dir_test_case(Cfg, true)} - end, - fun(Cfg) -> - {"delete_after_rename = true", - make_delete_dir_test_case(Cfg, true)} - end, - fun(Cfg) -> - {"delete_after_rename = false", - make_delete_dir_test_case(Cfg, false)} - end - ] - } - }. - - -make_rename_dir_test_case({RootDir, ViewDir}, EnableRecovery) -> - meck:expect(config, get_boolean, fun - ("couchdb", "enable_database_recovery", _) -> EnableRecovery; - ("couchdb", "delete_after_rename", _) -> true - end), - DirExistsBefore = filelib:is_dir(ViewDir), - couch_file:nuke_dir(RootDir, ViewDir), - DirExistsAfter = filelib:is_dir(ViewDir), - Ext = filename:extension(ViewDir), - RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext), - ExpectRenamedCount = if EnableRecovery -> 1; true -> 0 end, - [ - ?_assert(DirExistsBefore), - ?_assertNot(DirExistsAfter), - ?_assertEqual(ExpectRenamedCount, length(RenamedDirs)) - ]. 
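
%% A summary of the deletion matrix these helpers assert, including
%% make_delete_dir_test_case just below (derived from the assertions,
%% restated here for readability):
%%   enable_database_recovery=true and context=delete
%%       -> renamed to a *.deleted.* sibling, nothing put in .delete/
%%   otherwise, with delete_after_rename=true
%%       -> removed outright
%%   otherwise
%%       -> moved into RootDir/.delete/ for deferred deletion
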
- -make_delete_dir_test_case({RootDir, ViewDir}, DeleteAfterRename) -> - meck:expect(config, get_boolean, fun - ("couchdb", "enable_database_recovery", _) -> false; - ("couchdb", "delete_after_rename", _) -> DeleteAfterRename - end), - DirExistsBefore = filelib:is_dir(ViewDir), - couch_file:nuke_dir(RootDir, ViewDir), - DirExistsAfter = filelib:is_dir(ViewDir), - Ext = filename:extension(ViewDir), - RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext), - RenamedFiles = filelib:wildcard(RootDir ++ "/.delete/*"), - ExpectRenamedCount = if DeleteAfterRename -> 0; true -> 1 end, - [ - ?_assert(DirExistsBefore), - ?_assertNot(DirExistsAfter), - ?_assertEqual(0, length(RenamedDirs)), - ?_assertEqual(ExpectRenamedCount, length(RenamedFiles)) - ]. - -remove_dir(Dir) -> - [file:delete(File) || File <- filelib:wildcard(filename:join([Dir, "*"]))], - file:del_dir(Dir). - - -fsync_error_test_() -> - { - "Test fsync raises errors", - { - setup, - fun() -> - test_util:start(?MODULE, [ioq]) - end, - fun(Ctx) -> - test_util:stop(Ctx) - end, - [ - fun fsync_raises_errors/0 - ] - } - }. - - -fsync_raises_errors() -> - Fd = spawn(fun() -> fake_fsync_fd() end), - ?assertError({fsync_error, eio}, couch_file:sync(Fd)). - - -fake_fsync_fd() -> - % Mocking gen_server did not go very - % well so faking the couch_file pid - % will have to do. - receive - {'$gen_call', From, sync} -> - gen:reply(From, {error, eio}) - end. diff --git a/src/couch/test/couch_flags_config_tests.erl b/src/couch/test/couch_flags_config_tests.erl deleted file mode 100644 index 1a66cdcff..000000000 --- a/src/couch/test/couch_flags_config_tests.erl +++ /dev/null @@ -1,116 +0,0 @@ --module(couch_flags_config_tests). --include_lib("eunit/include/eunit.hrl"). - -%% value copied from couch_flags_config --define(MAX_FLAG_NAME_LENGTH, 256). - -setup() -> - meck:new(couch_log), - meck:expect(couch_log, error, ['_', '_'], meck:val(ok)), - ok. - -teardown(_) -> - meck:unload(). - -couch_flags_config_test_() -> - { - "test couch_flags_config", - { - setup, fun setup/0, fun teardown/1, - all_combinations_return_same_result() - ++ latest_overide_wins() - ++ [ - {"rules_are_sorted", fun rules_are_sorted/0} - ] - } - }. - -all_combinations_return_same_result() -> - Config = [ - {"foo, bar||*", "true"}, - {"baz, qux||*", "false"}, - {"baz||shards/test*", "true"}, - {"baz||shards/blacklist*", "false"}, - {"bar||shards/test*", "false"}, - {"bar||shards/test/blacklist*", "true"} - ], - Expected = [ - {{<<"shards/test/blacklist*">>},{<<"shards/test/blacklist*">>,22,[bar, baz, foo]}}, - {{<<"shards/test*">>},{<<"shards/test*">>, 12, [baz, foo]}}, - {{<<"shards/blacklist*">>},{<<"shards/blacklist*">>, 17, [bar, foo]}}, - {{<<"*">>},{<<"*">>, 1, [bar, foo]}} - ], - Combinations = couch_tests_combinatorics:permutations(Config), - [{test_id(Items), ?_assertEqual(Expected, couch_flags_config:data(Items))} - || Items <- Combinations]. - -rules_are_sorted() -> - Expected = [ - {{<<"shards/test/exact">>},{<<"shards/test/exact">>, 17, [baz,flag_bar,flag_foo]}}, - {{<<"shards/test/blacklist*">>},{<<"shards/test/blacklist*">>,22,[flag_foo]}}, - {{<<"shards/test*">>},{<<"shards/test*">>, 12, [baz,flag_bar,flag_foo]}}, - {{<<"shards/exact">>},{<<"shards/exact">>, 12, [flag_bar,flag_foo]}}, - {{<<"shards/blacklist*">>},{<<"shards/blacklist*">>, 17, []}}, - {{<<"*">>},{<<"*">>, 1, [flag_foo]}} - ], - ?assertEqual(Expected, couch_flags_config:data(test_config())). 
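
%% Rules above use "flag1,flag2||pattern" keys, where a trailing '*'
%% makes the pattern a prefix match and longer (more specific) patterns
%% win. A simplified matcher sketching that intent (hypothetical helper,
%% not the actual couch_flags_config implementation):
pattern_matches(Pattern, Key) when is_binary(Pattern), is_binary(Key) ->
    case binary:last(Pattern) of
        $* ->
            PrefixLen = byte_size(Pattern) - 1,
            Prefix = binary:part(Pattern, 0, PrefixLen),
            binary:longest_common_prefix([Prefix, Key]) =:= PrefixLen;
        _ ->
            Pattern =:= Key
    end.
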
- -latest_overide_wins() -> - Cases = [ - {[ - {"flag||*", "false"}, {"flag||a*", "true"}, - {"flag||ab*", "true"}, {"flag||abc*", "true"} - ], true}, - {[ - {"flag||*", "true"}, {"flag||a*", "false"}, - {"flag||ab*", "true"}, {"flag||abc*", "false"} - ], false} - ], - [{test_id(Rules, Expected), - ?_assertEqual(Expected, lists:member(flag, - flags(hd(couch_flags_config:data(Rules)))))} - || {Rules, Expected} <- Cases]. - -flags({{_Pattern}, {_Pattern, _Size, Flags}}) -> - Flags. - -test_id(Items, ExpectedResult) -> - lists:flatten(io_lib:format("~p -> ~p", [[P || {P, _} <- Items], ExpectedResult])). - - -test_id(Items) -> - lists:flatten(io_lib:format("~p", [[P || {P, _} <- Items]])). - -test_config() -> - [ - {"flag_foo||*", "true"}, - {"flag_bar||*", "false"}, - {"flag_bar||shards/test*", "true"}, - {"flag_foo||shards/blacklist*", "false"}, - {"baz||shards/test*", "true"}, - {"baz||shards/test/blacklist*", "false"}, - {"flag_bar||shards/exact", "true"}, - {"flag_bar||shards/test/exact", "true"} - ]. - -parse_flags_term_test_() -> - LongBinary = binary:copy(<<"a">>, ?MAX_FLAG_NAME_LENGTH + 1), - ExpectedError = {error, {"Cannot parse list of tags: ~n~p", - [{too_long, LongBinary}]}}, - ExpectedUnknownError = {error,{"Cannot parse list of tags: ~n~p", - [{invalid_flag,<<"dddddddd">>}]}}, - [ - {"empty binary", ?_assertEqual( - [], couch_flags_config:parse_flags_term(<<>>))}, - {"single flag", ?_assertEqual( - [fff], couch_flags_config:parse_flags_term(<<"fff">>))}, - {"sorted", ?_assertEqual( - [aaa,bbb,fff], couch_flags_config:parse_flags_term(<<"fff,aaa,bbb">>))}, - {"whitespace", ?_assertEqual( - [aaa,bbb,fff], couch_flags_config:parse_flags_term(<<"fff , aaa, bbb ">>))}, - {"error", ?_assertEqual( - ExpectedError, couch_flags_config:parse_flags_term(LongBinary))}, - {"unknown_flag", ?_assertEqual( - ExpectedUnknownError, couch_flags_config:parse_flags_term(<<"dddddddd">>))} - ]. - diff --git a/src/couch/test/couch_flags_tests.erl b/src/couch/test/couch_flags_tests.erl deleted file mode 100644 index 32ec57b77..000000000 --- a/src/couch/test/couch_flags_tests.erl +++ /dev/null @@ -1,150 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_flags_tests). - --include_lib("couch/include/couch_eunit.hrl"). - -%% couch_epi_plugin behaviour callbacks --export([ - app/0, - providers/0, - services/0, - data_providers/0, - data_subscriptions/0, - processes/0, - notify/3 -]). - --export([ - rules/0 -]). - -app() -> - test_app. - -providers() -> - [{feature_flags, ?MODULE}]. - -services() -> - []. - -data_providers() -> - []. - -data_subscriptions() -> - []. - -processes() -> - []. - -notify(_, _, _) -> - ok. - -rules() -> - test_config(). - -setup() -> - %% FIXME after we upgrade couch_epi - application:stop(couch_epi), % in case it's already running from other tests... 
- application:unload(couch_epi), - - application:load(couch_epi), - application:set_env(couch_epi, plugins, [couch_db_epi, ?MODULE]), - meck:expect(config, get, 1, []), - - Ctx = test_util:start_couch([couch_epi]), - Ctx. - - -teardown(Ctx) -> - test_util:stop_couch(Ctx), - ok = application:unload(couch_epi), - meck:unload(), - ok. - -couch_flags_test_() -> - { - "test couch_flags", - { - setup, fun setup/0, fun teardown/1, - enabled_flags_tests() - ++ is_enabled() -%% ++ match_performance() - } - }. - -enabled_flags_tests() -> - - [{"enabled_flags_tests", [ - {"flags_default_rule", - ?_assertEqual( - [foo], couch_flags:enabled("something"))}, - {"flags_wildcard_rule", - ?_assertEqual( - [bar, baz, foo], - couch_flags:enabled("shards/test/something"))}, - {"flags_exact_rule", - ?_assertEqual( - [bar, baz, foo], - couch_flags:enabled("shards/test/exact"))}, - {"flags_blacklist_rule", - ?_assertEqual( - [], - couch_flags:enabled("shards/blacklist/4"))} - ]}]. - -is_enabled() -> - [{"is_enabled_tests", [ - {"flags_default_rule [enabled]", - ?_assert(couch_flags:is_enabled(foo, "something"))}, - {"flags_default_rule [disabled]", - ?_assertNot(couch_flags:is_enabled(baz, "something"))}, - {"flags_default_rule [not_existent]", - ?_assertNot(couch_flags:is_enabled(non_existent, "something"))}, - - {"flags_wildcard_rule [enabled]", - ?_assert(couch_flags:is_enabled(bar, "shards/test/something"))}, - {"flags_wildcard_rule [not_existent]", - ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/something"))}, - - {"flags_exact_rule [overide_disbled]", - ?_assert(couch_flags:is_enabled(bar, "shards/test/exact"))}, - {"flags_exact_rule [not_existent]", - ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/exact"))}, - - {"flags_blacklist_rule [overide_enabled]", - ?_assertNot(couch_flags:is_enabled(foo, "shards/blacklist/4"))}, - {"flags_blacklist_rule [not_existent]", - ?_assertNot(couch_flags:is_enabled(non_existent, "shards/blacklist/4"))} - ]}]. - -%% match_performance() -> -%% [{"match_performance", [ -%% ?_test(begin -%% ?debugTime("1 million of operations took", lists:foreach(fun(_) -> -%% couch_flags:is_enabled(bar, "shards/test/exact") -%% end, lists:seq(1, 1000000))) -%% end) -%% ]}]. - - -test_config() -> - [ - {"foo||/*", "true"}, - {"bar||/*", "false"}, - {"bar||/shards/test*", "true"}, - {"foo||/shards/blacklist*", "false"}, - {"baz||/shards/test*", "true"}, - {"bar||/shards/exact", "true"}, - {"bar||/shards/test/exact", "true"} - ]. diff --git a/src/couch/test/couch_hotp_tests.erl b/src/couch/test/couch_hotp_tests.erl deleted file mode 100644 index fee10ff5e..000000000 --- a/src/couch/test/couch_hotp_tests.erl +++ /dev/null @@ -1,28 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_hotp_tests). - --include_lib("eunit/include/eunit.hrl"). 
- -hotp_test() -> - Key = <<"12345678901234567890">>, - ?assertEqual(755224, couch_hotp:generate(sha, Key, 0, 6)), - ?assertEqual(287082, couch_hotp:generate(sha, Key, 1, 6)), - ?assertEqual(359152, couch_hotp:generate(sha, Key, 2, 6)), - ?assertEqual(969429, couch_hotp:generate(sha, Key, 3, 6)), - ?assertEqual(338314, couch_hotp:generate(sha, Key, 4, 6)), - ?assertEqual(254676, couch_hotp:generate(sha, Key, 5, 6)), - ?assertEqual(287922, couch_hotp:generate(sha, Key, 6, 6)), - ?assertEqual(162583, couch_hotp:generate(sha, Key, 7, 6)), - ?assertEqual(399871, couch_hotp:generate(sha, Key, 8, 6)), - ?assertEqual(520489, couch_hotp:generate(sha, Key, 9, 6)). diff --git a/src/couch/test/couch_index_tests.erl b/src/couch/test/couch_index_tests.erl deleted file mode 100644 index fab3806d0..000000000 --- a/src/couch/test/couch_index_tests.erl +++ /dev/null @@ -1,234 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_index_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_mrview/include/couch_mrview.hrl"). --include_lib("stdlib/include/ms_transform.hrl"). - --define(TIMEOUT, 1000). - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - create_design_doc(DbName, <<"_design/foo">>, <<"bar">>), - tracer_new(), - DbName. - -teardown(DbName) -> - tracer_delete(), - couch_server:delete(DbName, [?ADMIN_CTX]). - -couch_index_ioq_priority_test_() -> - { - "Test ioq_priority for views", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun check_io_priority_for_updater/1, - fun check_io_priority_for_compactor/1 - ] - } - } - }. - - -check_io_priority_for_updater(DbName) -> - ?_test(begin - {ok, IndexerPid} = couch_index_server:get_index( - couch_mrview_index, DbName, <<"_design/foo">>), - CouchIndexUpdaterPid = updater_pid(IndexerPid), - tracer_record(CouchIndexUpdaterPid), - - create_docs(DbName), - - CommittedSeq = couch_util:with_db(DbName, fun(Db) -> couch_db:get_update_seq(Db) end), - couch_index:get_state(IndexerPid, CommittedSeq), - [UpdaterPid] = wait_spawn_event_for_pid(CouchIndexUpdaterPid), - - [UpdaterMapProcess] = wait_spawn_by_anonymous_fun( - UpdaterPid, '-start_update/4-fun-0-'), - - ?assert(wait_set_io_priority( - UpdaterMapProcess, {view_update, DbName, <<"_design/foo">>})), - - [UpdaterWriterProcess] = wait_spawn_by_anonymous_fun( - UpdaterPid, '-start_update/4-fun-1-'), - ?assert(wait_set_io_priority( - UpdaterWriterProcess, {view_update, DbName, <<"_design/foo">>})), - - ok - end). 
- -check_io_priority_for_compactor(DbName) -> - ?_test(begin - {ok, IndexerPid} = couch_index_server:get_index( - couch_mrview_index, DbName, <<"_design/foo">>), - {ok, CompactorPid} = couch_index:get_compactor_pid(IndexerPid), - tracer_record(CompactorPid), - - create_docs(DbName), - - couch_index:compact(IndexerPid), - wait_spawn_event_for_pid(CompactorPid), - - [CompactorProcess] = wait_spawn_by_anonymous_fun( - CompactorPid, '-handle_call/3-fun-0-'), - ?assert(wait_set_io_priority( - CompactorProcess, {view_compact, DbName, <<"_design/foo">>})), - ok - end). - -create_docs(DbName) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - Doc1 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc1">>}, - {<<"value">>, 1} - - ]}), - Doc2 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc2">>}, - {<<"value">>, 2} - - ]}), - Doc3 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc3">>}, - {<<"value">>, 3} - - ]}), - {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]), - couch_db:ensure_full_commit(Db), - couch_db:close(Db). - -create_design_doc(DbName, DDName, ViewName) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDName}, - {<<"language">>, <<"javascript">>}, - {<<"views">>, {[ - {ViewName, {[ - {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>} - ]}} - ]}} - ]}), - {ok, Rev} = couch_db:update_doc(Db, DDoc, []), - couch_db:ensure_full_commit(Db), - couch_db:close(Db), - Rev. - -wait_set_io_priority(Pid, IOPriority) -> - test_util:wait_value(fun() -> - does_process_set_io_priority(Pid, IOPriority) - end, true). - -does_process_set_io_priority(Pid, IOPriority) -> - PutCallsArgs = find_calls_to_fun(Pid, {erlang, put, 2}), - lists:any(fun([_, Priority]) -> Priority =:= IOPriority end, PutCallsArgs). - -wait_events(MatchSpec) -> - test_util:wait_other_value(fun() -> select(MatchSpec) end, []). - -find_spawned_by_anonymous_fun(ParentPid, Name) -> - AnonymousFuns = select(ets:fun2ms(fun - ({spawned, Pid, _TS, _Name, _Dict, [PPid, {erlang, apply, [Fun, _]}]}) - when is_function(Fun) andalso PPid =:= ParentPid -> {Pid, Fun} - end)), - lists:filtermap(fun({Pid, Fun}) -> - case erlang:fun_info(Fun, name) of - {name, Name} -> {true, Pid}; - _ -> false - end - end, AnonymousFuns). - -find_calls_to_fun(Pid, {Module, Function, Arity}) -> - select(ets:fun2ms(fun - ({call, P, _TS, _Name, _Dict, [{M, F, Args}]}) - when length(Args) =:= Arity - andalso M =:= Module - andalso F =:= Function - andalso P =:= Pid - -> Args - end)). - -wait_spawn_event_for_pid(ParentPid) -> - wait_events(ets:fun2ms(fun - ({spawned, Pid, _TS, _Name, _Dict, [P, _]}) when P =:= ParentPid -> Pid - end)). - -wait_spawn_by_anonymous_fun(ParentPid, Name) -> - test_util:wait_other_value(fun() -> - find_spawned_by_anonymous_fun(ParentPid, Name) - end, []). - -updater_pid(IndexerPid) -> - {links, Links} = process_info(IndexerPid, links), - [Pid] = select_process_by_name_prefix(Links, "couch_index_updater:init/1"), - Pid. - -select_process_by_name_prefix(Pids, Name) -> - lists:filter(fun(Pid) -> - Key = couch_debug:process_name(Pid), - string:str(Key, Name) =:= 1 - end, Pids). - -select(MatchSpec) -> - lists:filtermap(fun(Event) -> - case ets:test_ms(Event, MatchSpec) of - {ok, false} -> false; - {ok, Result} -> {true, Result}; - _ -> false - end - end, tracer_events()). 
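
%% select/1 above leans on two pieces of machinery: ets:fun2ms/1 (enabled
%% by the ms_transform include this module already has) compiles a
%% literal fun into a match spec, and ets:test_ms/2 runs that spec
%% against a single tuple, returning {ok, false} when it doesn't match.
%% A condensed sketch of the same pattern (illustrative helper only):
spawned_pids_sketch(Events, ParentPid) ->
    MS = ets:fun2ms(fun({spawned, Pid, _TS, _Nm, _Dict, [P, _]})
            when P =:= ParentPid -> Pid end),
    [R || E <- Events, {ok, R} <- [ets:test_ms(E, MS)], R =/= false].
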
- - -%% ======================== -%% Tracer related functions -%% ------------------------ -tracer_new() -> - ets:new(?MODULE, [public, named_table]), - {ok, _Tracer} = dbg:tracer(process, {fun tracer_collector/2, 0}), - ok. - -tracer_delete() -> - dbg:stop_clear(), - (catch ets:delete(?MODULE)), - ok. - -tracer_record(Pid) -> - {ok, _} = dbg:tp(erlang, put, x), - {ok, _} = dbg:p(Pid, [c, p, sos]), - ok. - -tracer_events() -> - Events = [{Idx, E} || [Idx, E] <- ets:match(?MODULE, {{trace, '$1'}, '$2'})], - {_, Sorted} = lists:unzip(lists:keysort(1, Events)), - Sorted. - -tracer_collector(Msg, Seq) -> - ets:insert(?MODULE, {{trace, Seq}, normalize_trace_msg(Msg)}), - Seq + 1. - -normalize_trace_msg(TraceMsg) -> - case tuple_to_list(TraceMsg) of - [trace_ts, Pid, Type | Info] -> - {TraceInfo, [Timestamp]} = lists:split(length(Info)-1, Info), - {Type, Pid, Timestamp, couch_debug:process_name(Pid), process_info(Pid), TraceInfo}; - [trace, Pid, Type | TraceInfo] -> - {Type, Pid, os:timestamp(), couch_debug:process_name(Pid), process_info(Pid), TraceInfo} - end. diff --git a/src/couch/test/couch_key_tree_prop_tests.erl b/src/couch/test/couch_key_tree_prop_tests.erl deleted file mode 100644 index f8146926a..000000000 --- a/src/couch/test/couch_key_tree_prop_tests.erl +++ /dev/null @@ -1,530 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_key_tree_prop_tests). - --include_lib("triq/include/triq.hrl"). --triq(eunit). - --define(SIZE_REDUCTION, 3). % How much to reduce size with tree depth. --define(MAX_BRANCHES, 4). % Maximum number of branches. --define(RAND_SIZE, 1 bsl 64). - - -% -% Properties -% - - -% Merge random paths from a revtree into itself. Check that no revisions have -% been lost in the process and that result is one of the 3 expected values. -% -prop_revtree_merge_with_subset_of_own_nodes() -> - ?FORALL(Revs, g_revs(), - ?FORALL({RevTree, Branch}, {g_revtree(Revs), g_revtree(Revs, 1)}, - ?IMPLIES(length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [], - begin - {Merged, Result} = couch_key_tree:merge(RevTree, hd(Branch)), - lists:member(Result, [new_leaf, new_branch, internal_node]) - andalso same_keys(RevTree ++ Branch, Merged) - andalso valid_revtree(Merged) - end - ) - ) - ). - - -% Merge random trees into revtree. -% -prop_revtree_merge_random_nodes() -> - ?FORALL({RevTree, Branch}, {g_revtree(), g_revtree([], 1)}, - ?IMPLIES(length(Branch) > 0, - begin - {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)), - valid_revtree(Merged) - end - ) - ). - - - -% Merge mix or random and existing revtree paths into revtree -% -prop_revtree_merge_some_existing_some_new() -> - ?FORALL(RevTree, g_revtree(), - ?FORALL(Branch, - begin - KeyList = keylist(RevTree), - Half = lists:sublist(KeyList, length(KeyList) div 2), - g_revtree(Half, 1) - end, - ?IMPLIES(length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [], - begin - {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)), - valid_revtree(Merged) - end - ) - ) - ). 
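
%% For readers new to triq: ?FORALL binds generated values and ?IMPLIES
%% discards cases that fail a precondition, as the merge properties above
%% do for empty branches. A minimal self-contained property in the same
%% style (illustrative only, unrelated to revtrees):
prop_usort_sorted_and_no_larger() ->
    ?FORALL(L, list(int()),
        begin
            U = lists:usort(L),
            U =:= lists:sort(U) andalso length(U) =< length(L)
        end).
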
- - - -% Stem deeper than the current max level. Expect no changes to the revtree -% -prop_no_change_stemming_deeper_than_current_depth() -> - ?FORALL(RevTree, g_revtree(), - begin - StemDepth = depth(RevTree) + 1, - Stemmed = couch_key_tree:stem(RevTree, StemDepth), - StemmedKeys = lists:usort(keylist(Stemmed)), - InputKeys = lists:usort(keylist(RevTree)), - StemmedKeys == InputKeys - end - ). - - -% Stem at a random small depth, make sure that resulting tree has -% unique revisions and the same number or less revisions than input -% -prop_stemming_results_in_same_or_less_total_revs() -> - ?FORALL({RevTree, StemDepth}, {g_revtree(), choose(1, 20)}, - begin - Stemmed = couch_key_tree:stem(RevTree, StemDepth), - OldRealDepth = real_depth(RevTree), - StemmedKeys = keylist(Stemmed), - UniqueStemmedKeys = lists:usort(StemmedKeys), - UniqueInputKeys = lists:usort(keylist(RevTree)), - NewRealDepth = real_depth(Stemmed), - length(StemmedKeys) == length(UniqueStemmedKeys) - andalso length(UniqueStemmedKeys) =< length(UniqueInputKeys) - andalso OldRealDepth >= NewRealDepth - end - ). - - -% Generate a longer path (revtree with no branches) then stem it. -% Always expect it to shrink to stemmed depth. -prop_stem_path_expect_size_to_get_smaller() -> - ?FORALL({RevTree, StemDepth}, - { - ?SIZED(Size, resize(Size * 10, g_revtree([], 1))), - choose(1,5) - }, - ?IMPLIES(real_depth(RevTree) > 5, - begin - Stemmed = couch_key_tree:stem(RevTree, StemDepth), - StemmedKeys = lists:usort(keylist(Stemmed)), - InputKeys = lists:usort(keylist(RevTree)), - length(InputKeys) > length(StemmedKeys) - andalso real_depth(Stemmed) == StemDepth - end - ) - ). - - -% After stemming all leaves are still present -prop_after_stemming_all_leaves_are_present() -> - ?FORALL({RevTree, StemDepth}, - {g_revtree(), choose(1,20)}, - begin - OldRealDepth = real_depth(RevTree), - OldLeaves = leaves(RevTree), - Stemmed = couch_key_tree:stem(RevTree, StemDepth), - NewRealDepth = real_depth(Stemmed), - NewLeaves = leaves(Stemmed), - valid_revtree(Stemmed) - andalso OldRealDepth >= NewRealDepth - andalso OldLeaves == NewLeaves - - end - ). - - -% After stemming paths to root didn't get longer -prop_after_stemming_paths_are_shorter() -> - ?FORALL({StemDepth, RevTree}, {choose(2,10), g_revtree()}, - begin - OldPaths = paths(RevTree), - Stemmed = couch_key_tree:stem(RevTree, StemDepth), - NewPaths = paths(Stemmed), - GrowingPaths = orddict:fold(fun(Rev, Path, Acc) -> - OldPath = orddict:fetch(Rev, OldPaths), - case length(Path) > length(OldPath) of - true -> - [{Rev, Path, OldPath}| Acc]; - false -> - Acc - end - end, [], NewPaths), - valid_revtree(Stemmed) andalso GrowingPaths == [] - end - ). - - -% Check leaf count -prop_leaf_count() -> - ?FORALL(RevTree, g_revtree(), - length(leaves(RevTree)) == couch_key_tree:count_leafs(RevTree) - ). - - -% Check get leafs -prop_get_leafs() -> - ?FORALL(RevTree, g_revtree(), - begin - LeafsFull = couch_key_tree:get_all_leafs(RevTree), - lists:usort([Rev || {_V, {_D, [Rev | _]}} <- LeafsFull]) == leaves(RevTree) - end - ). - - -% -% Generators -% - -% Generate a full rev tree. Most of the forms are just there to set up default -% parameters, _revtree/3 does all heavy lifting. -% - -g_revtree() -> - ?SIZED(Size, g_revtree(Size)). - - -g_revtree(Size) when is_integer(Size) -> - g_revtree(Size, [], ?MAX_BRANCHES); -g_revtree(Revs) when is_list(Revs) -> - ?SIZED(Size, g_revtree(Size, Revs, ?MAX_BRANCHES)). 
- - -g_revtree(Size, Revs) when is_integer(Size), is_list(Revs) -> - g_revtree(Size, Revs, ?MAX_BRANCHES); -g_revtree(Revs, MaxBranches) when is_list(Revs), is_integer(MaxBranches) -> - ?SIZED(Size, g_revtree(Size, Revs, MaxBranches)). - - -g_revtree(0, _Revs, _MaxBranches) -> - []; -g_revtree(Size, ERevs, MaxBranches) -> - ?LET({Depth, Revs}, {g_stem_depth(Size), g_revs(Size, ERevs)}, - [{Depth, g_treenode(Size, Revs, MaxBranches)}] - ). - - -% Generate a tree node and then recursively generate its children. -% -g_treenode(0, Revs, _) -> - {elements(Revs), x, []}; -g_treenode(Size, Revs, MaxBranches) -> - ?DELAY(?LET(N, int(0, MaxBranches), - begin - [Rev | ChildRevs] = Revs, - {Rev, x, g_nodes(Size div ?SIZE_REDUCTION, N, ChildRevs, MaxBranches)} - end - )). - - -% Generate a list of child nodes. Depending on how many children there are -% the pre-generarated revision list is split into that many sublists. -% -g_nodes(0, _N, _Revs, _MaxBranches) -> - []; -g_nodes(_Size, 0, _Revs, _MaxBranches) -> - []; -g_nodes(Size, ChildCount, Revs, MaxBranches) -> - ?LETSHRINK( - ChildNodes, - begin - ChildRevList = child_revs(ChildCount, Revs, Size, MaxBranches), - [g_treenode(Size, ChildRevs, MaxBranches) || ChildRevs <- ChildRevList] - end, - ordered_nodes(ChildNodes) - ). - - -% Generate each subtree's stem depth -% - - -g_stem_depth(Size) -> - choose(0, expected_height(Size, ?SIZE_REDUCTION) div 2). - - -% Uses the shuffle/1 function to shuffle the input list. Unshuffled list is -% used as the shrink value. -% -g_shuffle(L) when is_list(L) -> - triq_dom:domain(g_shuffle, - fun(Self, _Size) -> {Self, shuffle(L)} end, - fun(Self, _Val) -> {Self, L} end - ). - - -% Wrapper to make a list shuffling generator that doesn't shrink -% -g_shuffle_noshrink(L) when is_list(L) -> - triq_dom:noshrink(g_shuffle(L)). - - -% Generate shuffled sublists up to N items long from a list. -% -g_shuffled_sublists(L, N) -> - ?LET(Shuffled, g_shuffle_noshrink(L), lists:sublist(Shuffled, N)). - - -% Generate revision lists. -% -g_revs() -> - ?SIZED(Size, g_revs(Size)). - - -g_revs(Size) when is_integer(Size) -> - g_revs(Size, []). - - -g_revs(Size, Existing) when is_integer(Size), is_list(Existing) -> - Expected = keys_needed(Size, ?SIZE_REDUCTION, ?MAX_BRANCHES), - Revs = revs(Expected, Existing), - case length(Revs) > Expected of - true -> % have extra, try various sublists - g_shuffled_sublists(Revs, Expected); - false -> - triq_dom:return(Revs) - end. - - -% -% Helper functions -% - - -valid_revtree(RevTree) -> - repeating_revs(levels(RevTree)) == [] andalso children_sorted(RevTree). - - -same_keys(RevTree1, RevTree2) -> - Keys1 = lists:usort(keylist(RevTree1)), - Keys2 = lists:usort(keylist(RevTree2)), - Keys1 == Keys2. - - -all(L) -> - lists:all(fun(E) -> E end, L). - -% Shufle a list of items. Tag each item with a random number then sort -% the list and remove the tags. -% -shuffle(L) -> - Tagged = [{triq_rnd:uniform(), X} || X <- L], - [X || {_, X} <- lists:sort(Tagged)]. - - -% Generate list of relateively unique large random numbers -rand_list(N) when N =< 0 -> - []; -rand_list(N) -> - [triq_rnd:uniform(?RAND_SIZE) || _ <- lists:seq(1, N)]. - - -% Generate a list of revisions to be used as key in revision trees. Expected -% must the number of maximum expected nodes in a revision tree. Existing is an -% optional list revisions which must be included in the result. The output list -% is sorted. 
-revs(0, _Existing) ->
-    [];
-revs(Expected, Existing) when is_integer(Expected), is_list(Existing) ->
-    Need = Expected - length(Existing),
-    lists:usort(lists:append(Existing, rand_list(Need))).
-
-
-% Get the list of all the keys in a revision tree. The input can also be
-% an individual tree (tagged with the depth to the virtual root) or a node.
-% Yes, this is not tail recursive but the idea is to keep it simple.
-%
-keylist({_D, Node}) when is_tuple(Node) ->
-    keylist(Node);
-keylist({K, _V, Nodes}) ->
-    [K | keylist(Nodes)];
-keylist(Nodes) ->
-    lists:append([keylist(Node) || Node <- Nodes]).
-
-
-% Get the list of leaves from a revision tree.
-leaves([]) ->
-    [];
-leaves({_D, Node}) when is_tuple(Node) ->
-    leaves(Node);
-leaves({K, _V, []}) ->
-    [K];
-leaves({_K, _V, Nodes}) ->
-    leaves(Nodes);
-leaves(Nodes) ->
-    lists:usort(lists:append([leaves(N) || N <- Nodes])).
-
-
-% Get paths from leaf to root. The result is an orddict of [{LeafRev, [Rev]}]
-%
-paths([]) ->
-    orddict:new();
-paths(RevTree) when is_list(RevTree) ->
-    paths_merge_dicts([paths(T) || T <- RevTree]);
-paths({_Depth, Node}) when is_tuple(Node) ->
-    paths(Node);
-paths({K, _V, []}) ->
-    orddict:store(K, [], orddict:new());
-paths({K, _V, Nodes}) ->
-    CombinedDict = paths_merge_dicts([paths(N) || N <- Nodes]),
-    orddict:map(fun(_LeafKey, Path) -> Path ++ [K] end, CombinedDict).
-
-
-paths_merge_dicts(Dicts) ->
-    lists:foldl(fun(D, AccD) ->
-        orddict:merge(fun(K, V1, V2) ->
-            throw({found_duplicates, K, V1, V2})
-        end, D, AccD)
-    end, orddict:new(), Dicts).
-
-
-% Get lists of all the keys at each depth level. The result is an orddict that
-% looks like [{depth, [key]}]. The depth used here is the "virtual" depth as
-% indicated by the stemmed depth tag that goes with every top level subtree.
-%
-levels([]) ->
-    orddict:new();
-levels(RevTree) when is_list(RevTree) ->
-    lists:foldl(fun(T, Dict) -> levels(T, Dict) end, orddict:new(), RevTree).
-
-
-levels({Depth, Node}, Dict) when is_tuple(Node) ->
-    levels(Node, Depth, Dict).
-
-
-levels({K, _V, Nodes}, Depth, Dict) ->
-    Dict1 = case orddict:is_key(Depth, Dict) of
-        true -> orddict:append(Depth, K, Dict);
-        false -> orddict:store(Depth, [K], Dict)
-    end,
-    levels(Nodes, Depth + 1, Dict1);
-levels(Nodes, Depth, Dict) ->
-    lists:foldl(fun(Node, AccDict) ->
-        levels(Node, Depth, AccDict)
-    end, Dict, Nodes).
-
-
-% Using the output of levels/1 as input, return any repeating revisions if
-% there are any at a particular level. Levels which have no revisions are
-% not returned.
-%
-repeating_revs(Dict) ->
-    orddict:filter(fun(_Depth, Revs) ->
-        length(lists:usort(Revs)) =/= length(Revs)
-    end, Dict).
-
-
-% Check that the children of all nodes are sorted
-children_sorted([]) ->
-    true;
-children_sorted(Nodes) when is_list(Nodes) ->
-    all([children_sorted(N) || N <- Nodes]);
-children_sorted({_D, Node}) when is_tuple(Node) ->
-    children_sorted(Node);
-children_sorted({_K, _V, Nodes}) ->
-    children_sorted(Nodes).
-
-
-% Get the maximum depth of a revtree. The depth is "virtual" as it takes into
-% account the distance to the now stemmed root node as indicated by the top
-% level subtrees.
-%
-depth([]) ->
-    0;
-depth(RevTree) when is_list(RevTree) ->
-    lists:max([depth(T) || T <- RevTree]);
-depth({Depth, Node}) when is_tuple(Node) ->
-    depth(Node, Depth - 1).
-
-
-depth({_K, _V, Nodes}, Depth) ->
-    depth(Nodes, Depth + 1);
-depth([], Depth) ->
-    Depth;
-depth(Nodes, Depth) ->
-    lists:max([depth(Node, Depth) || Node <- Nodes]).
-
-
-% Get the "real" tree depth, not the virtual one. As revtrees get stemmed,
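To make the helper output shapes concrete, here is a worked example (not part of the suite) for the two-sibling tree used throughout the EUnit cases deleted below:

    Tree = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
    %% paths/1: orddict mapping each leaf rev to its path back up to the root
    [{"1a", ["1"]}, {"1b", ["1"]}] = paths(Tree),
    %% levels/1: orddict mapping each virtual depth to the revs at that depth
    [{0, ["1"]}, {1, ["1a", "1b"]}] = levels(Tree).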
-% they keep their virtual depth, but the actual number of nodes in the tree
-% could be reduced.
-%
-real_depth([]) ->
-    0;
-real_depth(RevTree) when is_list(RevTree) ->
-    lists:max([real_depth(T) || T <- RevTree]);
-real_depth({_Depth, Node}) when is_tuple(Node) ->
-    depth(Node, 0). % Note: from here on we use the depth/2 function
-
-
-% Return an ordered list of revtree nodes. When sorting, only the immediate
-% keys (revisions) are compared; the comparison doesn't descend into the tree.
-%
-ordered_nodes(Nodes) ->
-    lists:sort(fun({K1, _, _}, {K2, _, _}) -> K1 =< K2 end, Nodes).
-
-
-% Calculate the maximum number of rev tree nodes needed for a tree of a given
-% height and branchiness. Height is derived from Size and LevelReductionFactor,
-% that is, from how big the sample should be and how quickly the size parameter
-% shrinks at each level.
-%
-keys_needed(0, _, _) ->
-    0;
-keys_needed(Size, LevelReductionFactor, 1) ->
-    expected_height(Size, LevelReductionFactor);
-keys_needed(Size, LevelReductionFactor, Branches) ->
-    Height = expected_height(Size, LevelReductionFactor),
-    trunc(math:pow(Branches, Height + 1)) + 1.
-
-
-% Calculate the expected tree height for a given sample size and branchiness.
-% At each step the size is divided by the reduction factor.
-expected_height(Size, LevelReductionFactor) ->
-    trunc(log(LevelReductionFactor, Size)) + 1.
-
-
-log(B, X) ->
-    math:log(X) / math:log(B).
-
-
-% Distribute items in a list into roughly equal chunks of a given size.
-%
-distribute(_ChunkSize, []) ->
-    [];
-distribute(ChunkSize, L) when ChunkSize >= length(L) ->
-    [L];
-distribute(ChunkSize, L) ->
-    {L1, L2} = lists:split(ChunkSize, L),
-    [L1 | distribute(ChunkSize, L2)].
-
-
-% Split a single (parent) revision list into chunks (sub-lists), one for each
-% child. Also, for safety, double check that at this point in the process the
-% list of revisions is sufficiently large. If it isn't, something went wrong
-% and a specific exception is thrown ({not_enough_revisions, Got, Needed}).
-%
-child_revs(ChildCount, Revs, Size, MaxBranches) ->
-    NeedKeys = keys_needed(Size, ?SIZE_REDUCTION, MaxBranches),
-    case length(Revs) >= NeedKeys of
-        true ->
-            ChunkSize = trunc(length(Revs) / ChildCount) + 1,
-            distribute(ChunkSize, Revs);
-        false ->
-            throw({not_enough_revisions, length(Revs), NeedKeys})
-    end.
diff --git a/src/couch/test/couch_key_tree_tests.erl b/src/couch/test/couch_key_tree_tests.erl
deleted file mode 100644
index 5d9cc8372..000000000
--- a/src/couch/test/couch_key_tree_tests.erl
+++ /dev/null
@@ -1,413 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_key_tree_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--define(DEPTH, 10).
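As a footnote to the generator module deleted above: the sizing arithmetic in expected_height/2 and keys_needed/3 works out as follows, with illustrative arguments (Size = 100, reduction factor 3, 2 branches; the suite's actual ?SIZE_REDUCTION and ?MAX_BRANCHES values are defined at the top of that file, outside this hunk):

    %% expected_height(100, 3): trunc(log(100) / log(3)) + 1 = trunc(4.19) + 1 = 5
    5 = expected_height(100, 3),
    %% keys_needed(100, 3, 2): trunc(math:pow(2, 5 + 1)) + 1 = 64 + 1 = 65
    65 = keys_needed(100, 3, 2).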
- - -key_tree_merge_test_()-> - { - "Key tree merge", - [ - should_merge_with_empty_tree(), - should_merge_reflexive(), - should_merge_prefix_of_a_tree_with_tree(), - should_produce_conflict_on_merge_with_unrelated_branch(), - should_merge_reflexive_for_child_nodes(), - should_merge_tree_to_itself(), - should_merge_tree_of_odd_length(), - should_merge_tree_with_stem(), - should_merge_with_stem_at_deeper_level(), - should_merge_with_stem_at_deeper_level_with_deeper_paths(), - should_merge_single_tree_with_deeper_stem(), - should_merge_tree_with_large_stem(), - should_merge_stems(), - should_create_conflicts_on_merge(), - should_create_no_conflicts_on_merge(), - should_ignore_conflicting_branch() - ] - }. - -key_tree_missing_leaves_test_()-> - { - "Missing tree leaves", - [ - should_not_find_missing_leaves(), - should_find_missing_leaves() - ] - }. - -key_tree_remove_leaves_test_()-> - { - "Remove tree leaves", - [ - should_have_no_effect_on_removing_no_leaves(), - should_have_no_effect_on_removing_non_existant_branch(), - should_remove_leaf(), - should_produce_empty_tree_on_removing_all_leaves(), - should_have_no_effect_on_removing_non_existant_node(), - should_produce_empty_tree_on_removing_last_leaf() - ] - }. - -key_tree_get_leaves_test_()-> - { - "Leaves retrieving", - [ - should_extract_subtree(), - should_extract_subsubtree(), - should_gather_non_existant_leaf(), - should_gather_leaf(), - shoul_gather_multiple_leaves(), - should_gather_single_leaf_for_multiple_revs(), - should_gather_multiple_for_multiple_revs(), - should_retrieve_full_key_path(), - should_retrieve_full_key_path_for_node(), - should_retrieve_leaves_with_parent_node(), - should_retrieve_all_leaves() - ] - }. - -key_tree_leaf_counting_test_()-> - { - "Leaf counting", - [ - should_have_no_leaves_for_empty_tree(), - should_have_single_leaf_for_tree_with_single_node(), - should_have_two_leaves_for_tree_with_chindler_siblings(), - should_not_affect_on_leaf_counting_for_stemmed_tree() - ] - }. - -key_tree_stemming_test_()-> - { - "Stemming", - [ - should_have_no_effect_for_stemming_more_levels_than_exists(), - should_return_one_deepest_node(), - should_return_two_deepest_nodes() - ] - }. - - -should_merge_with_empty_tree()-> - One = {1, {"1","foo",[]}}, - ?_assertEqual({[One], new_leaf}, - merge_and_stem([], One)). - -should_merge_reflexive()-> - One = {1, {"1","foo",[]}}, - ?_assertEqual({[One], internal_node}, - merge_and_stem([One], One)). - -should_merge_prefix_of_a_tree_with_tree()-> - One = {1, {"1","foo",[]}}, - TwoSibs = [{1, {"1","foo",[]}}, - {1, {"2","foo",[]}}], - ?_assertEqual({TwoSibs, internal_node}, - merge_and_stem(TwoSibs, One)). - -should_produce_conflict_on_merge_with_unrelated_branch()-> - TwoSibs = [{1, {"1","foo",[]}}, - {1, {"2","foo",[]}}], - Three = {1, {"3","foo",[]}}, - ThreeSibs = [{1, {"1","foo",[]}}, - {1, {"2","foo",[]}}, - {1, {"3","foo",[]}}], - ?_assertEqual({ThreeSibs, new_branch}, - merge_and_stem(TwoSibs, Three)). - -should_merge_reflexive_for_child_nodes()-> - TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, - ?_assertEqual({[TwoChild], internal_node}, - merge_and_stem([TwoChild], TwoChild)). 
- -should_merge_tree_to_itself()-> - TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []}, - {"1b", "bar", []}]}}, - Leafs = couch_key_tree:get_all_leafs([TwoChildSibs]), - Paths = lists:map(fun leaf_to_path/1, Leafs), - FinalTree = lists:foldl(fun(Path, TreeAcc) -> - {NewTree, internal_node} = merge_and_stem(TreeAcc, Path), - NewTree - end, [TwoChildSibs], Paths), - ?_assertEqual([TwoChildSibs], FinalTree). - -leaf_to_path({Value, {Start, Keys}}) -> - [Branch] = to_branch(Value, lists:reverse(Keys)), - {Start - length(Keys) + 1, Branch}. - -to_branch(Value, [Key]) -> - [{Key, Value, []}]; -to_branch(Value, [Key | RestKeys]) -> - [{Key, [], to_branch(Value, RestKeys)}]. - - -should_merge_tree_of_odd_length()-> - TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, - TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []}, - {"1b", "bar", []}]}}, - TwoChildPlusSibs = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}, - {"1b", "bar", []}]}}, - ?_assertEqual({[TwoChildPlusSibs], new_leaf}, - merge_and_stem([TwoChildSibs], TwoChild)). - -should_merge_tree_with_stem()-> - Stemmed = {2, {"1a", "bar", []}}, - TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []}, - {"1b", "bar", []}]}}, - - ?_assertEqual({[TwoChildSibs], internal_node}, - merge_and_stem([TwoChildSibs], Stemmed)). - -should_merge_with_stem_at_deeper_level()-> - Stemmed = {3, {"1bb", "boo", []}}, - TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []}, - {"1b", "bar", [{"1bb", "boo", []}]}]}}, - ?_assertEqual({[TwoChildSibs], internal_node}, - merge_and_stem([TwoChildSibs], Stemmed)). - -should_merge_with_stem_at_deeper_level_with_deeper_paths()-> - Stemmed = {3, {"1bb", "boo", []}}, - StemmedTwoChildSibs = [{2,{"1a", "bar", []}}, - {2,{"1b", "bar", [{"1bb", "boo", []}]}}], - ?_assertEqual({StemmedTwoChildSibs, internal_node}, - merge_and_stem(StemmedTwoChildSibs, Stemmed)). - -should_merge_single_tree_with_deeper_stem()-> - Stemmed = {3, {"1aa", "bar", []}}, - TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, - ?_assertEqual({[TwoChild], internal_node}, - merge_and_stem([TwoChild], Stemmed)). - -should_merge_tree_with_large_stem()-> - Stemmed = {2, {"1a", "bar", [{"1aa", "bar", []}]}}, - TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, - ?_assertEqual({[TwoChild], internal_node}, - merge_and_stem([TwoChild], Stemmed)). - -should_merge_stems()-> - StemmedA = {2, {"1a", "bar", [{"1aa", "bar", []}]}}, - StemmedB = {3, {"1aa", "bar", []}}, - ?_assertEqual({[StemmedA], internal_node}, - merge_and_stem([StemmedA], StemmedB)). - -should_create_conflicts_on_merge()-> - OneChild = {1, {"1","foo",[{"1a", "bar", []}]}}, - Stemmed = {3, {"1aa", "bar", []}}, - ?_assertEqual({[OneChild, Stemmed], new_branch}, - merge_and_stem([OneChild], Stemmed)). - -should_create_no_conflicts_on_merge()-> - OneChild = {1, {"1","foo",[{"1a", "bar", []}]}}, - Stemmed = {3, {"1aa", "bar", []}}, - TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, - ?_assertEqual({[TwoChild], new_leaf}, - merge_and_stem([OneChild, Stemmed], TwoChild)). 
- -should_ignore_conflicting_branch()-> - %% this test is based on couch-902-test-case2.py - %% foo has conflicts from replication at depth two - %% foo3 is the current value - Foo = {1, {"foo", - "val1", - [{"foo2","val2",[]}, - {"foo3", "val3", []} - ]}}, - %% foo now has an attachment added, which leads to foo4 and val4 - %% off foo3 - Bar = {1, {"foo", - [], - [{"foo3", - [], - [{"foo4","val4",[]} - ]}]}}, - %% this is what the merge returns - %% note that it ignore the conflicting branch as there's no match - FooBar = {1, {"foo", - "val1", - [{"foo2","val2",[]}, - {"foo3", "val3", [{"foo4","val4",[]}]} - ]}}, - { - "COUCHDB-902", - ?_assertEqual({[FooBar], new_leaf}, - merge_and_stem([Foo], Bar)) - }. - -should_not_find_missing_leaves()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual([], - couch_key_tree:find_missing(TwoChildSibs, - [{0,"1"}, {1,"1a"}])). - -should_find_missing_leaves()-> - Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], - Stemmed2 = [{2, {"1aa", "bar", []}}], - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - [ - ?_assertEqual( - [{0, "10"}, {100, "x"}], - couch_key_tree:find_missing( - TwoChildSibs, - [{0,"1"}, {0, "10"}, {1,"1a"}, {100, "x"}])), - ?_assertEqual( - [{0, "1"}, {100, "x"}], - couch_key_tree:find_missing( - Stemmed1, - [{0,"1"}, {1,"1a"}, {100, "x"}])), - ?_assertEqual( - [{0, "1"}, {1,"1a"}, {100, "x"}], - couch_key_tree:find_missing( - Stemmed2, - [{0,"1"}, {1,"1a"}, {100, "x"}])) - ]. - -should_have_no_effect_on_removing_no_leaves()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({TwoChildSibs, []}, - couch_key_tree:remove_leafs(TwoChildSibs, - [])). - -should_have_no_effect_on_removing_non_existant_branch()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({TwoChildSibs, []}, - couch_key_tree:remove_leafs(TwoChildSibs, - [{0, "1"}])). - -should_remove_leaf()-> - OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}], - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({OneChild, [{1, "1b"}]}, - couch_key_tree:remove_leafs(TwoChildSibs, - [{1, "1b"}])). - -should_produce_empty_tree_on_removing_all_leaves()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[], [{1, "1b"}, {1, "1a"}]}, - couch_key_tree:remove_leafs(TwoChildSibs, - [{1, "1b"}, {1, "1a"}])). - -should_have_no_effect_on_removing_non_existant_node()-> - Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], - ?_assertEqual({Stemmed, []}, - couch_key_tree:remove_leafs(Stemmed, - [{1, "1a"}])). - -should_produce_empty_tree_on_removing_last_leaf()-> - Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], - ?_assertEqual({[], [{2, "1aa"}]}, - couch_key_tree:remove_leafs(Stemmed, - [{2, "1aa"}])). - -should_extract_subtree()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[{"foo", {0, ["1"]}}],[]}, - couch_key_tree:get(TwoChildSibs, [{0, "1"}])). - -should_extract_subsubtree()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]}, - couch_key_tree:get(TwoChildSibs, [{1, "1a"}])). - -should_gather_non_existant_leaf()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[],[{0, "x"}]}, - couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}])). 
- -should_gather_leaf()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[{"bar", {1, ["1a","1"]}}],[]}, - couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}])). - -shoul_gather_multiple_leaves()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]}, - couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}])). - -should_gather_single_leaf_for_multiple_revs() -> - OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}], - ToFind = [{0, "1"}, {1, "1a"}], - ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]}, - couch_key_tree:get_key_leafs(OneChild, ToFind)). - -should_gather_multiple_for_multiple_revs() -> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ToFind = [{0, "1"}, {1, "1a"}], - ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]}, - couch_key_tree:get_key_leafs(TwoChildSibs, ToFind)). - -should_retrieve_full_key_path()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[{0,[{"1", "foo"}]}],[]}, - couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}])). - -should_retrieve_full_key_path_for_node()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual({[{1,[{"1a", "bar"},{"1", "foo"}]}],[]}, - couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}])). - -should_retrieve_leaves_with_parent_node()-> - Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - [ - ?_assertEqual([{2, [{"1aa", "bar"},{"1a", "bar"}]}], - couch_key_tree:get_all_leafs_full(Stemmed)), - ?_assertEqual([{1, [{"1a", "bar"},{"1", "foo"}]}, - {1, [{"1b", "bar"},{"1", "foo"}]}], - couch_key_tree:get_all_leafs_full(TwoChildSibs)) - ]. - -should_retrieve_all_leaves()-> - Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - [ - ?_assertEqual([{"bar", {2, ["1aa","1a"]}}], - couch_key_tree:get_all_leafs(Stemmed)), - ?_assertEqual([{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b","1"]}}], - couch_key_tree:get_all_leafs(TwoChildSibs)) - ]. - -should_have_no_leaves_for_empty_tree()-> - ?_assertEqual(0, couch_key_tree:count_leafs([])). - -should_have_single_leaf_for_tree_with_single_node()-> - ?_assertEqual(1, couch_key_tree:count_leafs([{0, {"1","foo",[]}}])). - -should_have_two_leaves_for_tree_with_chindler_siblings()-> - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], - ?_assertEqual(2, couch_key_tree:count_leafs(TwoChildSibs)). - -should_not_affect_on_leaf_counting_for_stemmed_tree()-> - ?_assertEqual(1, couch_key_tree:count_leafs([{2, {"1bb", "boo", []}}])). - -should_have_no_effect_for_stemming_more_levels_than_exists()-> - TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], - ?_assertEqual(TwoChild, couch_key_tree:stem(TwoChild, 3)). - -should_return_one_deepest_node()-> - TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], - Stemmed = [{2, {"1aa", "bar", []}}], - ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 1)). - -should_return_two_deepest_nodes()-> - TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], - Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], - ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 2)). 
- - -merge_and_stem(RevTree, Tree) -> - {Merged, Result} = couch_key_tree:merge(RevTree, Tree), - {couch_key_tree:stem(Merged, ?DEPTH), Result}. diff --git a/src/couch/test/couch_passwords_tests.erl b/src/couch/test/couch_passwords_tests.erl deleted file mode 100644 index 88de8530f..000000000 --- a/src/couch/test/couch_passwords_tests.erl +++ /dev/null @@ -1,54 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_passwords_tests). - --include_lib("couch/include/couch_eunit.hrl"). - - -pbkdf2_test_()-> - {"PBKDF2", - [ - {"Iterations: 1, length: 20", - ?_assertEqual( - {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>}, - couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20))}, - - {"Iterations: 2, length: 20", - ?_assertEqual( - {ok, <<"ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957">>}, - couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 2, 20))}, - - {"Iterations: 4096, length: 20", - ?_assertEqual( - {ok, <<"4b007901b765489abead49d926f721d065a429c1">>}, - couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 4096, 20))}, - - {"Iterations: 4096, length: 25", - ?_assertEqual( - {ok, <<"3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038">>}, - couch_passwords:pbkdf2(<<"passwordPASSWORDpassword">>, - <<"saltSALTsaltSALTsaltSALTsaltSALTsalt">>, - 4096, 25))}, - {"Null byte", - ?_assertEqual( - {ok, <<"56fa6aa75548099dcc37d7f03425e0c3">>}, - couch_passwords:pbkdf2(<<"pass\0word">>, - <<"sa\0lt">>, - 4096, 16))}, - - {timeout, 600, %% this may runs too long on slow hosts - {"Iterations: 16777216 - this may take some time", - ?_assertEqual( - {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>}, - couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20) - )}}]}. diff --git a/src/couch/test/couch_query_servers_tests.erl b/src/couch/test/couch_query_servers_tests.erl deleted file mode 100644 index f8df896c4..000000000 --- a/src/couch/test/couch_query_servers_tests.erl +++ /dev/null @@ -1,95 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_query_servers_tests). - --include_lib("couch/include/couch_eunit.hrl"). - - -setup() -> - meck:new([config, couch_log]). - - -teardown(_) -> - meck:unload(). - - -sum_overflow_test_() -> - { - "Test overflow detection in the _sum reduce function", - { - setup, - fun setup/0, - fun teardown/1, - [ - fun should_return_error_on_overflow/0, - fun should_return_object_on_log/0, - fun should_return_object_on_false/0 - ] - } - }. 
- - -should_return_error_on_overflow() -> - meck:reset([config, couch_log]), - meck:expect( - config, get, ["query_server_config", "reduce_limit", "true"], - "true" - ), - meck:expect(couch_log, error, ['_', '_'], ok), - KVs = gen_sum_kvs(), - {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs), - ?assertMatch({[{<<"error">>, <<"builtin_reduce_error">>} | _]}, Result), - ?assert(meck:called(config, get, '_')), - ?assert(meck:called(couch_log, error, '_')). - - -should_return_object_on_log() -> - meck:reset([config, couch_log]), - meck:expect( - config, get, ["query_server_config", "reduce_limit", "true"], - "log" - ), - meck:expect(couch_log, error, ['_', '_'], ok), - KVs = gen_sum_kvs(), - {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs), - ?assertMatch({[_ | _]}, Result), - Keys = [K || {K, _} <- element(1, Result)], - ?assert(not lists:member(<<"error">>, Keys)), - ?assert(meck:called(config, get, '_')), - ?assert(meck:called(couch_log, error, '_')). - - -should_return_object_on_false() -> - meck:reset([config, couch_log]), - meck:expect( - config, get, ["query_server_config", "reduce_limit", "true"], - "false" - ), - meck:expect(couch_log, error, ['_', '_'], ok), - KVs = gen_sum_kvs(), - {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs), - ?assertMatch({[_ | _]}, Result), - Keys = [K || {K, _} <- element(1, Result)], - ?assert(not lists:member(<<"error">>, Keys)), - ?assert(meck:called(config, get, '_')), - ?assertNot(meck:called(couch_log, error, '_')). - - -gen_sum_kvs() -> - lists:map(fun(I) -> - Props = lists:map(fun(_) -> - K = couch_util:encodeBase64Url(crypto:strong_rand_bytes(16)), - {K, 1} - end, lists:seq(1, 20)), - [I, {Props}] - end, lists:seq(1, 10)). diff --git a/src/couch/test/couch_server_tests.erl b/src/couch/test/couch_server_tests.erl deleted file mode 100644 index 530b7efd0..000000000 --- a/src/couch/test/couch_server_tests.erl +++ /dev/null @@ -1,310 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_server_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include("../src/couch_server_int.hrl"). - -start() -> - Ctx = test_util:start_couch(), - config:set("log", "include_sasl", "false", false), - Ctx. - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, []), - Db. - -setup(rename) -> - config:set("couchdb", "enable_database_recovery", "true", false), - setup(); -setup(_) -> - setup(). - -teardown(Db) -> - FilePath = couch_db:get_filepath(Db), - (catch couch_db:close(Db)), - (catch file:delete(FilePath)). - -teardown(rename, Db) -> - config:set("couchdb", "enable_database_recovery", "false", false), - teardown(Db); -teardown(_, Db) -> - teardown(Db). - - -delete_db_test_() -> - { - "Test for proper deletion of db file", - { - setup, - fun start/0, fun test_util:stop/1, - [ - make_test_case(rename, [fun should_rename_on_delete/2]), - make_test_case(delete, [fun should_delete/2]) - ] - } - }. 
- -make_test_case(Mod, Funs) -> - { - lists:flatten(io_lib:format("~s", [Mod])), - {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]} - }. - -should_rename_on_delete(_, Db) -> - DbName = couch_db:name(Db), - Origin = couch_db:get_filepath(Db), - ?_test(begin - ?assert(filelib:is_regular(Origin)), - ?assertMatch(ok, couch_server:delete(DbName, [])), - ?assertNot(filelib:is_regular(Origin)), - DeletedFiles = deleted_files(Origin), - ?assertMatch([_], DeletedFiles), - [Renamed] = DeletedFiles, - ?assertEqual( - filename:extension(Origin), filename:extension(Renamed)), - ?assert(filelib:is_regular(Renamed)) - end). - -should_delete(_, Db) -> - DbName = couch_db:name(Db), - Origin = couch_db:get_filepath(Db), - ?_test(begin - ?assert(filelib:is_regular(Origin)), - ?assertMatch(ok, couch_server:delete(DbName, [])), - ?assertNot(filelib:is_regular(Origin)), - ?assertMatch([], deleted_files(Origin)) - end). - -deleted_files(ViewFile) -> - filelib:wildcard(filename:rootname(ViewFile) ++ "*.deleted.*"). - - -bad_engine_option_test_() -> - { - setup, - fun start/0, - fun test_util:stop/1, - [ - fun t_bad_engine_option/0 - ] - }. - - -t_bad_engine_option() -> - Resp = couch_server:create(?tempdb(), [{engine, <<"cowabunga!">>}]), - ?assertEqual(Resp, {error, {invalid_engine_extension, <<"cowabunga!">>}}). - - -get_engine_path_test_() -> - { - setup, - fun start/0, fun test_util:stop/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_return_engine_path/1, - fun should_return_invalid_engine_error/1 - ] - } - }. - - -should_return_engine_path(Db) -> - DbName = couch_db:name(Db), - Engine = couch_db_engine:get_engine(Db), - Resp = couch_server:get_engine_path(DbName, Engine), - FilePath = couch_db:get_filepath(Db), - ?_assertMatch({ok, FilePath}, Resp). - - -should_return_invalid_engine_error(Db) -> - DbName = couch_db:name(Db), - Engine = fake_engine, - Resp = couch_server:get_engine_path(DbName, Engine), - ?_assertMatch({error, {invalid_engine, Engine}}, Resp). - - -interleaved_requests_test_() -> - { - setup, - fun start_interleaved/0, - fun stop_interleaved/1, - fun make_interleaved_requests/1 - }. - - -start_interleaved() -> - TestDbName = ?tempdb(), - meck:new(couch_db, [passthrough]), - meck:expect(couch_db, start_link, fun(Engine, DbName, Filename, Options) -> - case DbName of - TestDbName -> - receive - go -> ok - end, - Res = meck:passthrough([Engine, DbName, Filename, Options]), - % We're unlinking and sending a delayed - % EXIT signal so that we can mimic a specific - % message order in couch_server. On a test machine - % this is a big race condition which affects the - % ability to induce the bug. - case Res of - {ok, Db} -> - DbPid = couch_db:get_pid(Db), - unlink(DbPid), - Msg = {'EXIT', DbPid, killed}, - erlang:send_after(2000, whereis(couch_server), Msg); - _ -> - ok - end, - Res; - _ -> - meck:passthrough([Engine, DbName, Filename, Options]) - end - end), - {test_util:start_couch(), TestDbName}. - - -stop_interleaved({Ctx, TestDbName}) -> - couch_server:delete(TestDbName, [?ADMIN_CTX]), - meck:unload(), - test_util:stop_couch(Ctx). - - -make_interleaved_requests({_, TestDbName}) -> - [ - fun() -> t_interleaved_create_delete_open(TestDbName) end - ]. 
-
-
-t_interleaved_create_delete_open(DbName) ->
-    {CrtRef, DelRef, OpenRef} = {make_ref(), make_ref(), make_ref()},
-    CrtMsg = {'$gen_call', {self(), CrtRef}, {create, DbName, [?ADMIN_CTX]}},
-    DelMsg = {'$gen_call', {self(), DelRef}, {delete, DbName, [?ADMIN_CTX]}},
-    OpenMsg = {'$gen_call', {self(), OpenRef}, {open, DbName, [?ADMIN_CTX]}},
-
-    % Get the current couch_server pid so we're sure
-    % to not end up messaging two different pids
-    CouchServer = whereis(couch_server),
-
-    % Send our first request; it will succeed but leave
-    % couch_server in an invalid state. Notice that the opener pid
-    % spawned by couch_server:open_async/5 will halt
-    % in our meck expect function waiting for a message.
-    %
-    % We're using raw message passing here so that we don't
-    % have to coordinate multiple processes for this test.
-    CouchServer ! CrtMsg,
-    {ok, Opener} = get_opener_pid(DbName),
-
-    % We have to suspend couch_server so that we can enqueue
-    % our next requests and let the opener finish processing.
-    erlang:suspend_process(CouchServer),
-
-    % Since couch_server is suspended, this delete request won't
-    % be processed until after the opener has sent its
-    % successful open response via gen_server:call/3
-    CouchServer ! DelMsg,
-
-    % This open request will be in the queue after the
-    % delete request but before the gen_server:call/3
-    % message which will establish the mixed up state
-    % in the couch_dbs ets table
-    CouchServer ! OpenMsg,
-
-    % First release the opener pid so it can continue
-    % working while we tweak meck
-    Opener ! go,
-
-    % Replace our expect call to meck so that the OpenMsg
-    % isn't blocked on the receive
-    meck:expect(couch_db, start_link, fun(Engine, DbName1, Filename, Options) ->
-        meck:passthrough([Engine, DbName1, Filename, Options])
-    end),
-
-    % Wait for the '$gen_call' message from OpenerPid to arrive
-    % in couch_server's mailbox
-    ok = wait_for_open_async_result(CouchServer, Opener),
-
-    % Now monitor and resume the couch_server and assert that
-    % couch_server does not crash while processing OpenMsg
-    CSRef = erlang:monitor(process, CouchServer),
-    erlang:resume_process(CouchServer),
-    check_monitor_not_triggered(CSRef),
-
-    % The create response is expected to return not_found
-    % due to the delete request canceling the async opener
-    % pid and sending not_found to all waiters unconditionally
-    ?assertEqual({CrtRef, not_found}, get_next_message()),
-
-    % Our delete request was processed normally
-    ?assertEqual({DelRef, ok}, get_next_message()),
-
-    % The db was deleted, thus it should be not found
-    % when we try to open it.
-    ?assertMatch({OpenRef, {not_found, no_db_file}}, get_next_message()),
-
-    % And finally assert that couch_server is still
-    % alive.
-    ?assert(is_process_alive(CouchServer)),
-    check_monitor_not_triggered(CSRef).
-
-
-get_opener_pid(DbName) ->
-    WaitFun = fun() ->
-        case ets:lookup(couch_dbs, DbName) of
-            [#entry{pid = Pid}] ->
-                {ok, Pid};
-            [] ->
-                wait
-        end
-    end,
-    test_util:wait(WaitFun).
-
-
-wait_for_open_async_result(CouchServer, Opener) ->
-    WaitFun = fun() ->
-        {_, Messages} = erlang:process_info(CouchServer, messages),
-        Found = lists:foldl(fun(Msg, Acc) ->
-            case Msg of
-                {'$gen_call', {Opener, _}, {open_result, _, _, {ok, _}}} ->
-                    true;
-                _ ->
-                    Acc
-            end
-        end, false, Messages),
-        if Found -> ok; true -> wait end
-    end,
-    test_util:wait(WaitFun).
-
-
-check_monitor_not_triggered(Ref) ->
-    receive
-        {'DOWN', Ref, _, _, Reason0} ->
-            erlang:error({monitor_triggered, Reason0})
-    after 100 ->
-        ok
-    end.
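As background for the raw message sends above: gen_server:call/3 essentially boils down to posting a '$gen_call' tuple and waiting for a {Ref, Reply} answer, which is what lets the test enqueue several requests at couch_server without ever blocking in gen_server:call/3 itself. A minimal sketch of that protocol (the real OTP implementation also monitors the server, handles crashes, and in newer releases uses an alias as the tag):

    raw_call(Server, Request) ->
        Ref = make_ref(),
        Server ! {'$gen_call', {self(), Ref}, Request},
        receive
            {Ref, Reply} -> Reply
        after 5000 ->
            erlang:error(timeout)
        end.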
- - -get_next_message() -> - receive - Msg -> - Msg - after 5000 -> - erlang:error(timeout) - end. diff --git a/src/couch/test/couch_stream_tests.erl b/src/couch/test/couch_stream_tests.erl deleted file mode 100644 index a7fedf0af..000000000 --- a/src/couch/test/couch_stream_tests.erl +++ /dev/null @@ -1,124 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_stream_tests). - --include_lib("couch/include/couch_eunit.hrl"). - --define(ENGINE(FdVar), {couch_bt_engine_stream, {FdVar, []}}). - -setup() -> - {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]), - {ok, Stream} = couch_stream:open(?ENGINE(Fd), []), - {Fd, Stream}. - -teardown({Fd, _}) -> - ok = couch_file:close(Fd). - - -stream_test_() -> - { - "CouchDB stream tests", - { - setup, - fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_write/1, - fun should_write_consecutive/1, - fun should_write_empty_binary/1, - fun should_return_file_pointers_on_close/1, - fun should_return_stream_size_on_close/1, - fun should_return_valid_pointers/1, - fun should_recall_last_pointer_position/1, - fun should_stream_more_with_4K_chunk_size/1, - fun should_stop_on_normal_exit_of_stream_opener/1 - ] - } - } - }. - - -should_write({_, Stream}) -> - ?_assertEqual(ok, couch_stream:write(Stream, <<"food">>)). - -should_write_consecutive({_, Stream}) -> - couch_stream:write(Stream, <<"food">>), - ?_assertEqual(ok, couch_stream:write(Stream, <<"foob">>)). - -should_write_empty_binary({_, Stream}) -> - ?_assertEqual(ok, couch_stream:write(Stream, <<>>)). - -should_return_file_pointers_on_close({_, Stream}) -> - couch_stream:write(Stream, <<"foodfoob">>), - {NewEngine, _, _, _, _} = couch_stream:close(Stream), - {ok, Ptrs} = couch_stream:to_disk_term(NewEngine), - ?_assertEqual([{0, 8}], Ptrs). - -should_return_stream_size_on_close({_, Stream}) -> - couch_stream:write(Stream, <<"foodfoob">>), - {_, Length, _, _, _} = couch_stream:close(Stream), - ?_assertEqual(8, Length). - -should_return_valid_pointers({_Fd, Stream}) -> - couch_stream:write(Stream, <<"foodfoob">>), - {NewEngine, _, _, _, _} = couch_stream:close(Stream), - ?_assertEqual(<<"foodfoob">>, read_all(NewEngine)). - -should_recall_last_pointer_position({Fd, Stream}) -> - couch_stream:write(Stream, <<"foodfoob">>), - {_, _, _, _, _} = couch_stream:close(Stream), - {ok, ExpPtr} = couch_file:bytes(Fd), - {ok, Stream2} = couch_stream:open(?ENGINE(Fd)), - ZeroBits = <<0:(8 * 10)>>, - OneBits = <<1:(8 * 10)>>, - ok = couch_stream:write(Stream2, OneBits), - ok = couch_stream:write(Stream2, ZeroBits), - {NewEngine, 20, _, _, _} = couch_stream:close(Stream2), - {ok, Ptrs} = couch_stream:to_disk_term(NewEngine), - [{ExpPtr, 20}] = Ptrs, - AllBits = iolist_to_binary([OneBits, ZeroBits]), - ?_assertEqual(AllBits, read_all(NewEngine)). 
- -should_stream_more_with_4K_chunk_size({Fd, _}) -> - {ok, Stream} = couch_stream:open(?ENGINE(Fd), [{buffer_size, 4096}]), - lists:foldl( - fun(_, Acc) -> - Data = <<"a1b2c">>, - couch_stream:write(Stream, Data), - [Data | Acc] - end, [], lists:seq(1, 1024)), - {NewEngine, Length, _, _, _} = couch_stream:close(Stream), - {ok, Ptrs} = couch_stream:to_disk_term(NewEngine), - ?_assertMatch({[{0, 4100}, {4106, 1020}], 5120}, {Ptrs, Length}). - -should_stop_on_normal_exit_of_stream_opener({Fd, _}) -> - RunnerPid = self(), - OpenerPid = spawn( - fun() -> - {ok, StreamPid} = couch_stream:open(?ENGINE(Fd)), - RunnerPid ! {pid, StreamPid} - end), - StreamPid = receive - {pid, StreamPid0} -> StreamPid0 - end, - % Confirm the validity of the test by verifying the stream opener has died - ?assertNot(is_process_alive(OpenerPid)), - % Verify the stream itself has also died - ?_assertNot(is_process_alive(StreamPid)). - - -read_all(Engine) -> - Data = couch_stream:foldl(Engine, fun(Bin, Acc) -> [Bin, Acc] end, []), - iolist_to_binary(Data). diff --git a/src/couch/test/couch_task_status_tests.erl b/src/couch/test/couch_task_status_tests.erl deleted file mode 100644 index 0ec03563b..000000000 --- a/src/couch/test/couch_task_status_tests.erl +++ /dev/null @@ -1,233 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_task_status_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - - -setup() -> - Ctx = test_util:start(?MODULE, [couch_log], [{dont_mock, [config]}]), - {ok, TaskStatusPid} = couch_task_status:start_link(), - TaskUpdaterPid = spawn(fun() -> loop() end), - {TaskStatusPid, TaskUpdaterPid, Ctx}. - - -teardown({TaskStatusPid, _, Ctx})-> - test_util:stop_sync_throw(TaskStatusPid, fun() -> - couch_task_status:stop() - end, timeout_error, ?TIMEOUT), - test_util:stop(Ctx). - - -couch_task_status_test_() -> - { - "CouchDB task status updates", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_register_task/1, - fun should_set_task_startup_time/1, - fun should_have_update_time_as_startup_before_any_progress/1, - fun should_set_task_type/1, - fun should_not_register_multiple_tasks_for_same_pid/1, - fun should_set_task_progress/1, - fun should_update_task_progress/1, - fun should_update_time_changes_on_task_progress/1, - %% fun should_control_update_frequency/1, - fun should_reset_control_update_frequency/1, - fun should_track_multiple_tasks/1, - fun should_finish_task/1 - - ] - } - }. - - -should_register_task({_, Pid, _Ctx}) -> - ok = call(Pid, add, [{type, replication}, {progress, 0}]), - ?_assertEqual(1, length(couch_task_status:all())). - -should_set_task_startup_time({_, Pid, _Ctx}) -> - ok = call(Pid, add, [{type, replication}, {progress, 0}]), - ?_assert(is_integer(get_task_prop(Pid, started_on))). 
- -should_have_update_time_as_startup_before_any_progress({_, Pid, _Ctx}) -> - ok = call(Pid, add, [{type, replication}, {progress, 0}]), - StartTime = get_task_prop(Pid, started_on), - ?_assertEqual(StartTime, get_task_prop(Pid, updated_on)). - -should_set_task_type({_, Pid, _Ctx}) -> - ok = call(Pid, add, [{type, replication}, {progress, 0}]), - ?_assertEqual(replication, get_task_prop(Pid, type)). - -should_not_register_multiple_tasks_for_same_pid({_, Pid, _Ctx}) -> - ok = call(Pid, add, [{type, replication}, {progress, 0}]), - ?_assertEqual({add_task_error, already_registered}, - call(Pid, add, [{type, compaction}, {progress, 0}])). - -should_set_task_progress({_, Pid, _Ctx}) -> - ok = call(Pid, add, [{type, replication}, {progress, 0}]), - ?_assertEqual(0, get_task_prop(Pid, progress)). - -should_update_task_progress({_, Pid, _Ctx}) -> - ok = call(Pid, add, [{type, replication}, {progress, 0}]), - call(Pid, update, [{progress, 25}]), - ?_assertEqual(25, get_task_prop(Pid, progress)). - -should_update_time_changes_on_task_progress({_, Pid, _Ctx}) -> - ?_assert( - begin - ok = call(Pid, add, [{type, replication}, {progress, 0}]), - ok = timer:sleep(1000), % sleep awhile to customize update time - call(Pid, update, [{progress, 25}]), - get_task_prop(Pid, updated_on) > get_task_prop(Pid, started_on) - end). - -%%should_control_update_frequency({_, Pid, _Ctx}) -> -%% ?_assertEqual(66, -%% begin -%% ok = call(Pid, add, [{type, replication}, {progress, 0}]), -%% call(Pid, update, [{progress, 50}]), -%% call(Pid, update_frequency, 500), -%% call(Pid, update, [{progress, 66}]), -%% call(Pid, update, [{progress, 77}]), -%% get_task_prop(Pid, progress) -%% end). - -should_reset_control_update_frequency({_, Pid, _Ctx}) -> - ?_assertEqual(87, - begin - ok = call(Pid, add, [{type, replication}, {progress, 0}]), - call(Pid, update, [{progress, 50}]), - call(Pid, update_frequency, 500), - call(Pid, update, [{progress, 66}]), - call(Pid, update, [{progress, 77}]), - call(Pid, update_frequency, 0), - call(Pid, update, [{progress, 87}]), - get_task_prop(Pid, progress) - end). - -should_track_multiple_tasks(_) -> - ?_assert(run_multiple_tasks()). - -should_finish_task({_, Pid, _Ctx}) -> - ok = call(Pid, add, [{type, replication}, {progress, 0}]), - ?assertEqual(1, length(couch_task_status:all())), - ok = call(Pid, done), - ?_assertEqual(0, length(couch_task_status:all())). - - -run_multiple_tasks() -> - Pid1 = spawn(fun() -> loop() end), - Pid2 = spawn(fun() -> loop() end), - Pid3 = spawn(fun() -> loop() end), - call(Pid1, add, [{type, replication}, {progress, 0}]), - call(Pid2, add, [{type, compaction}, {progress, 0}]), - call(Pid3, add, [{type, indexer}, {progress, 0}]), - - ?assertEqual(3, length(couch_task_status:all())), - ?assertEqual(replication, get_task_prop(Pid1, type)), - ?assertEqual(compaction, get_task_prop(Pid2, type)), - ?assertEqual(indexer, get_task_prop(Pid3, type)), - - call(Pid2, update, [{progress, 33}]), - call(Pid3, update, [{progress, 42}]), - call(Pid1, update, [{progress, 11}]), - ?assertEqual(42, get_task_prop(Pid3, progress)), - call(Pid1, update, [{progress, 72}]), - ?assertEqual(72, get_task_prop(Pid1, progress)), - ?assertEqual(33, get_task_prop(Pid2, progress)), - - call(Pid1, done), - ?assertEqual(2, length(couch_task_status:all())), - call(Pid3, done), - ?assertEqual(1, length(couch_task_status:all())), - call(Pid2, done), - ?assertEqual(0, length(couch_task_status:all())), - - true. 
- - -loop() -> - receive - {add, Props, From} -> - Resp = couch_task_status:add_task(Props), - From ! {ok, self(), Resp}, - loop(); - {update, Props, From} -> - Resp = couch_task_status:update(Props), - From ! {ok, self(), Resp}, - loop(); - {update_frequency, Msecs, From} -> - Resp = couch_task_status:set_update_frequency(Msecs), - From ! {ok, self(), Resp}, - loop(); - {done, From} -> - From ! {ok, self(), ok} - end. - -call(Pid, done) -> - Ref = erlang:monitor(process, Pid), - Pid ! {done, self()}, - Res = wait(Pid), - receive - {'DOWN', Ref, _Type, Pid, _Info} -> - Res - after ?TIMEOUT -> - throw(timeout_error) - end; -call(Pid, Command) -> - Pid ! {Command, self()}, - wait(Pid). - -call(Pid, Command, Arg) -> - Pid ! {Command, Arg, self()}, - wait(Pid). - -wait(Pid) -> - receive - {ok, Pid, Msg} -> - Msg - after ?TIMEOUT -> - throw(timeout_error) - end. - -get_task_prop(Pid, Prop) -> - From = list_to_binary(pid_to_list(Pid)), - Element = lists:foldl( - fun(PropList, Acc) -> - case couch_util:get_value(pid, PropList) of - From -> - [PropList | Acc]; - _ -> - Acc - end - end, - [], couch_task_status:all() - ), - case couch_util:get_value(Prop, hd(Element), nil) of - nil -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Could not get property '" - ++ couch_util:to_list(Prop) - ++ "' for task " - ++ pid_to_list(Pid)}]}); - Value -> - Value - end. diff --git a/src/couch/test/couch_totp_tests.erl b/src/couch/test/couch_totp_tests.erl deleted file mode 100644 index 6817a092a..000000000 --- a/src/couch/test/couch_totp_tests.erl +++ /dev/null @@ -1,55 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_totp_tests). - --include_lib("eunit/include/eunit.hrl"). - -totp_sha_test() -> - Key = <<"12345678901234567890">>, - ?assertEqual(94287082, couch_totp:generate(sha, Key, 59, 30, 8)), - ?assertEqual(07081804, couch_totp:generate(sha, Key, 1111111109, 30, 8)), - ?assertEqual(14050471, couch_totp:generate(sha, Key, 1111111111, 30, 8)), - ?assertEqual(89005924, couch_totp:generate(sha, Key, 1234567890, 30, 8)), - ?assertEqual(69279037, couch_totp:generate(sha, Key, 2000000000, 30, 8)), - ?assertEqual(65353130, couch_totp:generate(sha, Key, 20000000000, 30, 8)). - -totp_sha256_test() -> - Key = <<"12345678901234567890123456789012">>, - case sha_256_512_supported() of - true -> - ?assertEqual(46119246, couch_totp:generate(sha256, Key, 59, 30, 8)), - ?assertEqual(68084774, couch_totp:generate(sha256, Key, 1111111109, 30, 8)), - ?assertEqual(67062674, couch_totp:generate(sha256, Key, 1111111111, 30, 8)), - ?assertEqual(91819424, couch_totp:generate(sha256, Key, 1234567890, 30, 8)), - ?assertEqual(90698825, couch_totp:generate(sha256, Key, 2000000000, 30, 8)), - ?assertEqual(77737706, couch_totp:generate(sha256, Key, 20000000000, 30, 8)); - false -> - ?debugMsg("sha256 not supported, tests skipped") - end. 
-
-totp_sha512_test() ->
-    Key = <<"1234567890123456789012345678901234567890123456789012345678901234">>,
-    case sha_256_512_supported() of
-        true ->
-            ?assertEqual(90693936, couch_totp:generate(sha512, Key, 59, 30, 8)),
-            ?assertEqual(25091201, couch_totp:generate(sha512, Key, 1111111109, 30, 8)),
-            ?assertEqual(99943326, couch_totp:generate(sha512, Key, 1111111111, 30, 8)),
-            ?assertEqual(93441116, couch_totp:generate(sha512, Key, 1234567890, 30, 8)),
-            ?assertEqual(38618901, couch_totp:generate(sha512, Key, 2000000000, 30, 8)),
-            ?assertEqual(47863826, couch_totp:generate(sha512, Key, 20000000000, 30, 8));
-        false ->
-            ?debugMsg("sha512 not supported, tests skipped")
-    end.
-
-sha_256_512_supported() ->
-    erlang:function_exported(crypto, hmac, 3).
diff --git a/src/couch/test/couch_util_tests.erl b/src/couch/test/couch_util_tests.erl
deleted file mode 100644
index 3e145c4f6..000000000
--- a/src/couch/test/couch_util_tests.erl
+++ /dev/null
@@ -1,170 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_util_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-
-setup() ->
-    %% We cannot start the driver from here since it becomes bound to the
-    %% eunit master process, and the next couch_server_sup:start_link call
-    %% will fail because the server can't load the driver while it is
-    %% already loaded.
-    %%
-    %% On the other hand, we cannot unload the driver here either, due to
-    %% {error, not_loaded_by_this_process} while it is loaded. Any ideas
-    %% are welcome.
-    %%
-    Ctx = test_util:start_couch(),
-    %% config:start_link(?CONFIG_CHAIN),
-    %% {ok, _} = couch_drv:start_link(),
-    Ctx.
-
-teardown(Ctx) ->
-    ok = test_util:stop_couch(Ctx),
-    %% config:stop(),
-    %% erl_ddll:unload_driver(couch_icu_driver),
-    ok.
-
-
-collation_test_() ->
-    {
-        "Collation tests",
-        [
-            {
-                setup,
-                fun setup/0, fun teardown/1,
-                [
-                    should_collate_ascii(),
-                    should_collate_non_ascii()
-                ]
-            }
-        ]
-    }.
-
-validate_callback_exists_test_() ->
-    {
-        "validate_callback_exists tests",
-        [
-            fun should_succeed_for_existent_cb/0,
-            should_fail_for_missing_cb()
-        ]
-    }.
-
-should_collate_ascii() ->
-    ?_assertEqual(1, couch_util:collate(<<"foo">>, <<"bar">>)).
-
-should_collate_non_ascii() ->
-    ?_assertEqual(-1, couch_util:collate(<<"A">>, <<"aa">>)).
-
-to_existed_atom_test() ->
-    ?assert(couch_util:to_existing_atom(true)),
-    ?assertMatch(foo, couch_util:to_existing_atom(<<"foo">>)),
-    ?assertMatch(foobarbaz, couch_util:to_existing_atom("foobarbaz")).
-
-implode_test() ->
-    ?assertEqual([1, 38, 2, 38, 3], couch_util:implode([1, 2, 3], "&")).
-
-trim_test() ->
-    lists:map(fun(S) -> ?assertEqual("foo", couch_util:trim(S)) end,
-        [" foo", "foo ", "\tfoo", " foo ", "foo\t", "foo\n", "\nfoo"]).
-
-abs_pathname_test() ->
-    {ok, Cwd} = file:get_cwd(),
-    ?assertEqual(Cwd ++ "/foo", couch_util:abs_pathname("./foo")).
-
-flush_test() ->
-    ?assertNot(couch_util:should_flush()),
-    AcquireMem = fun() ->
-        _IntsToAGazillion = lists:seq(1, 200000),
-        _LotsOfData = lists:map(fun(_) -> <<"foobar">> end,
-            lists:seq(1, 500000)),
-        _ = list_to_binary(_LotsOfData),
-
-        %% Allocating 200K tuples puts us above the memory threshold.
-        %% Originally, there should be:
-        %%     ?assertNot(should_flush())
-        %% however, unlike in the etap test, GC collects all the allocated
-        %% bits, making this condition fail. So we have to invert the
-        %% condition, since GC works, cleans the memory, and everything
-        %% is fine.
-        ?assertNot(couch_util:should_flush())
-    end,
-    AcquireMem(),
-
-    %% Checking to flush invokes GC
-    ?assertNot(couch_util:should_flush()).
-
-verify_test() ->
-    ?assert(couch_util:verify("It4Vooya", "It4Vooya")),
-    ?assertNot(couch_util:verify("It4VooyaX", "It4Vooya")),
-    ?assert(couch_util:verify(<<"ahBase3r">>, <<"ahBase3r">>)),
-    ?assertNot(couch_util:verify(<<"ahBase3rX">>, <<"ahBase3r">>)),
-    ?assertNot(couch_util:verify(nil, <<"ahBase3r">>)).
-
-find_in_binary_test_() ->
-    Cases = [
-        {<<"foo">>, <<"foobar">>, {exact, 0}},
-        {<<"foo">>, <<"foofoo">>, {exact, 0}},
-        {<<"foo">>, <<"barfoo">>, {exact, 3}},
-        {<<"foo">>, <<"barfo">>, {partial, 3}},
-        {<<"f">>, <<"fobarfff">>, {exact, 0}},
-        {<<"f">>, <<"obarfff">>, {exact, 4}},
-        {<<"f">>, <<"obarggf">>, {exact, 6}},
-        {<<"f">>, <<"f">>, {exact, 0}},
-        {<<"f">>, <<"g">>, not_found},
-        {<<"foo">>, <<"f">>, {partial, 0}},
-        {<<"foo">>, <<"g">>, not_found},
-        {<<"foo">>, <<"">>, not_found},
-        {<<"fofo">>, <<"foofo">>, {partial, 3}},
-        {<<"foo">>, <<"gfobarfo">>, {partial, 6}},
-        {<<"foo">>, <<"gfobarf">>, {partial, 6}},
-        {<<"foo">>, <<"gfobar">>, not_found},
-        {<<"fog">>, <<"gbarfogquiz">>, {exact, 4}},
-        {<<"ggg">>, <<"ggg">>, {exact, 0}},
-        {<<"ggg">>, <<"ggggg">>, {exact, 0}},
-        {<<"ggg">>, <<"bggg">>, {exact, 1}},
-        {<<"ggg">>, <<"bbgg">>, {partial, 2}},
-        {<<"ggg">>, <<"bbbg">>, {partial, 3}},
-        {<<"ggg">>, <<"bgbggbggg">>, {exact, 6}},
-        {<<"ggg">>, <<"bgbggb">>, not_found}
-    ],
-    lists:map(
-        fun({Needle, Haystack, Result}) ->
-            Msg = lists:flatten(io_lib:format("Looking for ~s in ~s",
-                [Needle, Haystack])),
-            {Msg, ?_assertMatch(Result,
-                couch_util:find_in_binary(Needle, Haystack))}
-        end, Cases).
-
-should_succeed_for_existent_cb() ->
-    ?_assert(couch_util:validate_callback_exists(lists, any, 2)).
-
-should_fail_for_missing_cb() ->
-    Cases = [
-        {unknown_module, any, 1},
-        {erlang, unknown_function, 1},
-        {erlang, whereis, 100}
-    ],
-    lists:map(
-        fun({M, F, A} = MFA) ->
-            Name = lists:flatten(io_lib:format("~w:~w/~w", [M, F, A])),
-            {Name, ?_assertThrow(
-                {error, {undefined_callback, Name, MFA}},
-                couch_util:validate_callback_exists(M, F, A))}
-        end, Cases).
-
-to_hex_test_() ->
-    [
-        ?_assertEqual("", couch_util:to_hex([])),
-        ?_assertEqual("010203faff", couch_util:to_hex([1, 2, 3, 250, 255])),
-        ?_assertEqual("", couch_util:to_hex(<<>>)),
-        ?_assertEqual("010203faff", couch_util:to_hex(<<1, 2, 3, 250, 255>>))
    ].
diff --git a/src/couch/test/couch_uuids_tests.erl b/src/couch/test/couch_uuids_tests.erl
deleted file mode 100644
index a836eccc6..000000000
--- a/src/couch/test/couch_uuids_tests.erl
+++ /dev/null
@@ -1,155 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_uuids_tests). - --include_lib("couch/include/couch_eunit.hrl"). - --define(TIMEOUT_S, 20). - - -setup() -> - Ctx = test_util:start(?MODULE, [], [{dont_mock, [config]}]), - couch_uuids:start(), - Ctx. - -setup(Opts) -> - Pid = setup(), - lists:foreach( - fun({Option, Value}) -> - config:set("uuids", Option, Value, false) - end, Opts), - Pid. - -teardown(Ctx) -> - couch_uuids:stop(), - test_util:stop(Ctx). - -teardown(_, Ctx) -> - teardown(Ctx). - - -default_test_() -> - { - "Default UUID algorithm", - { - setup, - fun setup/0, fun teardown/1, - fun should_be_unique/1 - } - }. - -sequential_test_() -> - Opts = [{"algorithm", "sequential"}], - Cases = [ - fun should_be_unique/2, - fun should_increment_monotonically/2, - fun should_rollover/2 - ], - { - "UUID algorithm: sequential", - { - foreachx, - fun setup/1, fun teardown/2, - [{Opts, Fun} || Fun <- Cases] - } - }. - -utc_test_() -> - Opts = [{"algorithm", "utc_random"}], - Cases = [ - fun should_be_unique/2, - fun should_increment_monotonically/2 - ], - { - "UUID algorithm: utc_random", - { - foreachx, - fun setup/1, fun teardown/2, - [{Opts, Fun} || Fun <- Cases] - } - }. - -utc_id_suffix_test_() -> - Opts = [{"algorithm", "utc_id"}, {"utc_id_suffix", "bozo"}], - Cases = [ - fun should_be_unique/2, - fun should_increment_monotonically/2, - fun should_preserve_suffix/2 - ], - { - "UUID algorithm: utc_id", - { - foreachx, - fun setup/1, fun teardown/2, - [{Opts, Fun} || Fun <- Cases] - } - }. - - -should_be_unique() -> - %% this one may really runs for too long on slow hosts - {timeout, ?TIMEOUT_S, ?_assert(test_unique(10000, [couch_uuids:new()]))}. -should_be_unique(_) -> - should_be_unique(). -should_be_unique(_, _) -> - should_be_unique(). - -should_increment_monotonically(_, _) -> - ?_assert(couch_uuids:new() < couch_uuids:new()). - -should_rollover(_, _) -> - ?_test(begin - UUID = binary_to_list(couch_uuids:new()), - Prefix = element(1, lists:split(26, UUID)), - N = gen_until_pref_change(Prefix, 0), - ?assert(N >= 5000 andalso N =< 11000) - end). - -should_preserve_suffix(_, _) -> - ?_test(begin - UUID = binary_to_list(couch_uuids:new()), - Suffix = get_suffix(UUID), - ?assert(test_same_suffix(10000, Suffix)) - end). - - -test_unique(0, _) -> - true; -test_unique(N, UUIDs) -> - UUID = couch_uuids:new(), - ?assertNot(lists:member(UUID, UUIDs)), - test_unique(N - 1, [UUID| UUIDs]). - -get_prefix(UUID) -> - element(1, lists:split(26, binary_to_list(UUID))). - -gen_until_pref_change(_, Count) when Count > 8251 -> - Count; -gen_until_pref_change(Prefix, N) -> - case get_prefix(couch_uuids:new()) of - Prefix -> gen_until_pref_change(Prefix, N + 1); - _ -> N - end. - -get_suffix(UUID) when is_binary(UUID) -> - get_suffix(binary_to_list(UUID)); -get_suffix(UUID) -> - element(2, lists:split(14, UUID)). - -test_same_suffix(0, _) -> - true; -test_same_suffix(N, Suffix) -> - case get_suffix(couch_uuids:new()) of - Suffix -> test_same_suffix(N - 1, Suffix); - _ -> false - end. 
diff --git a/src/couch/test/couch_work_queue_tests.erl b/src/couch/test/couch_work_queue_tests.erl deleted file mode 100644 index a192230ef..000000000 --- a/src/couch/test/couch_work_queue_tests.erl +++ /dev/null @@ -1,402 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_work_queue_tests). - --include_lib("couch/include/couch_eunit.hrl"). - --define(TIMEOUT, 100). - - -setup(Opts) -> - {ok, Q} = couch_work_queue:new(Opts), - Producer = spawn_producer(Q), - Consumer = spawn_consumer(Q), - {Q, Producer, Consumer}. - -setup_max_items() -> - setup([{max_items, 3}]). - -setup_max_size() -> - setup([{max_size, 160}]). - -setup_max_items_and_size() -> - setup([{max_size, 160}, {max_items, 3}]). - -setup_multi_workers() -> - {Q, Producer, Consumer1} = setup([{max_size, 160}, - {max_items, 3}, - {multi_workers, true}]), - Consumer2 = spawn_consumer(Q), - Consumer3 = spawn_consumer(Q), - {Q, Producer, [Consumer1, Consumer2, Consumer3]}. - -teardown({Q, Producer, Consumers}) when is_list(Consumers) -> - % consume all to unblock and let producer/consumer stop without timeout - [consume(Consumer, all) || Consumer <- Consumers], - - ok = close_queue(Q), - ok = stop(Producer, "producer"), - R = [stop(Consumer, "consumer") || Consumer <- Consumers], - R = [ok || _ <- Consumers], - ok; -teardown({Q, Producer, Consumer}) -> - teardown({Q, Producer, [Consumer]}). - - -single_consumer_test_() -> - { - "Single producer and consumer", - [ - { - "Queue with 3 max items", - { - foreach, - fun setup_max_items/0, fun teardown/1, - single_consumer_max_item_count() ++ common_cases() - } - }, - { - "Queue with max size of 160 bytes", - { - foreach, - fun setup_max_size/0, fun teardown/1, - single_consumer_max_size() ++ common_cases() - } - }, - { - "Queue with max size of 160 bytes and 3 max items", - { - foreach, - fun setup_max_items_and_size/0, fun teardown/1, - single_consumer_max_items_and_size() ++ common_cases() - } - } - ] - }. - -multiple_consumers_test_() -> - { - "Single producer and multiple consumers", - [ - { - "Queue with max size of 160 bytes and 3 max items", - { - foreach, - fun setup_multi_workers/0, fun teardown/1, - common_cases() ++ multiple_consumers() - } - - } - ] - }. - -common_cases()-> - [ - fun should_block_consumer_on_dequeue_from_empty_queue/1, - fun should_consume_right_item/1, - fun should_timeout_on_close_non_empty_queue/1, - fun should_not_block_producer_for_non_empty_queue_after_close/1, - fun should_be_closed/1 - ]. - -single_consumer_max_item_count()-> - [ - fun should_have_no_items_for_new_queue/1, - fun should_block_producer_on_full_queue_count/1, - fun should_receive_first_queued_item/1, - fun should_consume_multiple_items/1, - fun should_consume_all/1 - ]. - -single_consumer_max_size()-> - [ - fun should_have_zero_size_for_new_queue/1, - fun should_block_producer_on_full_queue_size/1, - fun should_increase_queue_size_on_produce/1, - fun should_receive_first_queued_item/1, - fun should_consume_multiple_items/1, - fun should_consume_all/1 - ]. 
- -single_consumer_max_items_and_size() -> - single_consumer_max_item_count() ++ single_consumer_max_size(). - -multiple_consumers() -> - [ - fun should_have_zero_size_for_new_queue/1, - fun should_have_no_items_for_new_queue/1, - fun should_increase_queue_size_on_produce/1 - ]. - - -should_have_no_items_for_new_queue({Q, _, _}) -> - ?_assertEqual(0, couch_work_queue:item_count(Q)). - -should_have_zero_size_for_new_queue({Q, _, _}) -> - ?_assertEqual(0, couch_work_queue:size(Q)). - -should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumers}) when is_list(Consumers) -> - [consume(C, 2) || C <- Consumers], - Pongs = [ping(C) || C <- Consumers], - ?_assertEqual([timeout, timeout, timeout], Pongs); -should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumer}) -> - consume(Consumer, 1), - Pong = ping(Consumer), - ?_assertEqual(timeout, Pong). - -should_consume_right_item({Q, Producer, Consumers}) when is_list(Consumers) -> - [consume(C, 3) || C <- Consumers], - - Item1 = produce(Q, Producer, 10, false), - ok = ping(Producer), - ?assertEqual(0, couch_work_queue:item_count(Q)), - ?assertEqual(0, couch_work_queue:size(Q)), - - Item2 = produce(Q, Producer, 10, false), - ok = ping(Producer), - ?assertEqual(0, couch_work_queue:item_count(Q)), - ?assertEqual(0, couch_work_queue:size(Q)), - - Item3 = produce(Q, Producer, 10, false), - ok = ping(Producer), - ?assertEqual(0, couch_work_queue:item_count(Q)), - ?assertEqual(0, couch_work_queue:size(Q)), - - R = [{ping(C), Item} - || {C, Item} <- lists:zip(Consumers, [Item1, Item2, Item3])], - - ?_assertEqual([{ok, Item1}, {ok, Item2}, {ok, Item3}], R); -should_consume_right_item({Q, Producer, Consumer}) -> - consume(Consumer, 1), - Item = produce(Q, Producer, 10, false), - produce(Q, Producer, 20, true), - ok = ping(Producer), - ok = ping(Consumer), - {ok, Items} = last_consumer_items(Consumer), - ?_assertEqual([Item], Items). - -should_increase_queue_size_on_produce({Q, Producer, _}) -> - produce(Q, Producer, 50, true), - ok = ping(Producer), - Count1 = couch_work_queue:item_count(Q), - Size1 = couch_work_queue:size(Q), - - produce(Q, Producer, 10, true), - Count2 = couch_work_queue:item_count(Q), - Size2 = couch_work_queue:size(Q), - - ?_assertEqual([{Count1, Size1}, {Count2, Size2}], [{1, 50}, {2, 60}]). - -should_block_producer_on_full_queue_count({Q, Producer, _}) -> - produce(Q, Producer, 10, true), - ?assertEqual(1, couch_work_queue:item_count(Q)), - ok = ping(Producer), - - produce(Q, Producer, 15, true), - ?assertEqual(2, couch_work_queue:item_count(Q)), - ok = ping(Producer), - - produce(Q, Producer, 20, true), - ?assertEqual(3, couch_work_queue:item_count(Q)), - Pong = ping(Producer), - - ?_assertEqual(timeout, Pong). - -should_block_producer_on_full_queue_size({Q, Producer, _}) -> - produce(Q, Producer, 100, true), - ok = ping(Producer), - ?assertEqual(1, couch_work_queue:item_count(Q)), - ?assertEqual(100, couch_work_queue:size(Q)), - - produce(Q, Producer, 110, false), - Pong = ping(Producer), - ?assertEqual(2, couch_work_queue:item_count(Q)), - ?assertEqual(210, couch_work_queue:size(Q)), - - ?_assertEqual(timeout, Pong). - -should_consume_multiple_items({Q, Producer, Consumer}) -> - Item1 = produce(Q, Producer, 10, true), - ok = ping(Producer), - - Item2 = produce(Q, Producer, 15, true), - ok = ping(Producer), - - consume(Consumer, 2), - - {ok, Items} = last_consumer_items(Consumer), - ?_assertEqual([Item1, Item2], Items). 
- -should_receive_first_queued_item({Q, Producer, Consumer}) -> - consume(Consumer, 100), - timeout = ping(Consumer), - - Item = produce(Q, Producer, 11, false), - ok = ping(Producer), - - ok = ping(Consumer), - ?assertEqual(0, couch_work_queue:item_count(Q)), - - {ok, Items} = last_consumer_items(Consumer), - ?_assertEqual([Item], Items). - -should_consume_all({Q, Producer, Consumer}) -> - Item1 = produce(Q, Producer, 10, true), - Item2 = produce(Q, Producer, 15, true), - Item3 = produce(Q, Producer, 20, true), - - consume(Consumer, all), - - {ok, Items} = last_consumer_items(Consumer), - ?_assertEqual([Item1, Item2, Item3], Items). - -should_timeout_on_close_non_empty_queue({Q, Producer, _}) -> - produce(Q, Producer, 1, true), - Status = close_queue(Q), - - ?_assertEqual(timeout, Status). - -should_not_block_producer_for_non_empty_queue_after_close({Q, Producer, _}) -> - produce(Q, Producer, 1, true), - close_queue(Q), - Pong = ping(Producer), - Size = couch_work_queue:size(Q), - Count = couch_work_queue:item_count(Q), - - ?_assertEqual({ok, 1, 1}, {Pong, Size, Count}). - -should_be_closed({Q, _, Consumers}) when is_list(Consumers) -> - ok = close_queue(Q), - - [consume(C, 1) || C <- Consumers], - - LastConsumerItems = [last_consumer_items(C) || C <- Consumers], - ItemsCount = couch_work_queue:item_count(Q), - Size = couch_work_queue:size(Q), - - ?_assertEqual({[closed, closed, closed], closed, closed}, - {LastConsumerItems, ItemsCount, Size}); -should_be_closed({Q, _, Consumer}) -> - ok = close_queue(Q), - - consume(Consumer, 1), - - LastConsumerItems = last_consumer_items(Consumer), - ItemsCount = couch_work_queue:item_count(Q), - Size = couch_work_queue:size(Q), - - ?_assertEqual({closed, closed, closed}, - {LastConsumerItems, ItemsCount, Size}). - - -close_queue(Q) -> - test_util:stop_sync(Q, fun() -> - ok = couch_work_queue:close(Q) - end, ?TIMEOUT). - -spawn_consumer(Q) -> - Parent = self(), - spawn(fun() -> consumer_loop(Parent, Q, nil) end). - -consumer_loop(Parent, Q, PrevItem) -> - receive - {stop, Ref} -> - Parent ! {ok, Ref}; - {ping, Ref} -> - Parent ! {pong, Ref}, - consumer_loop(Parent, Q, PrevItem); - {last_item, Ref} -> - Parent ! {item, Ref, PrevItem}, - consumer_loop(Parent, Q, PrevItem); - {consume, N} -> - Result = couch_work_queue:dequeue(Q, N), - consumer_loop(Parent, Q, Result) - end. - -spawn_producer(Q) -> - Parent = self(), - spawn(fun() -> producer_loop(Parent, Q) end). - -producer_loop(Parent, Q) -> - receive - {stop, Ref} -> - Parent ! {ok, Ref}; - {ping, Ref} -> - Parent ! {pong, Ref}, - producer_loop(Parent, Q); - {produce, Ref, Size} -> - Item = crypto:strong_rand_bytes(Size), - Parent ! {item, Ref, Item}, - ok = couch_work_queue:queue(Q, Item), - producer_loop(Parent, Q) - end. - -consume(Consumer, N) -> - Consumer ! {consume, N}. - -last_consumer_items(Consumer) -> - Ref = make_ref(), - Consumer ! {last_item, Ref}, - receive - {item, Ref, Items} -> - Items - after ?TIMEOUT -> - timeout - end. - -produce(Q, Producer, Size, Wait) -> - Ref = make_ref(), - ItemsCount = couch_work_queue:item_count(Q), - Producer ! {produce, Ref, Size}, - receive - {item, Ref, Item} when Wait -> - ok = wait_increment(Q, ItemsCount), - Item; - {item, Ref, Item} -> - Item - after ?TIMEOUT -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Timeout asking producer to produce an item"}]}) - end. - -ping(Pid) -> - Ref = make_ref(), - Pid ! {ping, Ref}, - receive - {pong, Ref} -> - ok - after ?TIMEOUT -> - timeout - end. 
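The producer and consumer processes above exist only to probe the queue's blocking behaviour from the outside; the API itself is small. A minimal happy-path sketch, restricted to calls that appear in these tests and assuming nothing blocks along the way:

    {ok, Q} = couch_work_queue:new([{max_items, 3}]),
    ok = couch_work_queue:queue(Q, <<"job">>),           % producer side
    1 = couch_work_queue:item_count(Q),
    {ok, [<<"job">>]} = couch_work_queue:dequeue(Q, 1),  % consumer side
    ok = couch_work_queue:close(Q).                      % dequeue now returns closed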
- -stop(Pid, Name) -> - Ref = make_ref(), - Pid ! {stop, Ref}, - receive - {ok, Ref} -> ok - after ?TIMEOUT -> - ?debugMsg("Timeout stopping " ++ Name), - timeout - end. - -wait_increment(Q, ItemsCount) -> - test_util:wait(fun() -> - case couch_work_queue:item_count(Q) > ItemsCount of - true -> - ok; - false -> - wait - end - end). diff --git a/src/couch/test/couchdb_attachments_tests.erl b/src/couch/test/couchdb_attachments_tests.erl deleted file mode 100644 index 04859dbc9..000000000 --- a/src/couch/test/couchdb_attachments_tests.erl +++ /dev/null @@ -1,765 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_attachments_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("mem3/include/mem3.hrl"). - --define(COMPRESSION_LEVEL, 8). --define(ATT_BIN_NAME, <<"logo.png">>). --define(ATT_TXT_NAME, <<"file.erl">>). --define(FIXTURE_PNG, filename:join([?FIXTURESDIR, "logo.png"])). --define(FIXTURE_TXT, ?ABS_PATH(?FILE)). --define(TIMEOUT, 5000). --define(TIMEOUT_EUNIT, 100). --define(TIMEWAIT, 1000). --define(i2l(I), integer_to_list(I)). - - -start() -> - Ctx = test_util:start_couch(), - % ensure in default compression settings for attachments_compression_tests - config:set("attachments", "compression_level", - ?i2l(?COMPRESSION_LEVEL), false), - config:set("attachments", "compressible_types", "text/*", false), - Ctx. - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, []), - ok = couch_db:close(Db), - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(couch_httpd, port), - Host = Addr ++ ":" ++ ?i2l(Port), - {Host, ?b2l(DbName)}. - -setup({binary, standalone}) -> - {Host, DbName} = setup(), - setup_att(fun create_standalone_png_att/2, Host, DbName, ?FIXTURE_PNG); -setup({text, standalone}) -> - {Host, DbName} = setup(), - setup_att(fun create_standalone_text_att/2, Host, DbName, ?FIXTURE_TXT); -setup({binary, inline}) -> - {Host, DbName} = setup(), - setup_att(fun create_inline_png_att/2, Host, DbName, ?FIXTURE_PNG); -setup({text, inline}) -> - {Host, DbName} = setup(), - setup_att(fun create_inline_text_att/2, Host, DbName, ?FIXTURE_TXT); -setup(compressed) -> - {Host, DbName} = setup(), - setup_att(fun create_already_compressed_att/2, Host, DbName, ?FIXTURE_TXT). -setup_att(Fun, Host, DbName, File) -> - HttpHost = "http://" ++ Host, - AttUrl = Fun(HttpHost, DbName), - {ok, Data} = file:read_file(File), - DocUrl = string:join([HttpHost, DbName, "doc"], "/"), - Helpers = {DbName, DocUrl, AttUrl}, - {Data, Helpers}. - -teardown(_, {_, {DbName, _, _}}) -> - teardown(DbName). - -teardown({_, DbName}) -> - teardown(DbName); -teardown(DbName) -> - ok = couch_server:delete(?l2b(DbName), []), - ok. - - -attachments_test_() -> - { - "Attachments tests", - { - setup, - fun start/0, fun test_util:stop_couch/1, - [ - attachments_md5_tests(), - attachments_compression_tests() - ] - } - }. 
- -attachments_md5_tests() -> - { - "Attachments MD5 tests", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_upload_attachment_without_md5/1, - fun should_upload_attachment_by_chunks_without_md5/1, - fun should_upload_attachment_with_valid_md5_header/1, - fun should_upload_attachment_by_chunks_with_valid_md5_header/1, - fun should_upload_attachment_by_chunks_with_valid_md5_trailer/1, - fun should_reject_attachment_with_invalid_md5/1, - fun should_reject_chunked_attachment_with_invalid_md5/1, - fun should_reject_chunked_attachment_with_invalid_md5_trailer/1 - ] - } - }. - -attachments_compression_tests() -> - Funs = [ - fun should_get_att_without_accept_gzip_encoding/2, - fun should_get_att_with_accept_gzip_encoding/2, - fun should_get_att_with_accept_deflate_encoding/2, - fun should_return_406_response_on_unsupported_encoding/2, - fun should_get_doc_with_att_data/2, - fun should_get_doc_with_att_data_stub/2 - ], - { - "Attachments compression tests", - [ - { - "Created via Attachments API", - created_attachments_compression_tests(standalone, Funs) - }, - { - "Created inline via Document API", - created_attachments_compression_tests(inline, Funs) - }, - { - "Created already compressed via Attachments API", - { - foreachx, - fun setup/1, fun teardown/2, - [{compressed, Fun} || Fun <- Funs] - } - }, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_not_create_compressed_att_with_deflate_encoding/1, - fun should_not_create_compressed_att_with_compress_encoding/1, - fun should_create_compressible_att_with_ctype_params/1 - ] - } - ] - }. - -created_attachments_compression_tests(Mod, Funs) -> - [ - { - "Compressible attachments", - { - foreachx, - fun setup/1, fun teardown/2, - [{{text, Mod}, Fun} || Fun <- Funs] - } - }, - { - "Uncompressible attachments", - { - foreachx, - fun setup/1, fun teardown/2, - [{{binary, Mod}, Fun} || Fun <- Funs] - } - } - ]. - - - -should_upload_attachment_without_md5({Host, DbName}) -> - ?_test(begin - AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"), - Body = "We all live in a yellow submarine!", - Headers = [ - {"Content-Length", "34"}, - {"Content-Type", "text/plain"}, - {"Host", Host} - ], - {ok, Code, Json} = request("PUT", AttUrl, Headers, Body), - ?assertEqual(201, Code), - ?assertEqual(true, get_json(Json, [<<"ok">>])) - end). - -should_upload_attachment_by_chunks_without_md5({Host, DbName}) -> - ?_test(begin - AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"), - AttData = <<"We all live in a yellow submarine!">>, - <<Part1:21/binary, Part2:13/binary>> = AttData, - Body = [chunked_body([Part1, Part2]), "\r\n"], - Headers = [ - {"Content-Type", "text/plain"}, - {"Transfer-Encoding", "chunked"}, - {"Host", Host} - ], - {ok, Code, Json} = request("PUT", AttUrl, Headers, Body), - ?assertEqual(201, Code), - ?assertEqual(true, get_json(Json, [<<"ok">>])) - end). - -should_upload_attachment_with_valid_md5_header({Host, DbName}) -> - ?_test(begin - AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"), - Body = "We all live in a yellow submarine!", - Headers = [ - {"Content-Length", "34"}, - {"Content-Type", "text/plain"}, - {"Content-MD5", ?b2l(base64:encode(couch_hash:md5_hash(Body)))}, - {"Host", Host} - ], - {ok, Code, Json} = request("PUT", AttUrl, Headers, Body), - ?assertEqual(201, Code), - ?assertEqual(true, get_json(Json, [<<"ok">>])) - end).
- -should_upload_attachment_by_chunks_with_valid_md5_header({Host, DbName}) -> - ?_test(begin - AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"), - AttData = <<"We all live in a yellow submarine!">>, - <<Part1:21/binary, Part2:13/binary>> = AttData, - Body = [chunked_body([Part1, Part2]), "\r\n"], - Headers = [ - {"Content-Type", "text/plain"}, - {"Content-MD5", ?b2l(base64:encode(couch_hash:md5_hash(AttData)))}, - {"Host", Host}, - {"Transfer-Encoding", "chunked"} - ], - {ok, Code, Json} = request("PUT", AttUrl, Headers, Body), - ?assertEqual(201, Code), - ?assertEqual(true, get_json(Json, [<<"ok">>])) - end). - -should_upload_attachment_by_chunks_with_valid_md5_trailer({Host, DbName}) -> - ?_test(begin - AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"), - AttData = <<"We all live in a yellow submarine!">>, - <<Part1:21/binary, Part2:13/binary>> = AttData, - Body = [chunked_body([Part1, Part2]), - "Content-MD5: ", base64:encode(couch_hash:md5_hash(AttData)), - "\r\n\r\n"], - Headers = [ - {"Content-Type", "text/plain"}, - {"Host", Host}, - {"Trailer", "Content-MD5"}, - {"Transfer-Encoding", "chunked"} - ], - {ok, Code, Json} = request("PUT", AttUrl, Headers, Body), - ?assertEqual(201, Code), - ?assertEqual(true, get_json(Json, [<<"ok">>])) - end). - -should_reject_attachment_with_invalid_md5({Host, DbName}) -> - ?_test(begin - AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"), - Body = "We all live in a yellow submarine!", - Headers = [ - {"Content-Length", "34"}, - {"Content-Type", "text/plain"}, - {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))}, - {"Host", Host} - ], - {ok, Code, Json} = request("PUT", AttUrl, Headers, Body), - ?assertEqual(400, Code), - ?assertEqual(<<"content_md5_mismatch">>, - get_json(Json, [<<"error">>])) - end). - - -should_reject_chunked_attachment_with_invalid_md5({Host, DbName}) -> - ?_test(begin - AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"), - AttData = <<"We all live in a yellow submarine!">>, - <<Part1:21/binary, Part2:13/binary>> = AttData, - Body = [chunked_body([Part1, Part2]), "\r\n"], - Headers = [ - {"Content-Type", "text/plain"}, - {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))}, - {"Host", Host}, - {"Transfer-Encoding", "chunked"} - ], - {ok, Code, Json} = request("PUT", AttUrl, Headers, Body), - ?assertEqual(400, Code), - ?assertEqual(<<"content_md5_mismatch">>, - get_json(Json, [<<"error">>])) - end). - -should_reject_chunked_attachment_with_invalid_md5_trailer({Host, DbName}) -> - ?_test(begin - AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"), - AttData = <<"We all live in a yellow submarine!">>, - <<Part1:21/binary, Part2:13/binary>> = AttData, - Body = [chunked_body([Part1, Part2]), - "Content-MD5: ", base64:encode(<<"foobar!">>), - "\r\n\r\n"], - Headers = [ - {"Content-Type", "text/plain"}, - {"Host", Host}, - {"Trailer", "Content-MD5"}, - {"Transfer-Encoding", "chunked"} - ], - {ok, Code, Json} = request("PUT", AttUrl, Headers, Body), - ?assertEqual(400, Code), - ?assertEqual(<<"content_md5_mismatch">>, get_json(Json, [<<"error">>])) - end). - -should_get_att_without_accept_gzip_encoding(_, {Data, {_, _, AttUrl}}) -> - ?_test(begin - {ok, Code, Headers, Body} = test_request:get(AttUrl), - ?assertEqual(200, Code), - ?assertNot(lists:member({"Content-Encoding", "gzip"}, Headers)), - ?assertEqual(Data, iolist_to_binary(Body)) - end).
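All of the MD5 cases above derive the checksum value the same way; in isolation, using the same couch_hash and base64 calls as the tests:

    Body = <<"We all live in a yellow submarine!">>,
    Value = ?b2l(base64:encode(couch_hash:md5_hash(Body))),
    %% sent either up front as a request header:
    %%     {"Content-MD5", Value}
    %% or, for chunked uploads, as a trailer after the zero-length chunk:
    %%     "0\r\nContent-MD5: " ++ Value ++ "\r\n\r\n"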
- -should_get_att_with_accept_gzip_encoding(compressed, {Data, {_, _, AttUrl}}) -> - ?_test(begin - {ok, Code, Headers, Body} = test_request:get( - AttUrl, [{"Accept-Encoding", "gzip"}]), - ?assertEqual(200, Code), - ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)), - ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body))) - end); -should_get_att_with_accept_gzip_encoding({text, _}, {Data, {_, _, AttUrl}}) -> - ?_test(begin - {ok, Code, Headers, Body} = test_request:get( - AttUrl, [{"Accept-Encoding", "gzip"}]), - ?assertEqual(200, Code), - ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)), - ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body))) - end); -should_get_att_with_accept_gzip_encoding({binary, _}, {Data, {_, _, AttUrl}}) -> - ?_test(begin - {ok, Code, Headers, Body} = test_request:get( - AttUrl, [{"Accept-Encoding", "gzip"}]), - ?assertEqual(200, Code), - ?assertEqual(undefined, - couch_util:get_value("Content-Encoding", Headers)), - ?assertEqual(Data, iolist_to_binary(Body)) - end). - -should_get_att_with_accept_deflate_encoding(_, {Data, {_, _, AttUrl}}) -> - ?_test(begin - {ok, Code, Headers, Body} = test_request:get( - AttUrl, [{"Accept-Encoding", "deflate"}]), - ?assertEqual(200, Code), - ?assertEqual(undefined, - couch_util:get_value("Content-Encoding", Headers)), - ?assertEqual(Data, iolist_to_binary(Body)) - end). - -should_return_406_response_on_unsupported_encoding(_, {_, {_, _, AttUrl}}) -> - ?_assertEqual(406, - begin - {ok, Code, _, _} = test_request:get( - AttUrl, [{"Accept-Encoding", "deflate, *;q=0"}]), - Code - end). - -should_get_doc_with_att_data(compressed, {Data, {_, DocUrl, _}}) -> - ?_test(begin - Url = DocUrl ++ "?attachments=true", - {ok, Code, _, Body} = test_request:get( - Url, [{"Accept", "application/json"}]), - ?assertEqual(200, Code), - Json = jiffy:decode(Body), - AttJson = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_TXT_NAME]), - AttData = couch_util:get_nested_json_value( - AttJson, [<<"data">>]), - ?assertEqual( - <<"text/plain">>, - couch_util:get_nested_json_value(AttJson,[<<"content_type">>])), - ?assertEqual(Data, base64:decode(AttData)) - end); -should_get_doc_with_att_data({text, _}, {Data, {_, DocUrl, _}}) -> - ?_test(begin - Url = DocUrl ++ "?attachments=true", - {ok, Code, _, Body} = test_request:get( - Url, [{"Accept", "application/json"}]), - ?assertEqual(200, Code), - Json = jiffy:decode(Body), - AttJson = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_TXT_NAME]), - AttData = couch_util:get_nested_json_value( - AttJson, [<<"data">>]), - ?assertEqual( - <<"text/plain">>, - couch_util:get_nested_json_value(AttJson,[<<"content_type">>])), - ?assertEqual(Data, base64:decode(AttData)) - end); -should_get_doc_with_att_data({binary, _}, {Data, {_, DocUrl, _}}) -> - ?_test(begin - Url = DocUrl ++ "?attachments=true", - {ok, Code, _, Body} = test_request:get( - Url, [{"Accept", "application/json"}]), - ?assertEqual(200, Code), - Json = jiffy:decode(Body), - AttJson = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_BIN_NAME]), - AttData = couch_util:get_nested_json_value( - AttJson, [<<"data">>]), - ?assertEqual( - <<"image/png">>, - couch_util:get_nested_json_value(AttJson,[<<"content_type">>])), - ?assertEqual(Data, base64:decode(AttData)) - end). 
- -should_get_doc_with_att_data_stub(compressed, {Data, {_, DocUrl, _}}) -> - ?_test(begin - Url = DocUrl ++ "?att_encoding_info=true", - {ok, Code, _, Body} = test_request:get( - Url, [{"Accept", "application/json"}]), - ?assertEqual(200, Code), - Json = jiffy:decode(Body), - {AttJson} = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_TXT_NAME]), - ?assertEqual(<<"gzip">>, - couch_util:get_value(<<"encoding">>, AttJson)), - AttLength = couch_util:get_value(<<"length">>, AttJson), - EncLength = couch_util:get_value(<<"encoded_length">>, AttJson), - ?assertEqual(AttLength, EncLength), - ?assertEqual(iolist_size(zlib:gzip(Data)), AttLength) - end); -should_get_doc_with_att_data_stub({text, _}, {Data, {_, DocUrl, _}}) -> - ?_test(begin - Url = DocUrl ++ "?att_encoding_info=true", - {ok, Code, _, Body} = test_request:get( - Url, [{"Accept", "application/json"}]), - ?assertEqual(200, Code), - Json = jiffy:decode(Body), - {AttJson} = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_TXT_NAME]), - ?assertEqual(<<"gzip">>, - couch_util:get_value(<<"encoding">>, AttJson)), - AttEncLength = iolist_size(gzip(Data)), - ?assertEqual(AttEncLength, - couch_util:get_value(<<"encoded_length">>, AttJson)), - ?assertEqual(byte_size(Data), - couch_util:get_value(<<"length">>, AttJson)) - end); -should_get_doc_with_att_data_stub({binary, _}, {Data, {_, DocUrl, _}}) -> - ?_test(begin - Url = DocUrl ++ "?att_encoding_info=true", - {ok, Code, _, Body} = test_request:get( - Url, [{"Accept", "application/json"}]), - ?assertEqual(200, Code), - Json = jiffy:decode(Body), - {AttJson} = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_BIN_NAME]), - ?assertEqual(undefined, - couch_util:get_value(<<"encoding">>, AttJson)), - ?assertEqual(undefined, - couch_util:get_value(<<"encoded_length">>, AttJson)), - ?assertEqual(byte_size(Data), - couch_util:get_value(<<"length">>, AttJson)) - end). - -should_not_create_compressed_att_with_deflate_encoding({Host, DbName}) -> - ?_assertEqual(415, - begin - HttpHost = "http://" ++ Host, - AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"), - {ok, Data} = file:read_file(?FIXTURE_TXT), - Body = zlib:compress(Data), - Headers = [ - {"Content-Encoding", "deflate"}, - {"Content-Type", "text/plain"} - ], - {ok, Code, _, _} = test_request:put(AttUrl, Headers, Body), - Code - end). - -should_not_create_compressed_att_with_compress_encoding({Host, DbName}) -> - % Note: As of OTP R13B04, it seems there's no LZW compression - % (i.e. UNIX compress utility implementation) lib in OTP. - % However there's a simple working Erlang implementation at: - % http://scienceblogs.com/goodmath/2008/01/simple_lempelziv_compression_i.php - ?_assertEqual(415, - begin - HttpHost = "http://" ++ Host, - AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"), - {ok, Data} = file:read_file(?FIXTURE_TXT), - Headers = [ - {"Content-Encoding", "compress"}, - {"Content-Type", "text/plain"} - ], - {ok, Code, _, _} = test_request:put(AttUrl, Headers, Data), - Code - end). 
- -should_create_compressible_att_with_ctype_params({Host, DbName}) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(begin - HttpHost = "http://" ++ Host, - DocUrl = string:join([HttpHost, DbName, ?docid()], "/"), - AttUrl = string:join([DocUrl, ?b2l(?ATT_TXT_NAME)], "/"), - {ok, Data} = file:read_file(?FIXTURE_TXT), - Headers = [{"Content-Type", "text/plain; charset=UTF-8"}], - {ok, Code0, _, _} = test_request:put(AttUrl, Headers, Data), - ?assertEqual(201, Code0), - - {ok, Code1, _, Body} = test_request:get( - DocUrl ++ "?att_encoding_info=true"), - ?assertEqual(200, Code1), - Json = jiffy:decode(Body), - {AttJson} = couch_util:get_nested_json_value( - Json, [<<"_attachments">>, ?ATT_TXT_NAME]), - ?assertEqual(<<"gzip">>, - couch_util:get_value(<<"encoding">>, AttJson)), - AttEncLength = iolist_size(gzip(Data)), - ?assertEqual(AttEncLength, - couch_util:get_value(<<"encoded_length">>, AttJson)), - ?assertEqual(byte_size(Data), - couch_util:get_value(<<"length">>, AttJson)) - end)}. - - -compact_after_lowering_attachment_size_limit_test_() -> - { - "Compact after lowering attachment size limit", - { - foreach, - fun() -> - Ctx = test_util:start_couch(), - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - {Ctx, DbName} - end, - fun({Ctx, DbName}) -> - config:delete("couchdb", "max_attachment_size"), - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - test_util:stop_couch(Ctx) - end, - [ - fun should_compact_after_lowering_attachment_size_limit/1 - ] - } - }. - - -should_compact_after_lowering_attachment_size_limit({_Ctx, DbName}) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(begin - {ok, Db1} = couch_db:open(DbName, [?ADMIN_CTX]), - Doc1 = #doc{id = <<"doc1">>, atts = att(1000)}, - {ok, _} = couch_db:update_doc(Db1, Doc1, []), - couch_db:close(Db1), - config:set("couchdb", "max_attachment_size", "1", _Persist = false), - compact_db(DbName), - {ok, Db2} = couch_db:open_int(DbName, []), - {ok, Doc2} = couch_db:open_doc(Db2, <<"doc1">>), - couch_db:close(Db2), - [Att] = Doc2#doc.atts, - ?assertEqual(1000, couch_att:fetch(att_len, Att)) - end)}. - - -att(Size) when is_integer(Size), Size >= 1 -> - [couch_att:new([ - {name, <<"att">>}, - {type, <<"app/binary">>}, - {att_len, Size}, - {data, fun(_Bytes) -> - << <<"x">> || _ <- lists:seq(1, Size) >> - end} - ])]. - - -compact_db(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, _CompactPid} = couch_db:start_compact(Db), - wait_compaction(DbName, "database", ?LINE), - ok = couch_db:close(Db). - - -wait_compaction(DbName, Kind, Line) -> - WaitFun = fun() -> - case is_compaction_running(DbName) of - true -> wait; - false -> ok - end - end, - case test_util:wait(WaitFun, ?TIMEOUT) of - timeout -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, Line}, - {reason, "Timeout waiting for " - ++ Kind - ++ " database compaction"}]}); - _ -> - ok - end. - - -is_compaction_running(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, DbInfo} = couch_db:get_db_info(Db), - couch_db:close(Db), - couch_util:get_value(compact_running, DbInfo) =:= true. 
- - -internal_replication_after_lowering_attachment_size_limit_test_() -> - { - "Internal replication after lowering max attachment size", - { - foreach, - fun() -> - Ctx = test_util:start_couch([mem3]), - SrcName = ?tempdb(), - {ok, SrcDb} = couch_db:create(SrcName, [?ADMIN_CTX]), - ok = couch_db:close(SrcDb), - TgtName = ?tempdb(), - {ok, TgtDb} = couch_db:create(TgtName, [?ADMIN_CTX]), - ok = couch_db:close(TgtDb), - {Ctx, SrcName, TgtName} - end, - fun({Ctx, SrcName, TgtName}) -> - config:delete("couchdb", "max_attachment_size"), - ok = couch_server:delete(SrcName, [?ADMIN_CTX]), - ok = couch_server:delete(TgtName, [?ADMIN_CTX]), - test_util:stop_couch(Ctx) - end, - [ - fun should_replicate_after_lowering_attachment_size/1 - ] - } - }. - -should_replicate_after_lowering_attachment_size({_Ctx, SrcName, TgtName}) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(begin - {ok, SrcDb} = couch_db:open(SrcName, [?ADMIN_CTX]), - SrcDoc = #doc{id = <<"doc">>, atts = att(1000)}, - {ok, _} = couch_db:update_doc(SrcDb, SrcDoc, []), - couch_db:close(SrcDb), - config:set("couchdb", "max_attachment_size", "1", _Persist = false), - % Create a pair of "fake" shards - SrcShard = #shard{name = SrcName, node = node()}, - TgtShard = #shard{name = TgtName, node = node()}, - mem3_rep:go(SrcShard, TgtShard, []), - {ok, TgtDb} = couch_db:open_int(TgtName, []), - {ok, TgtDoc} = couch_db:open_doc(TgtDb, <<"doc">>), - couch_db:close(TgtDb), - [Att] = TgtDoc#doc.atts, - ?assertEqual(1000, couch_att:fetch(att_len, Att)) - end)}. - - -get_json(Json, Path) -> - couch_util:get_nested_json_value(Json, Path). - -to_hex(Val) -> - to_hex(Val, []). - -to_hex(0, Acc) -> - Acc; -to_hex(Val, Acc) -> - to_hex(Val div 16, [hex_char(Val rem 16) | Acc]). - -hex_char(V) when V < 10 -> $0 + V; -hex_char(V) -> $A + V - 10. - -chunked_body(Chunks) -> - chunked_body(Chunks, []). - -chunked_body([], Acc) -> - iolist_to_binary(lists:reverse(Acc, "0\r\n")); -chunked_body([Chunk | Rest], Acc) -> - Size = to_hex(size(Chunk)), - chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]). - -get_socket() -> - Options = [binary, {packet, 0}, {active, false}], - Port = mochiweb_socket_server:get(couch_httpd, port), - {ok, Sock} = gen_tcp:connect(bind_address(), Port, Options), - Sock. - -bind_address() -> - case config:get("httpd", "bind_address") of - undefined -> any; - Address -> Address - end. - -request(Method, Url, Headers, Body) -> - RequestHead = [Method, " ", Url, " HTTP/1.1"], - RequestHeaders = [[string:join([Key, Value], ": "), "\r\n"] - || {Key, Value} <- Headers], - Request = [RequestHead, "\r\n", RequestHeaders, "\r\n", Body], - Sock = get_socket(), - gen_tcp:send(Sock, list_to_binary(lists:flatten(Request))), - timer:sleep(?TIMEWAIT), % must wait to receive complete response - {ok, R} = gen_tcp:recv(Sock, 0), - gen_tcp:close(Sock), - [Header, Body1] = re:split(R, "\r\n\r\n", [{return, binary}]), - {ok, {http_response, _, Code, _}, _} = - erlang:decode_packet(http, Header, []), - Json = jiffy:decode(Body1), - {ok, Code, Json}. - -create_standalone_text_att(Host, DbName) -> - {ok, Data} = file:read_file(?FIXTURE_TXT), - Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"), - {ok, Code, _Headers, _Body} = test_request:put( - Url, [{"Content-Type", "text/plain"}], Data), - ?assertEqual(201, Code), - Url. 
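For reference, the wire format produced by the chunked_body/1 helper above: each chunk is its size in upper-case hex, CRLF, the data, CRLF, and a zero-length chunk terminates the stream (callers append the final CRLF, or a trailer, themselves). With the 21/13 split used in the MD5 tests:

    chunked_body([<<"We all live in a yell">>, <<"ow submarine!">>])
    %% => <<"15\r\nWe all live in a yell\r\nD\r\now submarine!\r\n0\r\n">>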
- -create_standalone_png_att(Host, DbName) -> - {ok, Data} = file:read_file(?FIXTURE_PNG), - Url = string:join([Host, DbName, "doc", ?b2l(?ATT_BIN_NAME)], "/"), - {ok, Code, _Headers, _Body} = test_request:put( - Url, [{"Content-Type", "image/png"}], Data), - ?assertEqual(201, Code), - Url. - -create_inline_text_att(Host, DbName) -> - {ok, Data} = file:read_file(?FIXTURE_TXT), - Url = string:join([Host, DbName, "doc"], "/"), - Doc = {[ - {<<"_attachments">>, {[ - {?ATT_TXT_NAME, {[ - {<<"content_type">>, <<"text/plain">>}, - {<<"data">>, base64:encode(Data)} - ]} - }]}} - ]}, - {ok, Code, _Headers, _Body} = test_request:put( - Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)), - ?assertEqual(201, Code), - string:join([Url, ?b2l(?ATT_TXT_NAME)], "/"). - -create_inline_png_att(Host, DbName) -> - {ok, Data} = file:read_file(?FIXTURE_PNG), - Url = string:join([Host, DbName, "doc"], "/"), - Doc = {[ - {<<"_attachments">>, {[ - {?ATT_BIN_NAME, {[ - {<<"content_type">>, <<"image/png">>}, - {<<"data">>, base64:encode(Data)} - ]} - }]}} - ]}, - {ok, Code, _Headers, _Body} = test_request:put( - Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)), - ?assertEqual(201, Code), - string:join([Url, ?b2l(?ATT_BIN_NAME)], "/"). - -create_already_compressed_att(Host, DbName) -> - {ok, Data} = file:read_file(?FIXTURE_TXT), - Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"), - {ok, Code, _Headers, _Body} = test_request:put( - Url, [{"Content-Type", "text/plain"}, {"Content-Encoding", "gzip"}], - zlib:gzip(Data)), - ?assertEqual(201, Code), - Url. - -gzip(Data) -> - Z = zlib:open(), - ok = zlib:deflateInit(Z, ?COMPRESSION_LEVEL, deflated, 16 + 15, 8, default), - Chunk = zlib:deflate(Z, Data), - Last = zlib:deflate(Z, [], finish), - ok = zlib:deflateEnd(Z), - ok = zlib:close(Z), - [Chunk, Last]. diff --git a/src/couch/test/couchdb_auth_tests.erl b/src/couch/test/couchdb_auth_tests.erl deleted file mode 100644 index ed2c064de..000000000 --- a/src/couch/test/couchdb_auth_tests.erl +++ /dev/null @@ -1,96 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_auth_tests). - --include_lib("couch/include/couch_eunit.hrl"). - - -setup(PortType) -> - Hashed = couch_passwords:hash_admin_password("artischocko"), - ok = config:set("admins", "rocko", binary_to_list(Hashed), _Persist=false), - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - lists:concat(["http://", Addr, ":", port(PortType), "/_session"]). - -teardown(_, _) -> - ok. - - -auth_test_() -> - Tests = [ - fun should_return_username_on_post_to_session/2, - fun should_not_return_authenticated_field/2, - fun should_return_list_of_handlers/2 - ], - { - "Auth tests", - { - setup, - fun() -> test_util:start_couch([chttpd]) end, fun test_util:stop_couch/1, - [ - make_test_cases(clustered, Tests), - make_test_cases(backdoor, Tests) - ] - } - }. - -make_test_cases(Mod, Funs) -> - { - lists:flatten(io_lib:format("~s", [Mod])), - {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]} - }. 
- -should_return_username_on_post_to_session(_PortType, Url) -> - ?_assertEqual(<<"rocko">>, - begin - Hashed = couch_passwords:hash_admin_password(<<"artischocko">>), - ok = config:set("admins", "rocko", binary_to_list(Hashed), false), - {ok, _, _, Body} = test_request:post(Url, [{"Content-Type", "application/json"}], - "{\"name\":\"rocko\", \"password\":\"artischocko\"}"), - {Json} = jiffy:decode(Body), - proplists:get_value(<<"name">>, Json) - end). - -should_not_return_authenticated_field(_PortType, Url) -> - ?_assertThrow({not_found, _}, - begin - couch_util:get_nested_json_value(session(Url), [ - <<"info">>, <<"authenticated">>]) - end). - -should_return_list_of_handlers(backdoor, Url) -> - ?_assertEqual([<<"cookie">>,<<"default">>], - begin - couch_util:get_nested_json_value(session(Url), [ - <<"info">>, <<"authentication_handlers">>]) - end); -should_return_list_of_handlers(clustered, Url) -> - ?_assertEqual([<<"cookie">>,<<"default">>], - begin - couch_util:get_nested_json_value(session(Url), [ - <<"info">>, <<"authentication_handlers">>]) - end). - - -%% ------------------------------------------------------------------ -%% Internal Function Definitions -%% ------------------------------------------------------------------ - -session(Url) -> - {ok, _, _, Body} = test_request:get(Url, [{"Content-Type", "application/json"}], - "{\"name\":\"rocko\", \"password\":\"artischocko\"}"), - jiffy:decode(Body). - -port(clustered) -> - integer_to_list(mochiweb_socket_server:get(chttpd, port)); -port(backdoor) -> - integer_to_list(mochiweb_socket_server:get(couch_httpd, port)). diff --git a/src/couch/test/couchdb_cookie_domain_tests.erl b/src/couch/test/couchdb_cookie_domain_tests.erl deleted file mode 100755 index e66ab31e6..000000000 --- a/src/couch/test/couchdb_cookie_domain_tests.erl +++ /dev/null @@ -1,69 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_cookie_domain_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(USER, "cookie_domain_test_admin"). --define(PASS, "pass"). - -setup() -> - Ctx = test_util:start_couch([chttpd]), - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - Url = ?l2b(io_lib:format("http://~s:~b/_session", [Addr, Port])), - ContentType = [{"Content-Type", "application/json"}], - Payload = jiffy:encode({[{name, ?l2b(?USER)}, {password, ?l2b(?PASS)}]}), - {ok, ?b2l(Url), ContentType, ?b2l(Payload), Ctx}. - -teardown({ok, _, _, _, Ctx}) -> - ok = config:delete("admins", ?USER, _Persist=false), - test_util:stop_couch(Ctx). - -cookie_test_() -> - { - "Cookie domain tests", - { - setup, - fun setup/0, - fun teardown/1, - fun({ok, Url, ContentType, Payload, _}) -> - [ - should_set_cookie_domain(Url, ContentType, Payload), - should_not_set_cookie_domain(Url, ContentType, Payload) - ] - end - } - }. 
- -should_set_cookie_domain(Url, ContentType, Payload) -> - ?_test(begin - ok = config:set("couch_httpd_auth", "cookie_domain", - "example.com", false), - {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload), - ?assertEqual(200, Code), - Cookie = proplists:get_value("Set-Cookie", Headers), - ?assert(string:str(Cookie, "; Domain=example.com") > 0) - end). - -should_not_set_cookie_domain(Url, ContentType, Payload) -> - ?_test(begin - ok = config:set("couch_httpd_auth", "cookie_domain", "", false), - {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload), - ?assertEqual(200, Code), - Cookie = proplists:get_value("Set-Cookie", Headers), - ?assertEqual(0, string:str(Cookie, "; Domain=")) - end). diff --git a/src/couch/test/couchdb_cors_tests.erl b/src/couch/test/couchdb_cors_tests.erl deleted file mode 100644 index 82630bba7..000000000 --- a/src/couch/test/couchdb_cors_tests.erl +++ /dev/null @@ -1,344 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_cors_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --include_lib("chttpd/include/chttpd_cors.hrl"). - --define(TIMEOUT, 1000). - --define(_assertEqualLists(A, B), - ?_assertEqual(lists:usort(A), lists:usort(B))). - --define(assertEqualLists(A, B), - ?assertEqual(lists:usort(A), lists:usort(B))). - -start() -> - Ctx = test_util:start_couch([ioq]), - ok = config:set("httpd", "enable_cors", "true", false), - ok = config:set("vhosts", "example.com", "/", false), - Ctx. - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - couch_db:close(Db), - - config:set("cors", "credentials", "false", false), - config:set("cors", "origins", "http://example.com", false), - - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)), - Host = "http://" ++ Addr ++ ":" ++ Port, - {Host, ?b2l(DbName)}. - -setup({Mod, VHost}) -> - {Host, DbName} = setup(), - Url = case Mod of - server -> - Host; - db -> - Host ++ "/" ++ DbName - end, - DefaultHeaders = [{"Origin", "http://example.com"}] - ++ maybe_append_vhost(VHost), - {Host, DbName, Url, DefaultHeaders}. - -teardown(DbName) when is_list(DbName) -> - ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]), - ok; -teardown({_, DbName}) -> - teardown(DbName). - -teardown(_, {_, DbName, _, _}) -> - teardown(DbName). 
- - -cors_test_() -> - Funs = [ - fun should_not_allow_origin/2, - fun should_not_allow_origin_with_port_mismatch/2, - fun should_not_allow_origin_with_scheme_mismatch/2, - fun should_not_all_origin_due_case_mismatch/2, - fun should_make_simple_request/2, - fun should_make_preflight_request/2, - fun should_make_prefligh_request_with_port/2, - fun should_make_prefligh_request_with_scheme/2, - fun should_make_prefligh_request_with_wildcard_origin/2, - fun should_make_request_with_credentials/2, - fun should_make_origin_request_with_auth/2, - fun should_make_preflight_request_with_auth/2 - ], - { - "CORS (COUCHDB-431)", - { - setup, - fun start/0, fun test_util:stop_couch/1, - [ - cors_tests(Funs), - vhost_cors_tests(Funs), - headers_tests() - ] - } - }. - -headers_tests() -> - { - "Various headers tests", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_not_return_cors_headers_for_invalid_origin/1, - fun should_not_return_cors_headers_for_invalid_origin_preflight/1, - fun should_make_request_against_attachment/1, - fun should_make_range_request_against_attachment/1, - fun should_make_request_with_if_none_match_header/1 - ] - } - }. - -cors_tests(Funs) -> - { - "CORS tests", - [ - make_test_case(server, false, Funs), - make_test_case(db, false, Funs) - ] - }. - -vhost_cors_tests(Funs) -> - { - "Virtual Host CORS", - [ - make_test_case(server, true, Funs), - make_test_case(db, true, Funs) - ] - }. - -make_test_case(Mod, UseVhost, Funs) -> - { - case Mod of server -> "Server"; db -> "Database" end, - {foreachx, fun setup/1, fun teardown/2, [{{Mod, UseVhost}, Fun} - || Fun <- Funs]} - }. - - -should_not_allow_origin(_, {_, _, Url, Headers0}) -> - ?_assertEqual(undefined, - begin - config:delete("cors", "origins", false), - Headers1 = proplists:delete("Origin", Headers0), - Headers = [{"Origin", "http://127.0.0.1"}] - ++ Headers1, - {ok, _, Resp, _} = test_request:get(Url, Headers), - proplists:get_value("Access-Control-Allow-Origin", Resp) - end). - -should_not_allow_origin_with_port_mismatch({_, VHost}, {_, _, Url, _}) -> - ?_assertEqual(undefined, - begin - Headers = [{"Origin", "http://example.com:5984"}, - {"Access-Control-Request-Method", "GET"}] - ++ maybe_append_vhost(VHost), - {ok, _, Resp, _} = test_request:options(Url, Headers), - proplists:get_value("Access-Control-Allow-Origin", Resp) - end). - -should_not_allow_origin_with_scheme_mismatch({_, VHost}, {_, _, Url, _}) -> - ?_assertEqual(undefined, - begin - Headers = [{"Origin", "http://example.com:5984"}, - {"Access-Control-Request-Method", "GET"}] - ++ maybe_append_vhost(VHost), - {ok, _, Resp, _} = test_request:options(Url, Headers), - proplists:get_value("Access-Control-Allow-Origin", Resp) - end). - -should_not_all_origin_due_case_mismatch({_, VHost}, {_, _, Url, _}) -> - ?_assertEqual(undefined, - begin - Headers = [{"Origin", "http://ExAmPlE.CoM"}, - {"Access-Control-Request-Method", "GET"}] - ++ maybe_append_vhost(VHost), - {ok, _, Resp, _} = test_request:options(Url, Headers), - proplists:get_value("Access-Control-Allow-Origin", Resp) - end). 
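The negative cases above probe the same rule from different angles: an Origin is only echoed back if it literally matches an entry in the cors/origins config, so scheme, port, and letter case must all agree (and an empty list matches nothing). The shared shape, as a sketch (Url and VHost as provided by the fixtures):

    config:set("cors", "origins", "http://example.com", false),
    Headers = [{"Origin", "http://example.com:5984"},
               {"Access-Control-Request-Method", "GET"}],
    {ok, _, Resp, _} = test_request:options(Url, Headers),
    undefined = proplists:get_value("Access-Control-Allow-Origin", Resp).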
- -should_make_simple_request(_, {_, _, Url, DefaultHeaders}) -> - ?_test(begin - {ok, _, Resp, _} = test_request:get(Url, DefaultHeaders), - ?assertEqual( - undefined, - proplists:get_value("Access-Control-Allow-Credentials", Resp)), - ?assertEqual( - "http://example.com", - proplists:get_value("Access-Control-Allow-Origin", Resp)), - ?assertEqualLists( - ?COUCH_HEADERS ++ list_simple_headers(Resp), - split_list(proplists:get_value("Access-Control-Expose-Headers", Resp))) - end). - -should_make_preflight_request(_, {_, _, Url, DefaultHeaders}) -> - ?_assertEqualLists(?SUPPORTED_METHODS, - begin - Headers = DefaultHeaders - ++ [{"Access-Control-Request-Method", "GET"}], - {ok, _, Resp, _} = test_request:options(Url, Headers), - split_list(proplists:get_value("Access-Control-Allow-Methods", Resp)) - end). - -should_make_prefligh_request_with_port({_, VHost}, {_, _, Url, _}) -> - ?_assertEqual("http://example.com:5984", - begin - config:set("cors", "origins", "http://example.com:5984", - false), - Headers = [{"Origin", "http://example.com:5984"}, - {"Access-Control-Request-Method", "GET"}] - ++ maybe_append_vhost(VHost), - {ok, _, Resp, _} = test_request:options(Url, Headers), - proplists:get_value("Access-Control-Allow-Origin", Resp) - end). - -should_make_prefligh_request_with_scheme({_, VHost}, {_, _, Url, _}) -> - ?_assertEqual("https://example.com:5984", - begin - config:set("cors", "origins", "https://example.com:5984", - false), - Headers = [{"Origin", "https://example.com:5984"}, - {"Access-Control-Request-Method", "GET"}] - ++ maybe_append_vhost(VHost), - {ok, _, Resp, _} = test_request:options(Url, Headers), - proplists:get_value("Access-Control-Allow-Origin", Resp) - end). - -should_make_prefligh_request_with_wildcard_origin({_, VHost}, {_, _, Url, _}) -> - ?_assertEqual("https://example.com:5984", - begin - config:set("cors", "origins", "*", false), - Headers = [{"Origin", "https://example.com:5984"}, - {"Access-Control-Request-Method", "GET"}] - ++ maybe_append_vhost(VHost), - {ok, _, Resp, _} = test_request:options(Url, Headers), - proplists:get_value("Access-Control-Allow-Origin", Resp) - end). - -should_make_request_with_credentials(_, {_, _, Url, DefaultHeaders}) -> - ?_assertEqual("true", - begin - ok = config:set("cors", "credentials", "true", false), - {ok, _, Resp, _} = test_request:options(Url, DefaultHeaders), - proplists:get_value("Access-Control-Allow-Credentials", Resp) - end). - -should_make_origin_request_with_auth(_, {_, _, Url, DefaultHeaders}) -> - ?_assertEqual("http://example.com", - begin - Hashed = couch_passwords:hash_admin_password(<<"test">>), - config:set("admins", "test", ?b2l(Hashed), false), - {ok, _, Resp, _} = test_request:get( - Url, DefaultHeaders, [{basic_auth, {"test", "test"}}]), - config:delete("admins", "test", false), - proplists:get_value("Access-Control-Allow-Origin", Resp) - end). - -should_make_preflight_request_with_auth(_, {_, _, Url, DefaultHeaders}) -> - ?_assertEqualLists(?SUPPORTED_METHODS, - begin - Hashed = couch_passwords:hash_admin_password(<<"test">>), - config:set("admins", "test", ?b2l(Hashed), false), - Headers = DefaultHeaders - ++ [{"Access-Control-Request-Method", "GET"}], - {ok, _, Resp, _} = test_request:options( - Url, Headers, [{basic_auth, {"test", "test"}}]), - config:delete("admins", "test", false), - split_list(proplists:get_value("Access-Control-Allow-Methods", Resp)) - end). 
- -should_not_return_cors_headers_for_invalid_origin({Host, _}) -> - ?_assertEqual(undefined, - begin - Headers = [{"Origin", "http://127.0.0.1"}], - {ok, _, Resp, _} = test_request:get(Host, Headers), - proplists:get_value("Access-Control-Allow-Origin", Resp) - end). - -should_not_return_cors_headers_for_invalid_origin_preflight({Host, _}) -> - ?_assertEqual(undefined, - begin - Headers = [{"Origin", "http://127.0.0.1"}, - {"Access-Control-Request-Method", "GET"}], - {ok, _, Resp, _} = test_request:options(Host, Headers), - proplists:get_value("Access-Control-Allow-Origin", Resp) - end). - -should_make_request_against_attachment({Host, DbName}) -> - {"COUCHDB-1689", - ?_assertEqual(200, - begin - Url = Host ++ "/" ++ DbName, - {ok, Code0, _, _} = test_request:put( - Url ++ "/doc/file.txt", [{"Content-Type", "text/plain"}], - "hello, couch!"), - ?assert(Code0 =:= 201), - {ok, Code, _, _} = test_request:get( - Url ++ "/doc?attachments=true", - [{"Origin", "http://example.com"}]), - Code - end)}. - -should_make_range_request_against_attachment({Host, DbName}) -> - {"COUCHDB-1689", - ?_assertEqual(206, - begin - Url = Host ++ "/" ++ DbName, - {ok, Code0, _, _} = test_request:put( - Url ++ "/doc/file.txt", - [{"Content-Type", "application/octet-stream"}], - "hello, couch!"), - ?assert(Code0 =:= 201), - {ok, Code, _, _} = test_request:get( - Url ++ "/doc/file.txt", [{"Origin", "http://example.com"}, - {"Range", "bytes=0-6"}]), - Code - end)}. - -should_make_request_with_if_none_match_header({Host, DbName}) -> - {"COUCHDB-1697", - ?_assertEqual(304, - begin - Url = Host ++ "/" ++ DbName, - {ok, Code0, Headers0, _} = test_request:put( - Url ++ "/doc", [{"Content-Type", "application/json"}], "{}"), - ?assert(Code0 =:= 201), - ETag = proplists:get_value("ETag", Headers0), - {ok, Code, _, _} = test_request:get( - Url ++ "/doc", [{"Origin", "http://example.com"}, - {"If-None-Match", ETag}]), - Code - end)}. - - -maybe_append_vhost(true) -> - [{"Host", "http://example.com"}]; -maybe_append_vhost(false) -> - []. - -split_list(S) -> - re:split(S, "\\s*,\\s*", [trim, {return, list}]). - -list_simple_headers(Headers) -> - LCHeaders = [string:to_lower(K) || {K, _V} <- Headers], - lists:filter(fun(H) -> lists:member(H, ?SIMPLE_HEADERS) end, LCHeaders). diff --git a/src/couch/test/couchdb_db_tests.erl b/src/couch/test/couchdb_db_tests.erl deleted file mode 100644 index 734bafb9f..000000000 --- a/src/couch/test/couchdb_db_tests.erl +++ /dev/null @@ -1,91 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_db_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("mem3/include/mem3.hrl"). - -setup() -> - DbName = ?b2l(?tempdb()), - fabric:create_db(DbName), - DbName. - - -teardown(DbName) -> - (catch fabric:delete_db(DbName)), - ok. 
- - -clustered_db_test_() -> - { - "Checking clustered db API", - { - setup, - fun() -> test_util:start_couch([ddoc_cache, mem3]) end, - fun test_util:stop/1, - [ - { - "DB deletion", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_close_deleted_db/1, - fun should_kill_caller_from_load_validation_funs_for_deleted_db/1 - ] - } - } - ] - } - }. - - -should_close_deleted_db(DbName) -> - ?_test(begin - [#shard{name = ShardName} | _] = mem3:shards(DbName), - {ok, Db} = couch_db:open(ShardName, []), - - MonitorRef = couch_db:monitor(Db), - fabric:delete_db(DbName), - receive - {'DOWN', MonitorRef, _Type, _Pid, _Info} -> - ok - after 2000 -> - throw(timeout_error) - end, - test_util:wait(fun() -> - case ets:lookup(couch_dbs, DbName) of - [] -> ok; - _ -> wait - end - end), - ?assertEqual([], ets:lookup(couch_dbs, DbName)) - end). - - -should_kill_caller_from_load_validation_funs_for_deleted_db(DbName) -> - ?_test(begin - [#shard{name = ShardName} | _] = mem3:shards(DbName), - {ok, Db} = couch_db:open(ShardName, []), - - MonitorRef = couch_db:monitor(Db), - fabric:delete_db(DbName), - receive - {'DOWN', MonitorRef, _Type, _Pid, _Info} -> - ok - after 2000 -> - throw(timeout_error) - end, - ?assertError(database_does_not_exist, couch_db:load_validation_funs(Db)) - end). diff --git a/src/couch/test/couchdb_design_doc_tests.erl b/src/couch/test/couchdb_design_doc_tests.erl deleted file mode 100644 index eef12e039..000000000 --- a/src/couch/test/couchdb_design_doc_tests.erl +++ /dev/null @@ -1,88 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_design_doc_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - create_design_doc(DbName, <<"_design/foo">>), - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)), - BaseUrl = "http://" ++ Addr ++ ":" ++ Port, - {?b2l(DbName), BaseUrl}. - - -teardown({DbName, _}) -> - couch_server:delete(?l2b(DbName), [?ADMIN_CTX]), - ok. - - -design_list_test_() -> - { - "Check _list functionality", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_return_empty_when_plain_return/1, - fun should_return_empty_when_no_docs/1 - ] - } - } - }. - -should_return_empty_when_plain_return({DbName, BaseUrl}) -> - ?_test(begin - ?assertEqual(<<>>, - query_text(BaseUrl, DbName, "foo", "_list/plain_return/simple_view")) - end). - -should_return_empty_when_no_docs({DbName, BaseUrl}) -> - ?_test(begin - ?assertEqual(<<>>, - query_text(BaseUrl, DbName, "foo", "_list/simple_render/simple_view")) - end). 
- -create_design_doc(DbName, DDName) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDName}, - {<<"language">>, <<"javascript">>}, - {<<"views">>, {[ - {<<"simple_view">>, {[ - {<<"map">>, <<"function(doc) {emit(doc._id, doc)}">> }, - {<<"reduce">>, <<"function (key, values, rereduce) {return sum(values);}">> } - ]}} - ]}}, - {<<"lists">>, {[ - {<<"plain_return">>, <<"function(head, req) {return;}">>}, - {<<"simple_render">>, <<"function(head, req) {var row; while(row=getRow()) {send(JSON.stringify(row)); }}">>} - ]}} - ]}), - {ok, Rev} = couch_db:update_doc(Db, DDoc, []), - couch_db:ensure_full_commit(Db), - couch_db:close(Db), - Rev. - -query_text(BaseUrl, DbName, DDoc, Path) -> - {ok, Code, _Headers, Body} = test_request:get( - BaseUrl ++ "/" ++ DbName ++ "/_design/" ++ DDoc ++ "/" ++ Path), - ?assertEqual(200, Code), - Body. diff --git a/src/couch/test/couchdb_file_compression_tests.erl b/src/couch/test/couchdb_file_compression_tests.erl deleted file mode 100644 index 8f0fe5bf1..000000000 --- a/src/couch/test/couchdb_file_compression_tests.erl +++ /dev/null @@ -1,260 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_file_compression_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(DDOC_ID, <<"_design/test">>). --define(DOCS_COUNT, 5000). --define(TIMEOUT, 60000). - -setup() -> - config:set("couchdb", "file_compression", "none", false), - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = populate_db(Db, ?DOCS_COUNT), - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, ?DDOC_ID}, - {<<"language">>, <<"javascript">>}, - {<<"views">>, {[ - {<<"by_id">>, {[ - {<<"map">>, <<"function(doc){emit(doc._id, doc.string);}">>} - ]}} - ]} - } - ]}), - {ok, _} = couch_db:update_doc(Db, DDoc, []), - refresh_index(DbName), - ok = couch_db:close(Db), - DbName. - -teardown(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok. - - -couch_file_compression_test_() -> - { - "CouchDB file compression tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_use_none/1, - fun should_use_deflate_1/1, - fun should_use_deflate_9/1, - fun should_use_snappy/1, - fun should_compare_compression_methods/1 - ] - } - } - }. - - -should_use_none(DbName) -> - config:set("couchdb", "file_compression", "none", false), - { - "Use no compression", - [ - {"compact database", - {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_db(DbName))}}, - {"compact view", - {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_view(DbName))}} - ] - }. - -should_use_deflate_1(DbName) -> - config:set("couchdb", "file_compression", "deflate_1", false), - { - "Use deflate compression at level 1", - [ - {"compact database", - {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_db(DbName))}}, - {"compact view", - {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_view(DbName))}} - ] - }. 
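One detail worth noting in the compaction cases above and below: ?TIMEOUT in this module is 60000 because it is handed to test_util:wait/2, which counts milliseconds, while eunit's {timeout, T, Test} tuples count seconds; hence the conversion with a few seconds of slack:

    EunitBudget = 5 + ?TIMEOUT div 1000,  % 65 seconds for the whole test case
    WaitBudget = ?TIMEOUT.                % 60000 ms for the polling loop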
- -should_use_deflate_9(DbName) -> - config:set("couchdb", "file_compression", "deflate_9", false), - { - "Use deflate compression at level 9", - [ - {"compact database", - {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_db(DbName))}}, - {"compact view", - {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_view(DbName))}} - ] - }. - -should_use_snappy(DbName) -> - config:set("couchdb", "file_compression", "snappy", false), - { - "Use snappy compression", - [ - {"compact database", - {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_db(DbName))}}, - {"compact view", - {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_view(DbName))}} - ] - }. - -should_compare_compression_methods(DbName) -> - {"none > snappy > deflate_1 > deflate_9", - {timeout, ?TIMEOUT div 1000, ?_test(compare_compression_methods(DbName))}}. - -compare_compression_methods(DbName) -> - config:set("couchdb", "file_compression", "none", false), - ExternalSizePreCompact = db_external_size(DbName), - compact_db(DbName), - compact_view(DbName), - DbSizeNone = db_disk_size(DbName), - ViewSizeNone = view_disk_size(DbName), - ExternalSizeNone = db_external_size(DbName), - ViewExternalSizeNone = view_external_size(DbName), - - config:set("couchdb", "file_compression", "snappy", false), - compact_db(DbName), - compact_view(DbName), - DbSizeSnappy = db_disk_size(DbName), - ViewSizeSnappy = view_disk_size(DbName), - ExternalSizeSnappy = db_external_size(DbName), - ViewExternalSizeSnappy = view_external_size(DbName), - - ?assert(DbSizeNone > DbSizeSnappy), - ?assert(ViewSizeNone > ViewSizeSnappy), - - config:set("couchdb", "file_compression", "deflate_1", false), - compact_db(DbName), - compact_view(DbName), - DbSizeDeflate1 = db_disk_size(DbName), - ViewSizeDeflate1 = view_disk_size(DbName), - - ?assert(DbSizeSnappy > DbSizeDeflate1), - ?assert(ViewSizeSnappy > ViewSizeDeflate1), - - config:set("couchdb", "file_compression", "deflate_9", false), - compact_db(DbName), - compact_view(DbName), - DbSizeDeflate9 = db_disk_size(DbName), - ViewSizeDeflate9 = view_disk_size(DbName), - ExternalSizeDeflate9 = db_external_size(DbName), - ViewExternalSizeDeflate9 = view_external_size(DbName), - - ?assert(DbSizeDeflate1 > DbSizeDeflate9), - ?assert(ViewSizeDeflate1 > ViewSizeDeflate9), - ?assert(ExternalSizePreCompact >= ExternalSizeNone), - ?assert(ExternalSizeNone =:= ExternalSizeSnappy), - ?assert(ExternalSizeNone =:= ExternalSizeDeflate9), - ?assert(ViewExternalSizeNone =:= ViewExternalSizeSnappy), - ?assert(ViewExternalSizeNone =:= ViewExternalSizeDeflate9). - - -populate_db(_Db, NumDocs) when NumDocs =< 0 -> - ok; -populate_db(Db, NumDocs) -> - Docs = lists:map( - fun(_) -> - couch_doc:from_json_obj({[ - {<<"_id">>, couch_uuids:random()}, - {<<"string">>, ?l2b(lists:duplicate(1000, $X))} - ]}) - end, - lists:seq(1, 500)), - {ok, _} = couch_db:update_docs(Db, Docs, []), - populate_db(Db, NumDocs - 500). - -refresh_index(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]), - couch_mrview:query_view(Db, DDoc, <<"by_id">>, [{update, true}]), - ok = couch_db:close(Db). - -compact_db(DbName) -> - DiskSizeBefore = db_disk_size(DbName), - {ok, Db} = couch_db:open_int(DbName, []), - {ok, _CompactPid} = couch_db:start_compact(Db), - wait_compaction(DbName, "database", ?LINE), - ok = couch_db:close(Db), - DiskSizeAfter = db_disk_size(DbName), - ?assert(DiskSizeBefore > DiskSizeAfter). 
- -compact_view(DbName) -> - DiskSizeBefore = view_disk_size(DbName), - {ok, _MonRef} = couch_mrview:compact(DbName, ?DDOC_ID, [monitor]), - wait_compaction(DbName, "view group", ?LINE), - DiskSizeAfter = view_disk_size(DbName), - ?assert(DiskSizeBefore > DiskSizeAfter). - -db_disk_size(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, Info} = couch_db:get_db_info(Db), - ok = couch_db:close(Db), - active_size(Info). - -db_external_size(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, Info} = couch_db:get_db_info(Db), - ok = couch_db:close(Db), - external_size(Info). - -view_disk_size(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]), - {ok, Info} = couch_mrview:get_info(Db, DDoc), - ok = couch_db:close(Db), - active_size(Info). - -view_external_size(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]), - {ok, Info} = couch_mrview:get_info(Db, DDoc), - ok = couch_db:close(Db), - external_size(Info). - -active_size(Info) -> - couch_util:get_nested_json_value({Info}, [sizes, active]). - -external_size(Info) -> - couch_util:get_nested_json_value({Info}, [sizes, external]). - -wait_compaction(DbName, Kind, Line) -> - WaitFun = fun() -> - case is_compaction_running(DbName) of - true -> wait; - false -> ok - end - end, - case test_util:wait(WaitFun, ?TIMEOUT) of - timeout -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, Line}, - {reason, "Timeout waiting for " - ++ Kind - ++ " database compaction"}]}); - _ -> - ok - end. - -is_compaction_running(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, DbInfo} = couch_db:get_db_info(Db), - {ok, ViewInfo} = couch_mrview:get_info(Db, ?DDOC_ID), - couch_db:close(Db), - (couch_util:get_value(compact_running, ViewInfo) =:= true) - orelse (couch_util:get_value(compact_running, DbInfo) =:= true). diff --git a/src/couch/test/couchdb_location_header_tests.erl b/src/couch/test/couchdb_location_header_tests.erl deleted file mode 100644 index c6c039eb0..000000000 --- a/src/couch/test/couchdb_location_header_tests.erl +++ /dev/null @@ -1,78 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_location_header_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - couch_db:close(Db), - - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)), - Host = "http://" ++ Addr ++ ":" ++ Port, - {Host, ?b2l(DbName)}. - -teardown({_, DbName}) -> - ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]), - ok. 
- - -header_test_() -> - { - "CouchDB Location Header Tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_work_with_newlines_in_docs/1, - fun should_work_with_newlines_in_attachments/1 - ] - } - } - }. - -should_work_with_newlines_in_docs({Host, DbName}) -> - Url = Host ++ "/" ++ DbName ++ "/docid%0A", - {"COUCHDB-708", - ?_assertEqual( - Url, - begin - {ok, _, Headers, _} = test_request:put(Url, - [{"Content-Type", "application/json"}], "{}"), - proplists:get_value("Location", Headers) - end)}. - -should_work_with_newlines_in_attachments({Host, DbName}) -> - Url = Host ++ "/" ++ DbName, - AttUrl = Url ++ "/docid%0A/readme.txt", - {"COUCHDB-708", - ?_assertEqual( - AttUrl, - begin - Body = "We all live in a yellow submarine!", - Headers0 = [ - {"Content-Length", "34"}, - {"Content-Type", "text/plain"} - ], - {ok, _, Headers, _} = test_request:put(AttUrl, Headers0, Body), - proplists:get_value("Location", Headers) - end)}. diff --git a/src/couch/test/couchdb_mrview_cors_tests.erl b/src/couch/test/couchdb_mrview_cors_tests.erl deleted file mode 100644 index 0f69048a0..000000000 --- a/src/couch/test/couchdb_mrview_cors_tests.erl +++ /dev/null @@ -1,140 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_mrview_cors_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - - - --define(DDOC, {[ - {<<"_id">>, <<"_design/foo">>}, - {<<"shows">>, {[ - {<<"bar">>, <<"function(doc, req) {return '

<h1>wosh</h1>

';}">>} - ]}} -]}). - --define(USER, "mrview_cors_test_admin"). --define(PASS, "pass"). --define(AUTH, {basic_auth, {?USER, ?PASS}}). - - -start() -> - Ctx = test_util:start_couch([chttpd]), - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - ok = config:set("httpd", "enable_cors", "true", false), - ok = config:set("vhosts", "example.com", "/", false), - Ctx. - -setup(PortType) -> - DbName = ?tempdb(), - ok = create_db(PortType, DbName), - - config:set("cors", "credentials", "false", false), - config:set("cors", "origins", "http://example.com", false), - - Host = host_url(PortType), - upload_ddoc(Host, ?b2l(DbName)), - {Host, ?b2l(DbName)}. - -teardown(Ctx) -> - ok = config:delete("admins", ?USER, _Persist=false), - test_util:stop_couch(Ctx). - -teardown(PortType, {_Host, DbName}) -> - delete_db(PortType, ?l2b(DbName)), - ok. - -cors_test_() -> - { - "CORS for mrview", - { - setup, - fun start/0, fun teardown/1, - [show_tests()] - } - }. - -show_tests() -> - { - "Check CORS for show", - [ - make_test_case(clustered, [fun should_make_shows_request/2]), - make_test_case(backdoor, [fun should_make_shows_request/2]) - ] - }. - -make_test_case(Mod, Funs) -> - { - lists:flatten(io_lib:format("~s", [Mod])), - {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]} - }. - -should_make_shows_request(_, {Host, DbName}) -> - ?_test(begin - ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_show/bar", - Headers = [{"Origin", "http://example.com"}, - {"Access-Control-Request-Method", "GET"}, ?AUTH], - {ok, _, Resp, Body} = test_request:get(ReqUrl, Headers), - Origin = proplists:get_value("Access-Control-Allow-Origin", Resp), - ?assertEqual("http://example.com", Origin), - ?assertEqual(<<"

<h1>wosh</h1>

">>, Body) - end). - -create_db(backdoor, DbName) -> - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - couch_db:close(Db); -create_db(clustered, DbName) -> - {ok, Status, _, _} = test_request:put(db_url(DbName), [?AUTH], ""), - assert_success(create_db, Status), - ok. - -delete_db(backdoor, DbName) -> - couch_server:delete(DbName, [?ADMIN_CTX]); -delete_db(clustered, DbName) -> - {ok, Status, _, _} = test_request:delete(db_url(DbName), [?AUTH]), - assert_success(delete_db, Status), - ok. - -assert_success(create_db, Status) -> - true = lists:member(Status, [201, 202]); -assert_success(delete_db, Status) -> - true = lists:member(Status, [200, 202]). - - -host_url(PortType) -> - "http://" ++ bind_address(PortType) ++ ":" ++ port(PortType). - -bind_address(PortType) -> - config:get(section(PortType), "bind_address", "127.0.0.1"). - -section(backdoor) -> "http"; -section(clustered) -> "chttpd". - -db_url(DbName) when is_binary(DbName) -> - db_url(binary_to_list(DbName)); -db_url(DbName) when is_list(DbName) -> - host_url(clustered) ++ "/" ++ DbName. - -port(clustered) -> - integer_to_list(mochiweb_socket_server:get(chttpd, port)); -port(backdoor) -> - integer_to_list(mochiweb_socket_server:get(couch_httpd, port)). - - -upload_ddoc(Host, DbName) -> - Url = Host ++ "/" ++ DbName ++ "/_design/foo", - Body = couch_util:json_encode(?DDOC), - {ok, 201, _Resp, _Body} = test_request:put(Url, [?AUTH], Body), - ok. diff --git a/src/couch/test/couchdb_mrview_tests.erl b/src/couch/test/couchdb_mrview_tests.erl deleted file mode 100644 index 1c96a0ae0..000000000 --- a/src/couch/test/couchdb_mrview_tests.erl +++ /dev/null @@ -1,253 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_mrview_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - - - --define(DDOC, {[ - {<<"_id">>, <<"_design/foo">>}, - {<<"shows">>, {[ - {<<"bar">>, <<"function(doc, req) {return '

<h1>wosh</h1>

';}">>} - ]}}, - {<<"updates">>, {[ - {<<"report">>, <<"function(doc, req) {" - "var data = JSON.parse(req.body); " - "return ['test', data];" - "}">>} - ]}}, - {<<"views">>, {[ - {<<"view1">>, {[ - {<<"map">>, <<"function(doc){emit(doc._id, doc._rev)}">>} - ]}} - ]}} -]}). - --define(USER, "admin"). --define(PASS, "pass"). --define(AUTH, {basic_auth, {?USER, ?PASS}}). - - -start() -> - Ctx = test_util:start_couch([chttpd]), - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - Ctx. - -setup(PortType) -> - ok = meck:new(mochiweb_socket, [passthrough]), - ok = meck:expect(mochiweb_socket, recv, fun mochiweb_socket_recv/3), - - DbName = ?tempdb(), - ok = create_db(PortType, DbName), - - Host = host_url(PortType), - upload_ddoc(Host, ?b2l(DbName)), - {Host, ?b2l(DbName)}. - -teardown(Ctx) -> - ok = config:delete("admins", ?USER, _Persist=false), - test_util:stop_couch(Ctx). - -teardown(PortType, {_Host, DbName}) -> - (catch meck:unload(mochiweb_socket)), - delete_db(PortType, ?l2b(DbName)), - ok. - -mrview_show_test_() -> - { - "Check show functionality", - { - setup, - fun start/0, fun teardown/1, - [ - make_test_case(clustered, [fun should_return_invalid_request_body/2]), - make_test_case(backdoor, [fun should_return_invalid_request_body/2]) - ] - } - }. - -mrview_query_test_() -> - { - "Check view query functionality", - { - setup, - fun start/0, fun teardown/1, - [ - make_test_case(clustered, [fun should_return_400_for_wrong_order_of_keys/2]), - make_test_case(backdoor, [fun should_return_400_for_wrong_order_of_keys/2]) - ] - } - }. - -mrview_cleanup_index_files_test_() -> - { - "Check index files cleanup", - { - setup, - fun start/0, fun teardown/1, - [ - make_test_case(clustered, [fun should_cleanup_index_files/2]) - ] - } - }. - - -make_test_case(Mod, Funs) -> - { - lists:flatten(io_lib:format("~s", [Mod])), - {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]} - }. - -should_return_invalid_request_body(PortType, {Host, DbName}) -> - ?_test(begin - ok = create_doc(PortType, ?l2b(DbName), <<"doc_id">>, {[]}), - ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_update/report/doc_id", - {ok, Status, _Headers, Body} = - test_request:post(ReqUrl, [?AUTH], <<"{truncated}">>), - {Props} = jiffy:decode(Body), - ?assertEqual( - <<"bad_request">>, couch_util:get_value(<<"error">>, Props)), - ?assertEqual( - <<"Invalid request body">>, couch_util:get_value(<<"reason">>, Props)), - ?assertEqual(400, Status), - ok - end). - -should_return_400_for_wrong_order_of_keys(_PortType, {Host, DbName}) -> - Args = [{start_key, "\"bbb\""}, {end_key, "\"aaa\""}], - ?_test(begin - ReqUrl = Host ++ "/" ++ DbName - ++ "/_design/foo/_view/view1?" ++ mochiweb_util:urlencode(Args), - {ok, Status, _Headers, Body} = test_request:get(ReqUrl, [?AUTH]), - {Props} = jiffy:decode(Body), - ?assertEqual( - <<"query_parse_error">>, couch_util:get_value(<<"error">>, Props)), - ?assertEqual( - <<"No rows can match your key range, reverse your start_key and end_key or set descending=true">>, - couch_util:get_value(<<"reason">>, Props)), - ?assertEqual(400, Status), - ok - end). 
- -should_cleanup_index_files(_PortType, {Host, DbName}) -> - ?_test(begin - IndexWildCard = [ - config:get("couchdb", "view_index_dir"), - "/.shards/*/", - DbName, - ".[0-9]*_design/mrview/*" - ], - ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_view/view1", - {ok, _Status0, _Headers0, _Body0} = test_request:get(ReqUrl, [?AUTH]), - FileList0 = filelib:wildcard(IndexWildCard), - ?assertNotEqual([], FileList0), - - % It is hard to simulate inactive view. - % Since couch_mrview:cleanup is called on view definition change. - % That's why we just create extra files in place - ToDelete = lists:map(fun(FilePath) -> - ViewFile = filename:join([ - filename:dirname(FilePath), - "11111111111111111111111111111111.view"]), - file:write_file(ViewFile, <<>>), - ViewFile - end, FileList0), - FileList1 = filelib:wildcard(IndexWildCard), - ?assertEqual([], lists:usort(FileList1 -- (FileList0 ++ ToDelete))), - - CleanupUrl = Host ++ "/" ++ DbName ++ "/_view_cleanup", - {ok, _Status1, _Headers1, _Body1} = test_request:post( - CleanupUrl, [], <<>>, [?AUTH]), - test_util:wait(fun() -> - IndexFiles = filelib:wildcard(IndexWildCard), - case lists:usort(FileList0) == lists:usort(IndexFiles) of - false -> wait; - true -> ok - end - end), - ok - end). - - -create_doc(backdoor, DbName, Id, Body) -> - JsonDoc = couch_util:json_apply_field({<<"_id">>, Id}, Body), - Doc = couch_doc:from_json_obj(JsonDoc), - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - {ok, _} = couch_db:update_docs(Db, [Doc]), - couch_db:ensure_full_commit(Db), - couch_db:close(Db); -create_doc(clustered, DbName, Id, Body) -> - JsonDoc = couch_util:json_apply_field({<<"_id">>, Id}, Body), - Doc = couch_doc:from_json_obj(JsonDoc), - {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]), - ok. - -create_db(backdoor, DbName) -> - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - couch_db:close(Db); -create_db(clustered, DbName) -> - {ok, Status, _, _} = test_request:put(db_url(DbName), [?AUTH], ""), - assert_success(create_db, Status), - ok. - -delete_db(backdoor, DbName) -> - couch_server:delete(DbName, [?ADMIN_CTX]); -delete_db(clustered, DbName) -> - {ok, Status, _, _} = test_request:delete(db_url(DbName), [?AUTH]), - assert_success(delete_db, Status), - ok. - -assert_success(create_db, Status) -> - ?assert(lists:member(Status, [201, 202])); -assert_success(delete_db, Status) -> - ?assert(lists:member(Status, [200, 202])). - - -host_url(PortType) -> - "http://" ++ bind_address(PortType) ++ ":" ++ port(PortType). - -bind_address(PortType) -> - config:get(section(PortType), "bind_address", "127.0.0.1"). - -section(backdoor) -> "http"; -section(clustered) -> "chttpd". - -db_url(DbName) when is_binary(DbName) -> - db_url(binary_to_list(DbName)); -db_url(DbName) when is_list(DbName) -> - host_url(clustered) ++ "/" ++ DbName. - -port(clustered) -> - integer_to_list(mochiweb_socket_server:get(chttpd, port)); -port(backdoor) -> - integer_to_list(mochiweb_socket_server:get(couch_httpd, port)). - - -upload_ddoc(Host, DbName) -> - Url = Host ++ "/" ++ DbName ++ "/_design/foo", - Body = couch_util:json_encode(?DDOC), - {ok, 201, _Resp, _Body} = test_request:put(Url, [?AUTH], Body), - ok. - -mochiweb_socket_recv(Sock, Len, Timeout) -> - case meck:passthrough([Sock, Len, Timeout]) of - {ok, <<"{truncated}">>} -> - {error, closed}; - {ok, Data} -> - {ok, Data}; - Else -> - Else - end. 
diff --git a/src/couch/test/couchdb_os_proc_pool.erl b/src/couch/test/couchdb_os_proc_pool.erl deleted file mode 100644 index 69f8051ad..000000000 --- a/src/couch/test/couchdb_os_proc_pool.erl +++ /dev/null @@ -1,306 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_os_proc_pool). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - - -setup() -> - ok = couch_proc_manager:reload(), - ok = setup_config(). - -teardown(_) -> - ok. - -os_proc_pool_test_() -> - { - "OS processes pool tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - should_block_new_proc_on_full_pool(), - should_free_slot_on_proc_unexpected_exit(), - should_reuse_known_proc(), -% should_process_waiting_queue_as_fifo(), - should_reduce_pool_on_idle_os_procs() - ] - } - } - }. - - -should_block_new_proc_on_full_pool() -> - ?_test(begin - Client1 = spawn_client(), - Client2 = spawn_client(), - Client3 = spawn_client(), - - ?assertEqual(ok, ping_client(Client1)), - ?assertEqual(ok, ping_client(Client2)), - ?assertEqual(ok, ping_client(Client3)), - - Proc1 = get_client_proc(Client1, "1"), - Proc2 = get_client_proc(Client2, "2"), - Proc3 = get_client_proc(Client3, "3"), - - ?assertNotEqual(Proc1, Proc2), - ?assertNotEqual(Proc2, Proc3), - ?assertNotEqual(Proc3, Proc1), - - Client4 = spawn_client(), - ?assertEqual(timeout, ping_client(Client4)), - - ?assertEqual(ok, stop_client(Client1)), - ?assertEqual(ok, ping_client(Client4)), - - Proc4 = get_client_proc(Client4, "4"), - - ?assertEqual(Proc1#proc.pid, Proc4#proc.pid), - ?assertNotEqual(Proc1#proc.client, Proc4#proc.client), - - lists:map(fun(C) -> - ?assertEqual(ok, stop_client(C)) - end, [Client2, Client3, Client4]) - end). 
- - -should_free_slot_on_proc_unexpected_exit() -> - ?_test(begin - Client1 = spawn_client(), - Client2 = spawn_client(), - Client3 = spawn_client(), - - ?assertEqual(ok, ping_client(Client1)), - ?assertEqual(ok, ping_client(Client2)), - ?assertEqual(ok, ping_client(Client3)), - - Proc1 = get_client_proc(Client1, "1"), - Proc2 = get_client_proc(Client2, "2"), - Proc3 = get_client_proc(Client3, "3"), - - ?assertNotEqual(Proc1#proc.pid, Proc2#proc.pid), - ?assertNotEqual(Proc1#proc.client, Proc2#proc.client), - ?assertNotEqual(Proc2#proc.pid, Proc3#proc.pid), - ?assertNotEqual(Proc2#proc.client, Proc3#proc.client), - ?assertNotEqual(Proc3#proc.pid, Proc1#proc.pid), - ?assertNotEqual(Proc3#proc.client, Proc1#proc.client), - - ?assertEqual(ok, kill_client(Client1)), - - Client4 = spawn_client(), - ?assertEqual(ok, ping_client(Client4)), - - Proc4 = get_client_proc(Client4, "4"), - - ?assertEqual(Proc4#proc.pid, Proc1#proc.pid), - ?assertNotEqual(Proc4#proc.client, Proc1#proc.client), - ?assertNotEqual(Proc2#proc.pid, Proc4#proc.pid), - ?assertNotEqual(Proc2#proc.client, Proc4#proc.client), - ?assertNotEqual(Proc3#proc.pid, Proc4#proc.pid), - ?assertNotEqual(Proc3#proc.client, Proc4#proc.client), - - lists:map(fun(C) -> - ?assertEqual(ok, stop_client(C)) - end, [Client2, Client3, Client4]) - end). - - -should_reuse_known_proc() -> - ?_test(begin - Client1 = spawn_client(<<"ddoc1">>), - Client2 = spawn_client(<<"ddoc2">>), - - ?assertEqual(ok, ping_client(Client1)), - ?assertEqual(ok, ping_client(Client2)), - - Proc1 = get_client_proc(Client1, "1"), - Proc2 = get_client_proc(Client2, "2"), - ?assertNotEqual(Proc1#proc.pid, Proc2#proc.pid), - - ?assertEqual(ok, stop_client(Client1)), - ?assertEqual(ok, stop_client(Client2)), - ?assert(is_process_alive(Proc1#proc.pid)), - ?assert(is_process_alive(Proc2#proc.pid)), - - Client1Again = spawn_client(<<"ddoc1">>), - ?assertEqual(ok, ping_client(Client1Again)), - Proc1Again = get_client_proc(Client1Again, "1-again"), - ?assertEqual(Proc1#proc.pid, Proc1Again#proc.pid), - ?assertNotEqual(Proc1#proc.client, Proc1Again#proc.client), - ?assertEqual(ok, stop_client(Client1Again)) - end). - - -%should_process_waiting_queue_as_fifo() -> -% ?_test(begin -% Client1 = spawn_client(<<"ddoc1">>), -% Client2 = spawn_client(<<"ddoc2">>), -% Client3 = spawn_client(<<"ddoc3">>), -% Client4 = spawn_client(<<"ddoc4">>), -% timer:sleep(100), -% Client5 = spawn_client(<<"ddoc5">>), -% -% ?assertEqual(ok, ping_client(Client1)), -% ?assertEqual(ok, ping_client(Client2)), -% ?assertEqual(ok, ping_client(Client3)), -% ?assertEqual(timeout, ping_client(Client4)), -% ?assertEqual(timeout, ping_client(Client5)), -% -% Proc1 = get_client_proc(Client1, "1"), -% ?assertEqual(ok, stop_client(Client1)), -% ?assertEqual(ok, ping_client(Client4)), -% Proc4 = get_client_proc(Client4, "4"), -% -% ?assertNotEqual(Proc4#proc.client, Proc1#proc.client), -% ?assertEqual(Proc1#proc.pid, Proc4#proc.pid), -% ?assertEqual(timeout, ping_client(Client5)), -% -% ?assertEqual(ok, stop_client(Client2)), -% ?assertEqual(ok, stop_client(Client3)), -% ?assertEqual(ok, stop_client(Client4)), -% ?assertEqual(ok, stop_client(Client5)) -% end). 
- - -should_reduce_pool_on_idle_os_procs() -> - ?_test(begin - %% os_process_idle_limit is in sec - config:set("query_server_config", - "os_process_idle_limit", "1", false), - ok = confirm_config("os_process_idle_limit", "1"), - - Client1 = spawn_client(<<"ddoc1">>), - Client2 = spawn_client(<<"ddoc2">>), - Client3 = spawn_client(<<"ddoc3">>), - - ?assertEqual(ok, ping_client(Client1)), - ?assertEqual(ok, ping_client(Client2)), - ?assertEqual(ok, ping_client(Client3)), - - ?assertEqual(3, couch_proc_manager:get_proc_count()), - - ?assertEqual(ok, stop_client(Client1)), - ?assertEqual(ok, stop_client(Client2)), - ?assertEqual(ok, stop_client(Client3)), - - timer:sleep(1200), - ?assertEqual(1, couch_proc_manager:get_proc_count()) - end). - - -setup_config() -> - config:set("native_query_servers", "enable_erlang_query_server", "true", false), - config:set("query_server_config", "os_process_limit", "3", false), - config:set("query_server_config", "os_process_soft_limit", "2", false), - ok = confirm_config("os_process_soft_limit", "2"). - -confirm_config(Key, Value) -> - confirm_config(Key, Value, 0). - -confirm_config(Key, Value, Count) -> - case config:get("query_server_config", Key) of - Value -> - ok; - _ when Count > 10 -> - erlang:error({config_setup, [ - {module, ?MODULE}, - {line, ?LINE}, - {value, timeout} - ]}); - _ -> - %% we need to wait to let gen_server:cast finish - timer:sleep(10), - confirm_config(Key, Value, Count + 1) - end. - -spawn_client() -> - Parent = self(), - Ref = make_ref(), - Pid = spawn(fun() -> - Proc = couch_query_servers:get_os_process(<<"erlang">>), - loop(Parent, Ref, Proc) - end), - {Pid, Ref}. - -spawn_client(DDocId) -> - Parent = self(), - Ref = make_ref(), - Pid = spawn(fun() -> - DDocKey = {DDocId, <<"1-abcdefgh">>}, - DDoc = #doc{body={[{<<"language">>, <<"erlang">>}]}}, - Proc = couch_query_servers:get_ddoc_process(DDoc, DDocKey), - loop(Parent, Ref, Proc) - end), - {Pid, Ref}. - -ping_client({Pid, Ref}) -> - Pid ! ping, - receive - {pong, Ref} -> - ok - after ?TIMEOUT -> - timeout - end. - -get_client_proc({Pid, Ref}, ClientName) -> - Pid ! get_proc, - receive - {proc, Ref, Proc} -> Proc - after ?TIMEOUT -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Timeout getting client " - ++ ClientName ++ " proc"}]}) - end. - -stop_client({Pid, Ref}) -> - Pid ! stop, - receive - {stop, Ref} -> - ok - after ?TIMEOUT -> - timeout - end. - -kill_client({Pid, Ref}) -> - Pid ! die, - receive - {die, Ref} -> - ok - after ?TIMEOUT -> - timeout - end. - -loop(Parent, Ref, Proc) -> - receive - ping -> - Parent ! {pong, Ref}, - loop(Parent, Ref, Proc); - get_proc -> - Parent ! {proc, Ref, Proc}, - loop(Parent, Ref, Proc); - stop -> - couch_query_servers:ret_os_process(Proc), - Parent ! {stop, Ref}; - die -> - Parent ! {die, Ref}, - exit(some_error) - end. diff --git a/src/couch/test/couchdb_update_conflicts_tests.erl b/src/couch/test/couchdb_update_conflicts_tests.erl deleted file mode 100644 index e92c73856..000000000 --- a/src/couch/test/couchdb_update_conflicts_tests.erl +++ /dev/null @@ -1,281 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_update_conflicts_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(i2l(I), integer_to_list(I)). --define(DOC_ID, <<"foobar">>). --define(LOCAL_DOC_ID, <<"_local/foobar">>). --define(NUM_CLIENTS, [100, 500, 1000, 2000, 5000, 10000]). --define(TIMEOUT, 20000). - -start() -> - Ctx = test_util:start_couch(), - config:set("couchdb", "delayed_commits", "true", false), - Ctx. - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX, overwrite]), - Doc = couch_doc:from_json_obj({[{<<"_id">>, ?DOC_ID}, - {<<"value">>, 0}]}), - {ok, Rev} = couch_db:update_doc(Db, Doc, []), - ok = couch_db:close(Db), - RevStr = couch_doc:rev_to_str(Rev), - {DbName, RevStr}. -setup(_) -> - setup(). - -teardown({DbName, _}) -> - ok = couch_server:delete(DbName, []), - ok. -teardown(_, {DbName, _RevStr}) -> - teardown({DbName, _RevStr}). - - -view_indexes_cleanup_test_() -> - { - "Update conflicts", - { - setup, - fun start/0, fun test_util:stop_couch/1, - [ - concurrent_updates(), - bulk_docs_updates() - ] - } - }. - -concurrent_updates()-> - { - "Concurrent updates", - { - foreachx, - fun setup/1, fun teardown/2, - [{NumClients, fun should_concurrently_update_doc/2} - || NumClients <- ?NUM_CLIENTS] - } - }. - -bulk_docs_updates()-> - { - "Bulk docs updates", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_bulk_create_delete_doc/1, - fun should_bulk_create_local_doc/1, - fun should_ignore_invalid_local_doc/1 - ] - } - }. - - -should_concurrently_update_doc(NumClients, {DbName, InitRev})-> - {?i2l(NumClients) ++ " clients", - {inorder, - [{"update doc", - {timeout, ?TIMEOUT div 1000, - ?_test(concurrent_doc_update(NumClients, DbName, InitRev))}}, - {"ensure in single leaf", - ?_test(ensure_in_single_revision_leaf(DbName))}]}}. - -should_bulk_create_delete_doc({DbName, InitRev})-> - ?_test(bulk_delete_create(DbName, InitRev)). - -should_bulk_create_local_doc({DbName, _})-> - ?_test(bulk_create_local_doc(DbName)). - -should_ignore_invalid_local_doc({DbName, _})-> - ?_test(ignore_invalid_local_doc(DbName)). - - -concurrent_doc_update(NumClients, DbName, InitRev) -> - Clients = lists:map( - fun(Value) -> - ClientDoc = couch_doc:from_json_obj({[ - {<<"_id">>, ?DOC_ID}, - {<<"_rev">>, InitRev}, - {<<"value">>, Value} - ]}), - Pid = spawn_client(DbName, ClientDoc), - {Value, Pid, erlang:monitor(process, Pid)} - end, - lists:seq(1, NumClients)), - - lists:foreach(fun({_, Pid, _}) -> Pid ! 
go end, Clients), - - {NumConflicts, SavedValue} = lists:foldl( - fun({Value, Pid, MonRef}, {AccConflicts, AccValue}) -> - receive - {'DOWN', MonRef, process, Pid, {ok, _NewRev}} -> - {AccConflicts, Value}; - {'DOWN', MonRef, process, Pid, conflict} -> - {AccConflicts + 1, AccValue}; - {'DOWN', MonRef, process, Pid, Error} -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Client " ++ ?i2l(Value) - ++ " got update error: " - ++ couch_util:to_list(Error)}]}) - after ?TIMEOUT div 2 -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Timeout waiting for client " - ++ ?i2l(Value) ++ " to die"}]}) - end - end, {0, nil}, Clients), - ?assertEqual(NumClients - 1, NumConflicts), - - {ok, Db} = couch_db:open_int(DbName, []), - {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []), - ok = couch_db:close(Db), - ?assertEqual(1, length(Leaves)), - - [{ok, Doc2}] = Leaves, - {JsonDoc} = couch_doc:to_json_obj(Doc2, []), - ?assertEqual(SavedValue, couch_util:get_value(<<"value">>, JsonDoc)). - -ensure_in_single_revision_leaf(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []), - ok = couch_db:close(Db), - [{ok, Doc}] = Leaves, - - %% FIXME: server restart won't work from test side - %% stop(ok), - %% start(), - - {ok, Db2} = couch_db:open_int(DbName, []), - {ok, Leaves2} = couch_db:open_doc_revs(Db2, ?DOC_ID, all, []), - ok = couch_db:close(Db2), - ?assertEqual(1, length(Leaves2)), - - [{ok, Doc2}] = Leaves, - ?assertEqual(Doc, Doc2). - -bulk_delete_create(DbName, InitRev) -> - {ok, Db} = couch_db:open_int(DbName, []), - - DeletedDoc = couch_doc:from_json_obj({[ - {<<"_id">>, ?DOC_ID}, - {<<"_rev">>, InitRev}, - {<<"_deleted">>, true} - ]}), - NewDoc = couch_doc:from_json_obj({[ - {<<"_id">>, ?DOC_ID}, - {<<"value">>, 666} - ]}), - - {ok, Results} = couch_db:update_docs(Db, [DeletedDoc, NewDoc], []), - ok = couch_db:close(Db), - - ?assertEqual(2, length([ok || {ok, _} <- Results])), - [{ok, Rev1}, {ok, Rev2}] = Results, - - {ok, Db2} = couch_db:open_int(DbName, []), - {ok, [{ok, Doc1}]} = couch_db:open_doc_revs( - Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]), - {ok, [{ok, Doc2}]} = couch_db:open_doc_revs( - Db2, ?DOC_ID, [Rev2], [conflicts, deleted_conflicts]), - ok = couch_db:close(Db2), - - {Doc1Props} = couch_doc:to_json_obj(Doc1, []), - {Doc2Props} = couch_doc:to_json_obj(Doc2, []), - - %% Document was deleted - ?assert(couch_util:get_value(<<"_deleted">>, Doc1Props)), - %% New document not flagged as deleted - ?assertEqual(undefined, couch_util:get_value(<<"_deleted">>, - Doc2Props)), - %% New leaf revision has the right value - ?assertEqual(666, couch_util:get_value(<<"value">>, - Doc2Props)), - %% Deleted document has no conflicts - ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>, - Doc1Props)), - %% Deleted document has no deleted conflicts - ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>, - Doc1Props)), - %% New leaf revision doesn't have conflicts - ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>, - Doc1Props)), - %% New leaf revision doesn't have deleted conflicts - ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>, - Doc1Props)), - - %% Deleted revision has position 2 - ?assertEqual(2, element(1, Rev1)), - %% New leaf revision has position 3 - ?assertEqual(3, element(1, Rev2)). 
- - -bulk_create_local_doc(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - - LocalDoc = couch_doc:from_json_obj({[ - {<<"_id">>, ?LOCAL_DOC_ID}, - {<<"_rev">>, <<"0-1">>} - ]}), - - {ok, Results} = couch_db:update_docs(Db, [LocalDoc], - [], replicated_changes), - ok = couch_db:close(Db), - ?assertEqual([], Results), - - {ok, Db2} = couch_db:open_int(DbName, []), - {ok, LocalDoc1} = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []), - ok = couch_db:close(Db2), - ?assertEqual(?LOCAL_DOC_ID, LocalDoc1#doc.id), - ?assertEqual({0, [<<"2">>]}, LocalDoc1#doc.revs). - - -ignore_invalid_local_doc(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - - LocalDoc = couch_doc:from_json_obj({[ - {<<"_id">>, ?LOCAL_DOC_ID}, - {<<"_rev">>, <<"0-abcdef">>} - ]}), - - {ok, Results} = couch_db:update_docs(Db, [LocalDoc], - [], replicated_changes), - ok = couch_db:close(Db), - ?assertEqual([], Results), - - {ok, Db2} = couch_db:open_int(DbName, []), - Result2 = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []), - ok = couch_db:close(Db2), - ?assertEqual({not_found, missing}, Result2). - - -spawn_client(DbName, Doc) -> - spawn(fun() -> - {ok, Db} = couch_db:open_int(DbName, []), - receive - go -> ok - end, - erlang:yield(), - Result = try - couch_db:update_doc(Db, Doc, []) - catch _:Error -> - Error - end, - ok = couch_db:close(Db), - exit(Result) - end). diff --git a/src/couch/test/couchdb_vhosts_tests.erl b/src/couch/test/couchdb_vhosts_tests.erl deleted file mode 100644 index 1c4117215..000000000 --- a/src/couch/test/couchdb_vhosts_tests.erl +++ /dev/null @@ -1,272 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_vhosts_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). --define(iofmt(S, A), lists:flatten(io_lib:format(S, A))). - - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc1">>}, - {<<"value">>, 666} - ]}), - - Doc1 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/doc1">>}, - {<<"shows">>, {[ - {<<"test">>, <<"function(doc, req) { - return { json: { - requested_path: '/' + req.requested_path.join('/'), - path: '/' + req.path.join('/')}};}">>} - ]}}, - {<<"rewrites">>, [ - {[ - {<<"from">>, <<"/">>}, - {<<"to">>, <<"_show/test">>} - ]} - ]} - ]}), - {ok, _} = couch_db:update_docs(Db, [Doc, Doc1]), - couch_db:ensure_full_commit(Db), - couch_db:close(Db), - - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)), - Url = "http://" ++ Addr ++ ":" ++ Port, - {Url, ?b2l(DbName)}. - -teardown({_, DbName}) -> - ok = couch_server:delete(?l2b(DbName), []), - ok. 
- - -vhosts_test_() -> - { - "Virtual Hosts rewrite tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_return_database_info/1, - fun should_return_revs_info/1, - fun should_return_virtual_request_path_field_in_request/1, - fun should_return_real_request_path_field_in_request/1, - fun should_match_wildcard_vhost/1, - fun should_return_db_info_for_wildcard_vhost_for_custom_db/1, - fun should_replace_rewrite_variables_for_db_and_doc/1, - fun should_return_db_info_for_vhost_with_resource/1, - fun should_return_revs_info_for_vhost_with_resource/1, - fun should_return_db_info_for_vhost_with_wildcard_resource/1, - fun should_return_path_for_vhost_with_wildcard_host/1 - ] - } - } - }. - -should_return_database_info({Url, DbName}) -> - ?_test(begin - ok = config:set("vhosts", "example.com", "/" ++ DbName, false), - case test_request:get(Url, [], [{host_header, "example.com"}]) of - {ok, _, _, Body} -> - {JsonBody} = jiffy:decode(Body), - ?assert(proplists:is_defined(<<"db_name">>, JsonBody)); - Else -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, ?iofmt("Request failed: ~p", [Else])}]}) - end - end). - -should_return_revs_info({Url, DbName}) -> - ?_test(begin - ok = config:set("vhosts", "example.com", "/" ++ DbName, false), - case test_request:get(Url ++ "/doc1?revs_info=true", [], - [{host_header, "example.com"}]) of - {ok, _, _, Body} -> - {JsonBody} = jiffy:decode(Body), - ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody)); - Else -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, ?iofmt("Request failed: ~p", [Else])}]}) - end - end). - -should_return_virtual_request_path_field_in_request({Url, DbName}) -> - ?_test(begin - ok = config:set("vhosts", "example1.com", - "/" ++ DbName ++ "/_design/doc1/_rewrite/", - false), - case test_request:get(Url, [], [{host_header, "example1.com"}]) of - {ok, _, _, Body} -> - {Json} = jiffy:decode(Body), - ?assertEqual(<<"/">>, - proplists:get_value(<<"requested_path">>, Json)); - Else -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, ?iofmt("Request failed: ~p", [Else])}]}) - end - end). - -should_return_real_request_path_field_in_request({Url, DbName}) -> - ?_test(begin - ok = config:set("vhosts", "example1.com", - "/" ++ DbName ++ "/_design/doc1/_rewrite/", - false), - case test_request:get(Url, [], [{host_header, "example1.com"}]) of - {ok, _, _, Body} -> - {Json} = jiffy:decode(Body), - Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"), - ?assertEqual(Path, proplists:get_value(<<"path">>, Json)); - Else -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, ?iofmt("Request failed: ~p", [Else])}]}) - end - end). - -should_match_wildcard_vhost({Url, DbName}) -> - ?_test(begin - ok = config:set("vhosts", "*.example.com", - "/" ++ DbName ++ "/_design/doc1/_rewrite", false), - case test_request:get(Url, [], [{host_header, "test.example.com"}]) of - {ok, _, _, Body} -> - {Json} = jiffy:decode(Body), - Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"), - ?assertEqual(Path, proplists:get_value(<<"path">>, Json)); - Else -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, ?iofmt("Request failed: ~p", [Else])}]}) - end - end). 
- -should_return_db_info_for_wildcard_vhost_for_custom_db({Url, DbName}) -> - ?_test(begin - ok = config:set("vhosts", ":dbname.example1.com", - "/:dbname", false), - Host = DbName ++ ".example1.com", - case test_request:get(Url, [], [{host_header, Host}]) of - {ok, _, _, Body} -> - {JsonBody} = jiffy:decode(Body), - ?assert(proplists:is_defined(<<"db_name">>, JsonBody)); - Else -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, ?iofmt("Request failed: ~p", [Else])}]}) - end - end). - -should_replace_rewrite_variables_for_db_and_doc({Url, DbName}) -> - ?_test(begin - ok = config:set("vhosts",":appname.:dbname.example1.com", - "/:dbname/_design/:appname/_rewrite/", false), - Host = "doc1." ++ DbName ++ ".example1.com", - case test_request:get(Url, [], [{host_header, Host}]) of - {ok, _, _, Body} -> - {Json} = jiffy:decode(Body), - Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"), - ?assertEqual(Path, proplists:get_value(<<"path">>, Json)); - Else -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, ?iofmt("Request failed: ~p", [Else])}]}) - end - end). - -should_return_db_info_for_vhost_with_resource({Url, DbName}) -> - ?_test(begin - ok = config:set("vhosts", - "example.com/test", "/" ++ DbName, false), - ReqUrl = Url ++ "/test", - case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of - {ok, _, _, Body} -> - {JsonBody} = jiffy:decode(Body), - ?assert(proplists:is_defined(<<"db_name">>, JsonBody)); - Else -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, ?iofmt("Request failed: ~p", [Else])}]}) - end - end). - - -should_return_revs_info_for_vhost_with_resource({Url, DbName}) -> - ?_test(begin - ok = config:set("vhosts", - "example.com/test", "/" ++ DbName, false), - ReqUrl = Url ++ "/test/doc1?revs_info=true", - case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of - {ok, _, _, Body} -> - {JsonBody} = jiffy:decode(Body), - ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody)); - Else -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, ?iofmt("Request failed: ~p", [Else])}]}) - end - end). - -should_return_db_info_for_vhost_with_wildcard_resource({Url, DbName}) -> - ?_test(begin - ok = config:set("vhosts", "*.example2.com/test", "/*", false), - ReqUrl = Url ++ "/test", - Host = DbName ++ ".example2.com", - case test_request:get(ReqUrl, [], [{host_header, Host}]) of - {ok, _, _, Body} -> - {JsonBody} = jiffy:decode(Body), - ?assert(proplists:is_defined(<<"db_name">>, JsonBody)); - Else -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, ?iofmt("Request failed: ~p", [Else])}]}) - end - end). - -should_return_path_for_vhost_with_wildcard_host({Url, DbName}) -> - ?_test(begin - ok = config:set("vhosts", "*/test1", - "/" ++ DbName ++ "/_design/doc1/_show/test", - false), - case test_request:get(Url ++ "/test1") of - {ok, _, _, Body} -> - {Json} = jiffy:decode(Body), - Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"), - ?assertEqual(Path, proplists:get_value(<<"path">>, Json)); - Else -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, ?iofmt("Request failed: ~p", [Else])}]}) - end - end). 
diff --git a/src/couch/test/couchdb_views_tests.erl b/src/couch/test/couchdb_views_tests.erl deleted file mode 100644 index 60bb5c975..000000000 --- a/src/couch/test/couchdb_views_tests.erl +++ /dev/null @@ -1,669 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couchdb_views_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_mrview/include/couch_mrview.hrl"). - --define(DELAY, 100). --define(TIMEOUT, 1000). --define(WAIT_DELAY_COUNT, 40). - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - FooRev = create_design_doc(DbName, <<"_design/foo">>, <<"bar">>), - query_view(DbName, "foo", "bar"), - BooRev = create_design_doc(DbName, <<"_design/boo">>, <<"baz">>), - query_view(DbName, "boo", "baz"), - {DbName, {FooRev, BooRev}}. - -setup_with_docs() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - create_docs(DbName), - create_design_doc(DbName, <<"_design/foo">>, <<"bar">>), - DbName. - -setup_legacy() -> - DbName = <<"test">>, - DbFileName = "test.couch", - OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]), - OldViewName = "3b835456c235b1827e012e25666152f3.view", - FixtureViewFilePath = filename:join([?FIXTURESDIR, OldViewName]), - NewViewName = "6cf2c2f766f87b618edf6630b00f8736.view", - - DbDir = config:get("couchdb", "database_dir"), - ViewDir = config:get("couchdb", "view_index_dir"), - OldViewFilePath = filename:join([ViewDir, ".test_design", OldViewName]), - NewViewFilePath = filename:join([ViewDir, ".test_design", "mrview", - NewViewName]), - - NewDbFilePath = filename:join([DbDir, DbFileName]), - - Files = [NewDbFilePath, OldViewFilePath, NewViewFilePath], - - %% make sure there is no left over - lists:foreach(fun(File) -> file:delete(File) end, Files), - - % copy old db file into db dir - {ok, _} = file:copy(OldDbFilePath, NewDbFilePath), - - % copy old view file into view dir - ok = filelib:ensure_dir(OldViewFilePath), - - {ok, _} = file:copy(FixtureViewFilePath, OldViewFilePath), - - {DbName, Files}. - -teardown({DbName, _}) -> - teardown(DbName); -teardown(DbName) when is_binary(DbName) -> - couch_server:delete(DbName, [?ADMIN_CTX]), - ok. - -teardown_legacy({_DbName, Files}) -> - lists:foreach(fun(File) -> file:delete(File) end, Files). - -view_indexes_cleanup_test_() -> - { - "View indexes cleanup", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_have_two_indexes_alive_before_deletion/1, - fun should_cleanup_index_file_after_ddoc_deletion/1, - fun should_cleanup_all_index_files/1 - ] - } - } - }. - -view_group_db_leaks_test_() -> - { - "View group db leaks", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup_with_docs/0, fun teardown/1, - [ - fun couchdb_1138/1, - fun couchdb_1309/1 - ] - } - } - }. 
- -view_group_shutdown_test_() -> - { - "View group shutdown", - { - setup, - fun() -> - meck:new(couch_mrview_index, [passthrough]), - test_util:start_couch() - end, - fun(Ctx) -> - test_util:stop_couch(Ctx), - meck:unload() - end, - [couchdb_1283()] - } - }. - -backup_restore_test_() -> - { - "Upgrade and bugs related tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup_with_docs/0, fun teardown/1, - [ - fun should_not_remember_docs_in_index_after_backup_restore/1 - ] - } - } - }. - - -upgrade_test_() -> - { - "Upgrade tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup_legacy/0, fun teardown_legacy/1, - [ - fun should_upgrade_legacy_view_files/1 - ] - } - } - }. - -should_not_remember_docs_in_index_after_backup_restore(DbName) -> - ?_test(begin - %% COUCHDB-640 - - ok = backup_db_file(DbName), - create_doc(DbName, "doc666"), - - Rows0 = query_view(DbName, "foo", "bar"), - ?assert(has_doc("doc1", Rows0)), - ?assert(has_doc("doc2", Rows0)), - ?assert(has_doc("doc3", Rows0)), - ?assert(has_doc("doc666", Rows0)), - - ?assertEqual(ok, restore_backup_db_file(DbName)), - - Rows1 = query_view(DbName, "foo", "bar"), - ?assert(has_doc("doc1", Rows1)), - ?assert(has_doc("doc2", Rows1)), - ?assert(has_doc("doc3", Rows1)), - ?assertNot(has_doc("doc666", Rows1)) - end). - -should_upgrade_legacy_view_files({DbName, Files}) -> - ?_test(begin - [_NewDbFilePath, OldViewFilePath, NewViewFilePath] = Files, - ok = config:set("query_server_config", "commit_freq", "0", false), - - % ensure old header - OldHeader = read_header(OldViewFilePath), - ?assertMatch(#index_header{}, OldHeader), - - % query view for expected results - Rows0 = query_view(DbName, "test", "test"), - ?assertEqual(2, length(Rows0)), - - % ensure old file gone - ?assertNot(filelib:is_regular(OldViewFilePath)), - - % add doc to trigger update - DocUrl = db_url(DbName) ++ "/boo", - {ok, _, _, _} = test_request:put( - DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":3}">>), - - % query view for expected results - Rows1 = query_view(DbName, "test", "test"), - ?assertEqual(3, length(Rows1)), - - % ensure new header - timer:sleep(2000), % have to wait for awhile to upgrade the index - NewHeader = read_header(NewViewFilePath), - ?assertMatch(#mrheader{}, NewHeader) - end). - - -should_have_two_indexes_alive_before_deletion({DbName, _}) -> - view_cleanup(DbName), - ?_assertEqual(2, count_index_files(DbName)). - -should_cleanup_index_file_after_ddoc_deletion({DbName, {FooRev, _}}) -> - delete_design_doc(DbName, <<"_design/foo">>, FooRev), - view_cleanup(DbName), - ?_assertEqual(1, count_index_files(DbName)). - -should_cleanup_all_index_files({DbName, {FooRev, BooRev}})-> - delete_design_doc(DbName, <<"_design/foo">>, FooRev), - delete_design_doc(DbName, <<"_design/boo">>, BooRev), - view_cleanup(DbName), - ?_assertEqual(0, count_index_files(DbName)). 
- -couchdb_1138(DbName) -> - ?_test(begin - {ok, IndexerPid} = couch_index_server:get_index( - couch_mrview_index, DbName, <<"_design/foo">>), - ?assert(is_pid(IndexerPid)), - ?assert(is_process_alive(IndexerPid)), - ?assertEqual(2, count_users(DbName)), - - wait_indexer(IndexerPid), - - Rows0 = query_view(DbName, "foo", "bar"), - ?assertEqual(3, length(Rows0)), - ?assertEqual(2, count_users(DbName)), - ?assert(is_process_alive(IndexerPid)), - - create_doc(DbName, "doc1000"), - Rows1 = query_view(DbName, "foo", "bar"), - ?assertEqual(4, length(Rows1)), - ?assertEqual(2, count_users(DbName)), - - ?assert(is_process_alive(IndexerPid)), - - compact_db(DbName), - ?assert(is_process_alive(IndexerPid)), - - compact_view_group(DbName, "foo"), - ?assertEqual(2, count_users(DbName)), - - ?assert(is_process_alive(IndexerPid)), - - create_doc(DbName, "doc1001"), - Rows2 = query_view(DbName, "foo", "bar"), - ?assertEqual(5, length(Rows2)), - ?assertEqual(2, count_users(DbName)), - - ?assert(is_process_alive(IndexerPid)) - end). - -couchdb_1309(DbName) -> - ?_test(begin - {ok, IndexerPid} = couch_index_server:get_index( - couch_mrview_index, DbName, <<"_design/foo">>), - ?assert(is_pid(IndexerPid)), - ?assert(is_process_alive(IndexerPid)), - ?assertEqual(2, count_users(DbName)), - - wait_indexer(IndexerPid), - - create_doc(DbName, "doc1001"), - Rows0 = query_view(DbName, "foo", "bar"), - check_rows_value(Rows0, null), - ?assertEqual(4, length(Rows0)), - ?assertEqual(2, count_users(DbName)), - - ?assert(is_process_alive(IndexerPid)), - - update_design_doc(DbName, <<"_design/foo">>, <<"bar">>), - {ok, NewIndexerPid} = couch_index_server:get_index( - couch_mrview_index, DbName, <<"_design/foo">>), - ?assert(is_pid(NewIndexerPid)), - ?assert(is_process_alive(NewIndexerPid)), - ?assertNotEqual(IndexerPid, NewIndexerPid), - UserCnt = case count_users(DbName) of - N when N > 2 -> - timer:sleep(1000), - count_users(DbName); - N -> N - end, - ?assertEqual(2, UserCnt), - - Rows1 = query_view(DbName, "foo", "bar", ok), - ?assertEqual(0, length(Rows1)), - Rows2 = query_view(DbName, "foo", "bar"), - check_rows_value(Rows2, 1), - ?assertEqual(4, length(Rows2)), - - ok = stop_indexer( %% FIXME we need to grab monitor earlier - fun() -> ok end, - IndexerPid, ?LINE, - "old view group is not dead after ddoc update"), - - ok = stop_indexer( - fun() -> couch_server:delete(DbName, [?ADMIN_USER]) end, - NewIndexerPid, ?LINE, - "new view group did not die after DB deletion") - end). 
- -couchdb_1283() -> - ?_test(begin - ok = config:set("couchdb", "max_dbs_open", "3", false), - ok = config:set("couchdb", "delayed_commits", "false", false), - - {ok, MDb1} = couch_db:create(?tempdb(), [?ADMIN_CTX]), - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/foo">>}, - {<<"language">>, <<"javascript">>}, - {<<"views">>, {[ - {<<"foo">>, {[ - {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>} - ]}}, - {<<"foo2">>, {[ - {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>} - ]}}, - {<<"foo3">>, {[ - {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>} - ]}}, - {<<"foo4">>, {[ - {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>} - ]}}, - {<<"foo5">>, {[ - {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>} - ]}} - ]}} - ]}), - {ok, _} = couch_db:update_doc(MDb1, DDoc, []), - ok = populate_db(MDb1, 100, 100), - query_view(couch_db:name(MDb1), "foo", "foo"), - ok = couch_db:close(MDb1), - - {ok, Pid} = couch_index_server:get_index( - couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>), - - % Start and pause compacton - WaitRef = erlang:make_ref(), - meck:expect(couch_mrview_index, compact, fun(Db, State, Opts) -> - receive {WaitRef, From, init} -> ok end, - From ! {WaitRef, inited}, - receive {WaitRef, go} -> ok end, - meck:passthrough([Db, State, Opts]) - end), - - {ok, CPid} = gen_server:call(Pid, compact), - CRef = erlang:monitor(process, CPid), - ?assert(is_process_alive(CPid)), - - % Make sure that our compactor is waiting for us - % before we continue our assertions - CPid ! {WaitRef, self(), init}, - receive {WaitRef, inited} -> ok end, - - % Make sure that a compaction process takes a monitor - % on the database's main_pid - ?assertEqual(true, lists:member(CPid, couch_db:monitored_by(MDb1))), - - % Finish compaction to and make sure the monitor - % disappears - CPid ! {WaitRef, go}, - wait_for_process_shutdown(CRef, normal, - {reason, "Failure compacting view group"}), - - % Make sure that the monitor was removed - ?assertEqual(false, lists:member(CPid, couch_db:monitored_by(MDb1))) - end). - -wait_for_process_shutdown(Pid, ExpectedReason, Error) -> - receive - {'DOWN', Pid, process, _, Reason} -> - ?assertEqual(ExpectedReason, Reason) - after ?TIMEOUT -> - erlang:error( - {assertion_failed, - [{module, ?MODULE}, {line, ?LINE}, Error]}) - end. - - -create_doc(DbName, DocId) when is_list(DocId) -> - create_doc(DbName, ?l2b(DocId)); -create_doc(DbName, DocId) when is_binary(DocId) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - Doc666 = couch_doc:from_json_obj({[ - {<<"_id">>, DocId}, - {<<"value">>, 999} - ]}), - {ok, _} = couch_db:update_docs(Db, [Doc666]), - couch_db:ensure_full_commit(Db), - couch_db:close(Db). - -create_docs(DbName) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - Doc1 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc1">>}, - {<<"value">>, 1} - - ]}), - Doc2 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc2">>}, - {<<"value">>, 2} - - ]}), - Doc3 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc3">>}, - {<<"value">>, 3} - - ]}), - {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]), - couch_db:ensure_full_commit(Db), - couch_db:close(Db). 
- -populate_db(Db, BatchSize, N) when N > 0 -> - Docs = lists:map( - fun(_) -> - couch_doc:from_json_obj({[ - {<<"_id">>, couch_uuids:new()}, - {<<"value">>, base64:encode(crypto:strong_rand_bytes(1000))} - ]}) - end, - lists:seq(1, BatchSize)), - {ok, _} = couch_db:update_docs(Db, Docs, []), - populate_db(Db, BatchSize, N - length(Docs)); -populate_db(_Db, _, _) -> - ok. - -create_design_doc(DbName, DDName, ViewName) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDName}, - {<<"language">>, <<"javascript">>}, - {<<"views">>, {[ - {ViewName, {[ - {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>} - ]}} - ]}} - ]}), - {ok, Rev} = couch_db:update_doc(Db, DDoc, []), - couch_db:ensure_full_commit(Db), - couch_db:close(Db), - Rev. - -update_design_doc(DbName, DDName, ViewName) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - {ok, Doc} = couch_db:open_doc(Db, DDName, [?ADMIN_CTX]), - {Props} = couch_doc:to_json_obj(Doc, []), - Rev = couch_util:get_value(<<"_rev">>, Props), - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDName}, - {<<"_rev">>, Rev}, - {<<"language">>, <<"javascript">>}, - {<<"views">>, {[ - {ViewName, {[ - {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>} - ]}} - ]}} - ]}), - {ok, NewRev} = couch_db:update_doc(Db, DDoc, [?ADMIN_CTX]), - couch_db:ensure_full_commit(Db), - couch_db:close(Db), - NewRev. - -delete_design_doc(DbName, DDName, Rev) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDName}, - {<<"_rev">>, couch_doc:rev_to_str(Rev)}, - {<<"_deleted">>, true} - ]}), - {ok, _} = couch_db:update_doc(Db, DDoc, [Rev]), - couch_db:close(Db). - -db_url(DbName) -> - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)), - "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(DbName). - -query_view(DbName, DDoc, View) -> - query_view(DbName, DDoc, View, false). - -query_view(DbName, DDoc, View, Stale) -> - {ok, Code, _Headers, Body} = test_request:get( - db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View - ++ case Stale of - false -> []; - _ -> "?stale=" ++ atom_to_list(Stale) - end), - ?assertEqual(200, Code), - {Props} = jiffy:decode(Body), - couch_util:get_value(<<"rows">>, Props, []). - -check_rows_value(Rows, Value) -> - lists:foreach( - fun({Row}) -> - ?assertEqual(Value, couch_util:get_value(<<"value">>, Row)) - end, Rows). - -view_cleanup(DbName) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - couch_mrview:cleanup(Db), - couch_db:close(Db). - -count_users(DbName) -> - {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), - DbPid = couch_db:get_pid(Db), - {monitored_by, Monitors0} = process_info(DbPid, monitored_by), - Monitors = lists:filter(fun is_pid/1, Monitors0), - CouchFiles = [P || P <- Monitors, couch_file:process_info(P) =/= undefined], - ok = couch_db:close(Db), - length(lists:usort(Monitors) -- [self() | CouchFiles]). - -count_index_files(DbName) -> - % call server to fetch the index files - RootDir = config:get("couchdb", "view_index_dir"), - length(filelib:wildcard(RootDir ++ "/." ++ - binary_to_list(DbName) ++ "_design"++"/mrview/*")). - -has_doc(DocId1, Rows) -> - DocId = iolist_to_binary(DocId1), - lists:any(fun({R}) -> lists:member({<<"id">>, DocId}, R) end, Rows). 
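count_users/1 above derives its figure from the monitor list of the database
pid, dropping couch_file processes and the caller itself. The primitive
underneath is process_info/2 with the monitored_by item; a reduced sketch,
assuming Pid is any live process:

    monitor_count(Pid) ->
        {monitored_by, Monitors} = erlang:process_info(Pid, monitored_by),
        % Keep pids only; newer OTP releases may also report references here.
        length(lists:usort([P || P <- Monitors, is_pid(P)])).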
- -backup_db_file(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - try - SrcPath = couch_db:get_filepath(Db), - Src = if - is_list(SrcPath) -> SrcPath; - true -> binary_to_list(SrcPath) - end, - ok = copy_tree(Src, Src ++ ".backup") - after - couch_db:close(Db) - end. - -restore_backup_db_file(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - Src = couch_db:get_filepath(Db), - ok = couch_db:close(Db), - DbPid = couch_db:get_pid(Db), - exit(DbPid, shutdown), - ok = copy_tree(Src ++ ".backup", Src), - - test_util:wait(fun() -> - case couch_server:open(DbName, [{timeout, ?TIMEOUT}]) of - {ok, WaitDb} -> - case couch_db:get_pid(WaitDb) == DbPid of - true -> wait; - false -> ok - end; - Else -> - Else - end - end, ?TIMEOUT, ?DELAY). - -compact_db(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, _} = couch_db:start_compact(Db), - ok = couch_db:close(Db), - wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT). - -wait_db_compact_done(_DbName, 0) -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "DB compaction failed to finish"}]}); -wait_db_compact_done(DbName, N) -> - {ok, Db} = couch_db:open_int(DbName, []), - ok = couch_db:close(Db), - CompactorPid = couch_db:get_compactor_pid(Db), - case is_pid(CompactorPid) of - false -> - ok; - true -> - ok = timer:sleep(?DELAY), - wait_db_compact_done(DbName, N - 1) - end. - -compact_view_group(DbName, DDocId) when is_list(DDocId) -> - compact_view_group(DbName, ?l2b("_design/" ++ DDocId)); -compact_view_group(DbName, DDocId) when is_binary(DDocId) -> - ok = couch_mrview:compact(DbName, DDocId), - wait_view_compact_done(DbName, DDocId, 10). - -wait_view_compact_done(_DbName, _DDocId, 0) -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "DB compaction failed to finish"}]}); -wait_view_compact_done(DbName, DDocId, N) -> - {ok, Code, _Headers, Body} = test_request:get( - db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"), - ?assertEqual(200, Code), - {Info} = jiffy:decode(Body), - {IndexInfo} = couch_util:get_value(<<"view_index">>, Info), - CompactRunning = couch_util:get_value(<<"compact_running">>, IndexInfo), - case CompactRunning of - false -> - ok; - true -> - ok = timer:sleep(?DELAY), - wait_view_compact_done(DbName, DDocId, N - 1) - end. - -read_header(File) -> - {ok, Fd} = couch_file:open(File), - {ok, {_Sig, Header}} = couch_file:read_header(Fd), - couch_file:close(Fd), - Header. - -stop_indexer(StopFun, Pid, Line, Reason) -> - case test_util:stop_sync(Pid, StopFun) of - timeout -> - erlang:error( - {assertion_failed, - [{module, ?MODULE}, {line, Line}, - {reason, Reason}]}); - ok -> - ok - end. - -wait_indexer(IndexerPid) -> - test_util:wait(fun() -> - {ok, Info} = couch_index:get_info(IndexerPid), - case couch_util:get_value(compact_running, Info) of - true -> - wait; - false -> - ok - end - end). - -copy_tree(Src, Dst) -> - case filelib:is_dir(Src) of - true -> - {ok, Files} = file:list_dir(Src), - copy_tree(Files, Src, Dst); - false -> - ok = filelib:ensure_dir(Dst), - {ok, _} = file:copy(Src, Dst), - ok - end. - -copy_tree([], _Src, _Dst) -> - ok; -copy_tree([File | Rest], Src, Dst) -> - FullSrc = filename:join(Src, File), - FullDst = filename:join(Dst, File), - ok = copy_tree(FullSrc, FullDst), - copy_tree(Rest, Src, Dst). 
diff --git a/src/couch/test/eunit/chttpd_endpoints_tests.erl b/src/couch/test/eunit/chttpd_endpoints_tests.erl
new file mode 100644
index 000000000..9b7430823
--- /dev/null
+++ b/src/couch/test/eunit/chttpd_endpoints_tests.erl
@@ -0,0 +1,184 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_endpoints_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+setup("mocked") ->
+    fun setup_mocked/1;
+setup("not_mocked") ->
+    fun setup_not_mocked/1.
+
+setup_mocked({Endpoint, {_Path, Module, Function}}) ->
+    catch meck:unload(Module),
+    meck:new(Module, [passthrough, non_strict]),
+    Expected = mock_handler(Endpoint, Module, Function),
+    Expected.
+
+setup_not_mocked({_Endpoint, {_Path, Module, _Function}}) ->
+    catch meck:unload(Module),
+    meck:new(Module, [non_strict]),
+    ok.
+
+teardown({_Endpoint, {_Path, Module, _Function}}, _) ->
+    catch meck:unload(Module),
+    ok.
+
+handlers(url_handler) ->
+    [
+        {<<"">>, chttpd_misc, handle_welcome_req},
+        {<<"favicon.ico">>, chttpd_misc, handle_favicon_req},
+        {<<"_utils">>, chttpd_misc, handle_utils_dir_req},
+        {<<"_all_dbs">>, chttpd_misc, handle_all_dbs_req},
+        {<<"_dbs_info">>, chttpd_misc, handle_dbs_info_req},
+        {<<"_active_tasks">>, chttpd_misc, handle_task_status_req},
+        {<<"_node">>, chttpd_misc, handle_node_req},
+        {<<"_reload_query_servers">>, chttpd_misc, handle_reload_query_servers_req},
+        {<<"_replicate">>, chttpd_misc, handle_replicate_req},
+        {<<"_uuids">>, chttpd_misc, handle_uuids_req},
+        {<<"_session">>, chttpd_auth, handle_session_req},
+        {<<"_up">>, chttpd_misc, handle_up_req},
+        {<<"_membership">>, mem3_httpd, handle_membership_req},
+        {<<"_db_updates">>, global_changes_httpd, handle_global_changes_req},
+        {<<"_cluster_setup">>, setup_httpd, handle_setup_req},
+        {<<"anything">>, chttpd_db, handle_request}
+    ];
+handlers(db_handler) ->
+    [
+        {<<"_view_cleanup">>, chttpd_db, handle_view_cleanup_req},
+        {<<"_compact">>, chttpd_db, handle_compact_req},
+        {<<"_design">>, chttpd_db, handle_design_req},
+        {<<"_temp_view">>, chttpd_view, handle_temp_view_req},
+        {<<"_changes">>, chttpd_db, handle_changes_req},
+        {<<"_shards">>, mem3_httpd, handle_shards_req},
+        {<<"_index">>, mango_httpd, handle_req},
+        {<<"_explain">>, mango_httpd, handle_req},
+        {<<"_find">>, mango_httpd, handle_req}
+    ];
+handlers(design_handler) ->
+    [
+        {<<"_view">>, chttpd_view, handle_view_req},
+        {<<"_show">>, chttpd_show, handle_doc_show_req},
+        {<<"_list">>, chttpd_show, handle_view_list_req},
+        {<<"_update">>, chttpd_show, handle_doc_update_req},
+        {<<"_info">>, chttpd_db, handle_design_info_req},
+        {<<"_rewrite">>, chttpd_rewrite, handle_rewrite_req}
+    ].
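Each triple above pairs a path prefix with the module and function the router
should resolve it to. Resolution, exercised by the cases below, is a lookup
that falls back to a caller-supplied default; schematically (the shell output
shown is what the tables imply, not a captured transcript):

    1> chttpd_handlers:url_handler(<<"_up">>, fun chttpd_db:handle_request/1).
    fun chttpd_misc:handle_up_req/1
    2> chttpd_handlers:url_handler(<<"bogus">>, fun chttpd_db:handle_request/1).
    fun chttpd_db:handle_request/1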
+ +endpoints_test_() -> + { + "Checking dynamic endpoints", + { + setup, + fun() -> test_util:start_couch([chttpd, mem3, global_changes, mango, setup]) end, + fun test_util:stop/1, + [ + check_dynamic_endpoints( + "mocked", url_handler, fun ensure_called/2), + check_dynamic_endpoints( + "mocked", db_handler, fun ensure_called/2), + check_dynamic_endpoints( + "mocked", design_handler, fun ensure_called/2), + check_dynamic_endpoints( + "not_mocked", url_handler, fun verify_we_fail_if_missing/2), + check_dynamic_endpoints( + "not_mocked", db_handler, fun verify_we_fail_if_missing/2), + check_dynamic_endpoints( + "not_mocked", design_handler, fun verify_we_fail_if_missing/2) + ] + } + }. + +check_dynamic_endpoints(Setup, EndpointType, TestFun) -> + { + "Checking '" + ++ atom_to_list(EndpointType) + ++ "' [" ++ Setup ++ "] dynamic endpoints", + [ + make_test_case(Setup, EndpointType, Spec, TestFun) + || Spec <- handlers(EndpointType) + ] + }. + +make_test_case(Setup, EndpointType, {Path, Module, Function}, TestFun) -> + { + lists:flatten(io_lib:format("~s -- \"~s\"", [EndpointType, ?b2l(Path)])), + { + foreachx, setup(Setup), fun teardown/2, + [ + {{EndpointType, {Path, Module, Function}}, TestFun} + ] + } + }. + + +mock_handler(url_handler = Endpoint, M, F) -> + meck:expect(M, F, fun(X) -> {return, Endpoint, X} end), + fun M:F/1; +mock_handler(db_handler = Endpoint, M, F) -> + meck:expect(M, F, fun(X, Y) -> {return, Endpoint, X, Y} end), + fun M:F/2; +mock_handler(design_handler = Endpoint, M, F) -> + meck:expect(M, F, fun(X, Y, Z) -> {return, Endpoint, X, Y, Z} end), + fun M:F/3. + +ensure_called({url_handler = Endpoint, {Path, _M, _Fun}}, ExpectedFun) -> + HandlerFun = handler(Endpoint, Path), + ?_test(begin + ?assertEqual(ExpectedFun, HandlerFun), + ?assertMatch({return, Endpoint, x}, HandlerFun(x)) + end); +ensure_called({db_handler = Endpoint, {Path, _M, _Fun}}, ExpectedFun) -> + HandlerFun = handler(Endpoint, Path), + ?_test(begin + ?assertEqual(ExpectedFun, HandlerFun), + ?assertMatch({return, Endpoint, x, y}, HandlerFun(x, y)) + end); +ensure_called({design_handler = Endpoint, {Path, _M, _Fun}}, ExpectedFun) -> + HandlerFun = handler(Endpoint, Path), + ?_test(begin + ?assertEqual(ExpectedFun, HandlerFun), + ?assertMatch({return, Endpoint, x, y, z}, HandlerFun(x, y, z)) + end). + +%% Test the test: when the final target function is missing, +%% the Fun call must fail. +verify_we_fail_if_missing({url_handler = Endpoint, {Path, _M, _Fun}}, _) -> + HandlerFun = handler(Endpoint, Path), + ?_test(begin + ?assert(is_function(HandlerFun)), + ?assertError(undef, HandlerFun(x)) + end); +verify_we_fail_if_missing({db_handler = Endpoint, {Path, _M, _Fun}}, _) -> + HandlerFun = handler(Endpoint, Path), + ?_test(begin + ?assert(is_function(HandlerFun)), + ?assertError(undef, HandlerFun(x, y)) + end); +verify_we_fail_if_missing({design_handler = Endpoint, {Path, _M, _Fun}}, _) -> + HandlerFun = handler(Endpoint, Path), + ?_test(begin + ?assert(is_function(HandlerFun)), + ?assertError(undef, HandlerFun(x, y, z)) + end). + +handler(url_handler, HandlerKey) -> + chttpd_handlers:url_handler(HandlerKey, fun chttpd_db:handle_request/1); +handler(db_handler, HandlerKey) -> + chttpd_handlers:db_handler(HandlerKey, fun chttpd_db:db_req/2); +handler(design_handler, HandlerKey) -> + chttpd_handlers:design_handler(HandlerKey, fun dummy/3). + +dummy(_, _, _) -> + throw(error). 
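The not-mocked variants above lean on a meck detail: meck:new(Module,
[non_strict]) installs a module shell even when no real module exists, and
because setup_not_mocked/1 registers no expectations, any call into it raises
undef. A minimal sketch of that behavior (fake_mod is a hypothetical name):

    undef_demo() ->
        meck:new(fake_mod, [non_strict]),
        try
            fake_mod:missing(x)
        catch
            error:undef -> ok
        after
            meck:unload(fake_mod)
        end.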
diff --git a/src/couch/test/eunit/couch_auth_cache_tests.erl b/src/couch/test/eunit/couch_auth_cache_tests.erl new file mode 100644 index 000000000..706c0cee9 --- /dev/null +++ b/src/couch/test/eunit/couch_auth_cache_tests.erl @@ -0,0 +1,384 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_auth_cache_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(SALT, <<"SALT">>). +-define(DB_TIMEOUT, 15000). + +start() -> + test_util:start_couch([ioq]). + + +setup() -> + DbName = ?tempdb(), + config:set("couch_httpd_auth", "authentication_db", + ?b2l(DbName), false), + DbName. + +teardown(DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + ok. + + +couch_auth_cache_test_() -> + { + "CouchDB auth cache tests", + { + setup, + fun start/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_get_nil_on_missed_cache/1, + fun should_get_right_password_hash/1, + fun should_ensure_doc_hash_equals_cached_one/1, + fun should_update_password/1, + fun should_cleanup_cache_after_userdoc_deletion/1, + fun should_restore_cache_after_userdoc_recreation/1, + fun should_drop_cache_on_auth_db_change/1, + fun should_restore_cache_on_auth_db_change/1, + fun should_recover_cache_after_shutdown/1, + fun should_close_old_db_on_auth_db_change/1, + fun should_get_admin_from_config/1 + ] + } + } + }. 
+ +auth_vdu_test_() -> + Cases = [ + %% Old , New , Result + %% [Roles, Type] , [Roles, Type] , + + %% Updating valid user doc with valid one + {[custom, user], [custom, user], "ok"}, + + %% Updating invalid doc (missing type or roles field) with valid one + {[missing, missing], [custom, user], "ok"}, + {[missing, user], [custom, user], "ok"}, + {[custom, missing], [custom, user], "ok"}, + + %% Updating invalid doc (wrong type) with valid one + {[missing, other], [custom, user], "ok"}, + {[custom, other], [custom, user], "ok"}, + + %% Updating valid document with invalid one + {[custom, user], [missing, missing], "doc.type must be user"}, + {[custom, user], [missing, user], "doc.roles must exist"}, + {[custom, user], [custom, missing], "doc.type must be user"}, + {[custom, user], [missing, other], "doc.type must be user"}, + {[custom, user], [custom, other], "doc.type must be user"}, + + %% Updating invalid doc with invalid one + {[missing, missing], [missing, missing], "doc.type must be user"}, + {[missing, missing], [missing, user], "doc.roles must exist"}, + {[missing, missing], [custom, missing], "doc.type must be user"}, + {[missing, missing], [missing, other], "doc.type must be user"}, + {[missing, missing], [custom, other], "doc.type must be user"}, + + {[missing, user], [missing, missing], "doc.type must be user"}, + {[missing, user], [missing, user], "doc.roles must exist"}, + {[missing, user], [custom, missing], "doc.type must be user"}, + {[missing, user], [missing, other], "doc.type must be user"}, + {[missing, user], [custom, other], "doc.type must be user"}, + + {[missing, other], [missing, missing], "doc.type must be user"}, + {[missing, other], [missing, user], "doc.roles must exist"}, + {[missing, other], [custom, missing], "doc.type must be user"}, + {[missing, other], [missing, other], "doc.type must be user"}, + {[missing, other], [custom, other], "doc.type must be user"}, + + {[custom, missing], [missing, missing], "doc.type must be user"}, + {[custom, missing], [missing, user], "doc.roles must exist"}, + {[custom, missing], [custom, missing], "doc.type must be user"}, + {[custom, missing], [missing, other], "doc.type must be user"}, + {[custom, missing], [custom, other], "doc.type must be user"}, + + {[custom, other], [missing, missing], "doc.type must be user"}, + {[custom, other], [missing, user], "doc.roles must exist"}, + {[custom, other], [custom, missing], "doc.type must be user"}, + {[custom, other], [missing, other], "doc.type must be user"}, + {[custom, other], [custom, other], "doc.type must be user"} + ], + + %% Make sure we covered all combinations + AllPossibleDocs = couch_tests_combinatorics:product([ + [missing, custom], + [missing, user, other] + ]), + AllPossibleCases = couch_tests_combinatorics:product( + [AllPossibleDocs, AllPossibleDocs]), + ?assertEqual([], AllPossibleCases -- [[A, B] || {A, B, _} <- Cases]), + + { + "Check User doc validation", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + [ + make_validate_test(Case) || Case <- Cases + ] + } + }. + +should_get_nil_on_missed_cache(_) -> + ?_assertEqual(nil, couch_auth_cache:get_user_creds("joe")). + +should_get_right_password_hash(DbName) -> + ?_test(begin + PasswordHash = hash_password("pass1"), + {ok, _} = update_user_doc(DbName, "joe", "pass1"), + {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), + ?assertEqual(PasswordHash, + couch_util:get_value(<<"password_sha">>, Creds)) + end). 
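The coverage assertion in auth_vdu_test_/0 relies on
couch_tests_combinatorics:product/1 returning the cartesian product of its
input lists, so every old/new doc shape is guaranteed to appear in Cases. For
two small inputs the expansion would look like this (the ordering is an
assumption of this sketch; only membership matters to the assertion):

    1> couch_tests_combinatorics:product([[missing, custom], [user, other]]).
    [[missing, user], [missing, other], [custom, user], [custom, other]]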
+ +should_ensure_doc_hash_equals_cached_one(DbName) -> + ?_test(begin + {ok, _} = update_user_doc(DbName, "joe", "pass1"), + {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), + + CachedHash = couch_util:get_value(<<"password_sha">>, Creds), + StoredHash = get_user_doc_password_sha(DbName, "joe"), + ?assertEqual(StoredHash, CachedHash) + end). + +should_update_password(DbName) -> + ?_test(begin + PasswordHash = hash_password("pass2"), + {ok, Rev} = update_user_doc(DbName, "joe", "pass1"), + {ok, _} = update_user_doc(DbName, "joe", "pass2", Rev), + {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), + ?assertEqual(PasswordHash, + couch_util:get_value(<<"password_sha">>, Creds)) + end). + +should_cleanup_cache_after_userdoc_deletion(DbName) -> + ?_test(begin + {ok, _} = update_user_doc(DbName, "joe", "pass1"), + delete_user_doc(DbName, "joe"), + ?assertEqual(nil, couch_auth_cache:get_user_creds("joe")) + end). + +should_restore_cache_after_userdoc_recreation(DbName) -> + ?_test(begin + PasswordHash = hash_password("pass5"), + {ok, _} = update_user_doc(DbName, "joe", "pass1"), + delete_user_doc(DbName, "joe"), + ?assertEqual(nil, couch_auth_cache:get_user_creds("joe")), + + {ok, _} = update_user_doc(DbName, "joe", "pass5"), + {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), + + ?assertEqual(PasswordHash, + couch_util:get_value(<<"password_sha">>, Creds)) + end). + +should_drop_cache_on_auth_db_change(DbName) -> + ?_test(begin + {ok, _} = update_user_doc(DbName, "joe", "pass1"), + full_commit(DbName), + config:set("couch_httpd_auth", "authentication_db", + ?b2l(?tempdb()), false), + ?assertEqual(nil, couch_auth_cache:get_user_creds("joe")) + end). + +should_restore_cache_on_auth_db_change(DbName) -> + ?_test(begin + PasswordHash = hash_password("pass1"), + {ok, _} = update_user_doc(DbName, "joe", "pass1"), + {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), + full_commit(DbName), + + DbName1 = ?tempdb(), + config:set("couch_httpd_auth", "authentication_db", + ?b2l(DbName1), false), + + {ok, _} = update_user_doc(DbName1, "joe", "pass5"), + full_commit(DbName1), + + config:set("couch_httpd_auth", "authentication_db", + ?b2l(DbName), false), + + {ok, Creds, _} = couch_auth_cache:get_user_creds("joe"), + ?assertEqual(PasswordHash, + couch_util:get_value(<<"password_sha">>, Creds)) + end). + +should_recover_cache_after_shutdown(DbName) -> + ?_test(begin + PasswordHash = hash_password("pass2"), + {ok, Rev0} = update_user_doc(DbName, "joe", "pass1"), + {ok, Rev1} = update_user_doc(DbName, "joe", "pass2", Rev0), + full_commit(DbName), + shutdown_db(DbName), + {ok, Rev1} = get_doc_rev(DbName, "joe"), + ?assertEqual(PasswordHash, get_user_doc_password_sha(DbName, "joe")) + end). + +should_close_old_db_on_auth_db_change(DbName) -> + {timeout, ?DB_TIMEOUT, ?_test(begin + ?assertEqual(ok, wait_db(DbName, fun is_opened/1)), + config:set("couch_httpd_auth", "authentication_db", + ?b2l(?tempdb()), false), + ?assertEqual(ok, wait_db(DbName, fun is_closed/1)) + end)}. + +should_get_admin_from_config(_DbName) -> + ?_test(begin + config:set("admins", "testadmin", "password", false), + Creds = test_util:wait(fun() -> + case couch_auth_cache:get_user_creds("testadmin") of + {ok, Creds0, _} -> Creds0; + nil -> wait + end + end), + Roles = couch_util:get_value(<<"roles">>, Creds), + ?assertEqual([<<"_admin">>], Roles) + end). + +update_user_doc(DbName, UserName, Password) -> + update_user_doc(DbName, UserName, Password, nil). 
+ +update_user_doc(DbName, UserName, Password, Rev) -> + User = iolist_to_binary(UserName), + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"org.couchdb.user:", User/binary>>}, + {<<"name">>, User}, + {<<"type">>, <<"user">>}, + {<<"salt">>, ?SALT}, + {<<"password_sha">>, hash_password(Password)}, + {<<"roles">>, []} + ] ++ case Rev of + nil -> []; + _ -> [{<<"_rev">>, Rev}] + end + }), + {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), + {ok, NewRev} = couch_db:update_doc(AuthDb, Doc, []), + ok = couch_db:close(AuthDb), + {ok, couch_doc:rev_to_str(NewRev)}. + +wait_db(Db, DbFun) -> + test_util:wait(fun() -> + case DbFun(Db) of + true -> + ok; + false -> + wait + end + end, ?DB_TIMEOUT, 500). + + +hash_password(Password) -> + ?l2b(couch_util:to_hex(crypto:hash(sha, iolist_to_binary([Password, ?SALT])))). + +shutdown_db(DbName) -> + {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), + ok = couch_db:close(AuthDb), + couch_util:shutdown_sync(couch_db:get_pid(AuthDb)), + ok = timer:sleep(1000). + +get_doc_rev(DbName, UserName) -> + DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]), + {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), + UpdateRev = + case couch_db:open_doc(AuthDb, DocId, []) of + {ok, Doc} -> + {Props} = couch_doc:to_json_obj(Doc, []), + couch_util:get_value(<<"_rev">>, Props); + {not_found, missing} -> + nil + end, + ok = couch_db:close(AuthDb), + {ok, UpdateRev}. + +get_user_doc_password_sha(DbName, UserName) -> + DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]), + {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), + {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []), + ok = couch_db:close(AuthDb), + {Props} = couch_doc:to_json_obj(Doc, []), + couch_util:get_value(<<"password_sha">>, Props). + +delete_user_doc(DbName, UserName) -> + DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]), + {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), + {ok, Doc} = couch_db:open_doc(AuthDb, DocId, []), + {Props} = couch_doc:to_json_obj(Doc, []), + DeletedDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DocId}, + {<<"_rev">>, couch_util:get_value(<<"_rev">>, Props)}, + {<<"_deleted">>, true} + ]}), + {ok, _} = couch_db:update_doc(AuthDb, DeletedDoc, []), + ok = couch_db:close(AuthDb). + +full_commit(DbName) -> + {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), + {ok, _} = couch_db:ensure_full_commit(AuthDb), + ok = couch_db:close(AuthDb). + +is_opened(DbName) -> + {ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]), + Monitors = couch_db:monitored_by(AuthDb) -- [self()], + ok = couch_db:close(AuthDb), + Monitors /= []. + +is_closed(DbName) -> + not is_opened(DbName). + +make_validate_test({Old, New, "ok"} = Case) -> + {test_id(Case), ?_assertEqual(ok, validate(doc(Old), doc(New)))}; +make_validate_test({Old, New, Reason} = Case) -> + Failure = ?l2b(Reason), + {test_id(Case), ?_assertThrow({forbidden, Failure}, validate(doc(Old), doc(New)))}. + +test_id({[OldRoles, OldType], [NewRoles, NewType], Result}) -> + lists:flatten(io_lib:format( + "(roles: ~w, type: ~w) -> (roles: ~w, type: ~w) ==> \"~s\"", + [OldRoles, OldType, NewRoles, NewType, Result])). + +doc([Roles, Type]) -> + couch_doc:from_json_obj({[ + {<<"_id">>,<<"org.couchdb.user:foo">>}, + {<<"_rev">>,<<"1-281c81adb1bf10927a6160f246dc0468">>}, + {<<"name">>,<<"foo">>}, + {<<"password_scheme">>,<<"simple">>}, + {<<"salt">>,<<"00000000000000000000000000000000">>}, + {<<"password_sha">>, <<"111111111111111111111111111111111111">>}] + ++ type(Type) ++ roles(Roles)}). 
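hash_password/1 above mirrors the legacy "simple" password scheme: the salt is
appended to the plaintext and the SHA-1 digest is stored hex-encoded as
password_sha. The verification these tests perform, written out as a sketch:

    % True when StoredHex is the hex SHA-1 of Password ++ Salt, which is the
    % invariant the cache tests assert against the stored user doc.
    matches_simple_scheme(Password, Salt, StoredHex) ->
        Digest = crypto:hash(sha, iolist_to_binary([Password, Salt])),
        list_to_binary(couch_util:to_hex(Digest)) =:= StoredHex.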
+ +roles(custom) -> [{<<"roles">>, [<<"custom">>]}]; +roles(missing) -> []. + +type(user) -> [{<<"type">>, <<"user">>}]; +type(other) -> [{<<"type">>, <<"other">>}]; +type(missing) -> []. + +validate(DiskDoc, NewDoc) -> + JSONCtx = {[ + {<<"db">>, <<"foo/bar">>}, + {<<"name">>, <<"foo">>}, + {<<"roles">>, [<<"_admin">>]} + ]}, + validate(DiskDoc, NewDoc, JSONCtx). + +validate(DiskDoc, NewDoc, JSONCtx) -> + {ok, DDoc0} = couch_auth_cache:auth_design_doc(<<"_design/anything">>), + DDoc = DDoc0#doc{revs = {1, [<<>>]}}, + couch_query_servers:validate_doc_update(DDoc, NewDoc, DiskDoc, JSONCtx, []). diff --git a/src/couch/test/eunit/couch_base32_tests.erl b/src/couch/test/eunit/couch_base32_tests.erl new file mode 100644 index 000000000..7e4d59a09 --- /dev/null +++ b/src/couch/test/eunit/couch_base32_tests.erl @@ -0,0 +1,28 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_base32_tests). + +-include_lib("eunit/include/eunit.hrl"). + +base32_test() -> + roundtrip(<<"">>, <<"">>), + roundtrip(<<"f">>, <<"MY======">>), + roundtrip(<<"fo">>, <<"MZXQ====">>), + roundtrip(<<"foo">>, <<"MZXW6===">>), + roundtrip(<<"foob">>, <<"MZXW6YQ=">>), + roundtrip(<<"fooba">>, <<"MZXW6YTB">>), + roundtrip(<<"foobar">>, <<"MZXW6YTBOI======">>). + +roundtrip(Plain, Encoded) -> + ?assertEqual(Plain, couch_base32:decode(Encoded)), + ?assertEqual(Encoded, couch_base32:encode(Plain)). diff --git a/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl b/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl new file mode 100644 index 000000000..6c99ceb73 --- /dev/null +++ b/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl @@ -0,0 +1,130 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_bt_engine_compactor_tests). + + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + + +-define(DELAY, 100). +-define(WAIT_DELAY_COUNT, 50). + + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + create_docs(DbName), + DbName. + + +teardown(DbName) when is_binary(DbName) -> + couch_server:delete(DbName, [?ADMIN_CTX]), + ok. + + +compaction_resume_test_() -> + { + setup, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, + { + foreach, + fun setup/0, + fun teardown/1, + [ + fun compaction_resume/1 + ] + } + }. 
+ + +compaction_resume(DbName) -> + ?_test(begin + check_db_validity(DbName), + compact_db(DbName), + check_db_validity(DbName), + + % Force an error when copying document ids + with_mecked_emsort(fun() -> + compact_db(DbName) + end), + + check_db_validity(DbName), + compact_db(DbName), + check_db_validity(DbName) + end). + + +check_db_validity(DbName) -> + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), + ?assertEqual(3, couch_db:count_changes_since(Db, 0)) + end). + + +with_mecked_emsort(Fun) -> + meck:new(couch_emsort, [passthrough]), + meck:expect(couch_emsort, iter, fun(_) -> erlang:error(kaboom) end), + try + Fun() + after + meck:unload() + end. + + +create_docs(DbName) -> + couch_util:with_db(DbName, fun(Db) -> + Doc1 = couch_doc:from_json_obj({[ + {<<"_id">>, <<"doc1">>}, + {<<"value">>, 1} + + ]}), + Doc2 = couch_doc:from_json_obj({[ + {<<"_id">>, <<"doc2">>}, + {<<"value">>, 2} + + ]}), + Doc3 = couch_doc:from_json_obj({[ + {<<"_id">>, <<"doc3">>}, + {<<"value">>, 3} + + ]}), + {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]), + couch_db:ensure_full_commit(Db) + end). + + +compact_db(DbName) -> + couch_util:with_db(DbName, fun(Db) -> + {ok, _} = couch_db:start_compact(Db) + end), + wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT). + + +wait_db_compact_done(_DbName, 0) -> + Failure = [ + {module, ?MODULE}, + {line, ?LINE}, + {reason, "DB compaction failed to finish"} + ], + erlang:error({assertion_failed, Failure}); +wait_db_compact_done(DbName, N) -> + IsDone = couch_util:with_db(DbName, fun(Db) -> + not is_pid(couch_db:get_compactor_pid(Db)) + end), + if IsDone -> ok; true -> + timer:sleep(?DELAY), + wait_db_compact_done(DbName, N - 1) + end. diff --git a/src/couch/test/eunit/couch_bt_engine_tests.erl b/src/couch/test/eunit/couch_bt_engine_tests.erl new file mode 100644 index 000000000..3e3ecbf25 --- /dev/null +++ b/src/couch/test/eunit/couch_bt_engine_tests.erl @@ -0,0 +1,20 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_bt_engine_tests). + + +-include_lib("eunit/include/eunit.hrl"). + + +couch_bt_engine_test_()-> + cpse_util:create_tests(couch, couch_bt_engine, "couch"). diff --git a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl new file mode 100644 index 000000000..3a516f8f7 --- /dev/null +++ b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl @@ -0,0 +1,243 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_bt_engine_upgrade_tests). 
+ +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + + +setup(_) -> + Ctx = test_util:start_couch(), + DbDir = config:get("couchdb", "database_dir"), + DbFileNames = [ + "db_v6_without_purge_req.couch", + "db_v6_with_1_purge_req.couch", + "db_v6_with_2_purge_req.couch", + "db_v6_with_1_purge_req_for_2_docs.couch", + "db_v7_without_purge_req.couch", + "db_v7_with_1_purge_req.couch", + "db_v7_with_2_purge_req.couch", + "db_v7_with_1_purge_req_for_2_docs.couch" + ], + NewPaths = lists:map(fun(DbFileName) -> + OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]), + NewDbFilePath = filename:join([DbDir, DbFileName]), + ok = filelib:ensure_dir(NewDbFilePath), + file:delete(NewDbFilePath), + {ok, _} = file:copy(OldDbFilePath, NewDbFilePath), + NewDbFilePath + end, DbFileNames), + {Ctx, NewPaths}. + + +teardown(_, {Ctx, Paths}) -> + test_util:stop_couch(Ctx), + lists:foreach(fun(Path) -> + file:delete(Path) + end, Paths). + + +upgrade_test_() -> + From = [6, 7], + { + "Couch Bt Engine Upgrade tests", + { + foreachx, + fun setup/1, fun teardown/2, + [{F, fun t_upgrade_without_purge_req/2} || F <- From] ++ + [{F, fun t_upgrade_with_1_purge_req/2} || F <- From] ++ + [{F, fun t_upgrade_with_N_purge_req/2} || F <- From] ++ + [{F, fun t_upgrade_with_1_purge_req_for_2_docs/2} || F <- From] + } + }. + + +t_upgrade_without_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> + ?_test(begin + % There are three documents in the fixture + % db with zero purge entries + DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) + ++ "_without_purge_req"), + + ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), + {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> + ?assertEqual(0, couch_db:get_purge_seq(Db)), + couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, []) + end), + ?assertEqual([], UpgradedPurged), + ?assertEqual(8, get_disk_version_from_header(DbName)), + {ok, Rev} = save_doc( + DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]} + ), + {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 5}, couch_db:get_doc_count(Db)), + ?assertEqual(0, couch_db:get_purge_seq(Db)) + end), + + PurgeReqs = [ + {couch_uuids:random(), <<"doc4">>, [Rev]} + ], + + {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> + couch_db:purge_docs(Db, PurgeReqs) + end), + ?assertEqual(PRevs, [Rev]), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)), + ?assertEqual(1, couch_db:get_purge_seq(Db)) + end) + end). 
+ + +t_upgrade_with_1_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> + ?_test(begin + % There are two documents in the fixture database + % with a single purge entry + DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) + ++ "_with_1_purge_req"), + + ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), + {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> + ?assertEqual(1, couch_db:get_purge_seq(Db)), + couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, []) + end), + ?assertEqual(8, get_disk_version_from_header(DbName)), + ?assertEqual([{1, <<"doc1">>}], UpgradedPurged), + + {ok, Rev} = save_doc( + DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]} + ), + {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)), + ?assertEqual(1, couch_db:get_purge_seq(Db)) + end), + + PurgeReqs = [ + {couch_uuids:random(), <<"doc4">>, [Rev]} + ], + + {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> + couch_db:purge_docs(Db, PurgeReqs) + end), + ?assertEqual(PRevs, [Rev]), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), + ?assertEqual(2, couch_db:get_purge_seq(Db)) + end) + end). + + +t_upgrade_with_N_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> + ?_test(begin + % There is one document in the fixture database + % with two docs that have been purged + DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) + ++ "_with_2_purge_req"), + + ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)), + {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> + ?assertEqual(2, couch_db:get_purge_seq(Db)), + couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, []) + end), + ?assertEqual(8, get_disk_version_from_header(DbName)), + ?assertEqual([{2, <<"doc2">>}], UpgradedPurged), + + {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}), + {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), + ?assertEqual(2, couch_db:get_purge_seq(Db)) + end), + + PurgeReqs = [ + {couch_uuids:random(), <<"doc4">>, [Rev]} + ], + + {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) -> + couch_db:purge_docs(Db, PurgeReqs) + end), + ?assertEqual(PRevs, [Rev]), + + couch_util:with_db(DbName, fun(Db) -> + ?assertEqual({ok, 2}, couch_db:get_doc_count(Db)), + ?assertEqual(3, couch_db:get_purge_seq(Db)) + end) + end). 
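Each purge request in these cases is a {UUID, DocId, Revs} triple handed to
couch_db:purge_docs/2, and the {ok, PRevs} element of the reply echoes the
revisions that were actually purged. Isolated into a helper, the pattern the
tests repeat is roughly:

    purge_one(DbName, DocId, Rev) ->
        Req = {couch_uuids:random(), DocId, [Rev]},
        couch_util:with_db(DbName, fun(Db) ->
            {ok, [{ok, PRevs}]} = couch_db:purge_docs(Db, [Req]),
            PRevs    % [Rev] when the revision existed and was a leaf
        end).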
+
+
+t_upgrade_with_1_purge_req_for_2_docs(VersionFrom, {_Ctx, _NewPaths}) ->
+    ?_test(begin
+        % There are two documents (Doc4 and Doc5) in the fixture database,
+        % with three docs (Doc1, Doc2 and Doc3) that have been purged via
+        % one purge req for Doc1 and another purge req for Doc2 and Doc3
+        DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
+            ++ "_with_1_purge_req_for_2_docs"),
+
+        ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
+        {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
+            ?assertEqual(3, couch_db:get_purge_seq(Db)),
+            couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
+        end),
+        ?assertEqual(8, get_disk_version_from_header(DbName)),
+        ?assertEqual([{3, <<"doc2">>}, {2, <<"doc3">>}], UpgradedPurged),
+
+        {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc6">>}, {<<"v">>, 1}]}),
+        {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc7">>}, {<<"v">>, 2}]}),
+
+        couch_util:with_db(DbName, fun(Db) ->
+            ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
+            ?assertEqual(3, couch_db:get_purge_seq(Db))
+        end),
+
+        PurgeReqs = [
+            {couch_uuids:random(), <<"doc6">>, [Rev]}
+        ],
+
+        {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
+            couch_db:purge_docs(Db, PurgeReqs)
+        end),
+        ?assertEqual(PRevs, [Rev]),
+
+        couch_util:with_db(DbName, fun(Db) ->
+            ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
+            ?assertEqual(4, couch_db:get_purge_seq(Db))
+        end)
+    end).
+
+
+save_doc(DbName, Json) ->
+    Doc = couch_doc:from_json_obj(Json),
+    couch_util:with_db(DbName, fun(Db) ->
+        couch_db:update_doc(Db, Doc, [])
+    end).
+
+
+fold_fun({PSeq, _UUID, Id, _Revs}, Acc) ->
+    {ok, [{PSeq, Id} | Acc]}.
+
+
+get_disk_version_from_header(DbFileName) ->
+    DbDir = config:get("couchdb", "database_dir"),
+    DbFilePath = filename:join([DbDir, ?l2b(?b2l(DbFileName) ++ ".couch")]),
+    {ok, Fd} = couch_file:open(DbFilePath, []),
+    {ok, Header} = couch_file:read_header(Fd),
+    DiskVersion = couch_bt_engine_header:disk_version(Header),
+    couch_file:close(Fd),
+    DiskVersion.
diff --git a/src/couch/test/eunit/couch_btree_tests.erl b/src/couch/test/eunit/couch_btree_tests.erl
new file mode 100644
index 000000000..c9b791d2c
--- /dev/null
+++ b/src/couch/test/eunit/couch_btree_tests.erl
@@ -0,0 +1,572 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_btree_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(ROWS, 1000).
+-define(TIMEOUT, 60). % seconds
+
+
+setup() ->
+    {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
+    {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none},
+                                             {reduce, fun reduce_fun/2}]),
+    {Fd, Btree}.
+
+setup_kvs(_) ->
+    setup().
+
+setup_red() ->
+    {_, EvenOddKVs} = lists:foldl(
+        fun(Idx, {Key, Acc}) ->
+            case Key of
+                "even" -> {"odd", [{{Key, Idx}, 1} | Acc]};
+                _ -> {"even", [{{Key, Idx}, 1} | Acc]}
+            end
+        end, {"odd", []}, lists:seq(1, ?ROWS)),
+    {Fd, Btree} = setup(),
+    {ok, Btree1} = couch_btree:add_remove(Btree, EvenOddKVs, []),
+    {Fd, Btree1}.
+setup_red(_) ->
+    setup_red().
+
+teardown(Fd) when is_pid(Fd) ->
+    ok = couch_file:close(Fd);
+teardown({Fd, _}) ->
+    teardown(Fd).
+teardown(_, {Fd, _}) ->
+    teardown(Fd).
+
+
+kvs_test_funs() ->
+    [
+        fun should_set_fd_correctly/2,
+        fun should_set_root_correctly/2,
+        fun should_create_zero_sized_btree/2,
+        fun should_set_reduce_option/2,
+        fun should_fold_over_empty_btree/2,
+        fun should_add_all_keys/2,
+        fun should_continuously_add_new_kv/2,
+        fun should_continuously_remove_keys/2,
+        fun should_insert_keys_in_reversed_order/2,
+        fun should_add_every_odd_key_remove_every_even/2,
+        fun should_add_every_even_key_remove_every_old/2
+    ].
+
+red_test_funs() ->
+    [
+        fun should_reduce_whole_range/2,
+        fun should_reduce_first_half/2,
+        fun should_reduce_second_half/2
+    ].
+
+
+btree_open_test_() ->
+    {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
+    {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none}]),
+    {
+        "Ensure that created btree is really a btree record",
+        ?_assert(is_record(Btree, btree))
+    }.
+
+sorted_kvs_test_() ->
+    Funs = kvs_test_funs(),
+    Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
+    {
+        "BTree with sorted keys",
+        {
+            setup,
+            fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+            {
+                foreachx,
+                fun setup_kvs/1, fun teardown/2,
+                [{Sorted, Fun} || Fun <- Funs]
+            }
+        }
+    }.
+
+rsorted_kvs_test_() ->
+    Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
+    Funs = kvs_test_funs(),
+    Reversed = lists:reverse(Sorted),
+    {
+        "BTree with backward sorted keys",
+        {
+            setup,
+            fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+            {
+                foreachx,
+                fun setup_kvs/1, fun teardown/2,
+                [{Reversed, Fun} || Fun <- Funs]
+            }
+        }
+    }.
+
+shuffled_kvs_test_() ->
+    Funs = kvs_test_funs(),
+    Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
+    Shuffled = shuffle(Sorted),
+    {
+        "BTree with shuffled keys",
+        {
+            setup,
+            fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+            {
+                foreachx,
+                fun setup_kvs/1, fun teardown/2,
+                [{Shuffled, Fun} || Fun <- Funs]
+            }
+        }
+    }.
+
+reductions_test_() ->
+    {
+        "BTree reductions",
+        {
+            setup,
+            fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+            [
+                {
+                    "Common tests",
+                    {
+                        foreach,
+                        fun setup_red/0, fun teardown/1,
+                        [
+                            fun should_reduce_without_specified_direction/1,
+                            fun should_reduce_forward/1,
+                            fun should_reduce_backward/1
+                        ]
+                    }
+                },
+                {
+                    "Range requests",
+                    [
+                        {
+                            "Forward direction",
+                            {
+                                foreachx,
+                                fun setup_red/1, fun teardown/2,
+                                [{fwd, F} || F <- red_test_funs()]
+                            }
+                        },
+                        {
+                            "Backward direction",
+                            {
+                                foreachx,
+                                fun setup_red/1, fun teardown/2,
+                                [{rev, F} || F <- red_test_funs()]
+                            }
+                        }
+                    ]
+                }
+            ]
+        }
+    }.
+
+
+should_set_fd_correctly(_, {Fd, Btree}) ->
+    ?_assertMatch(Fd, Btree#btree.fd).
+
+should_set_root_correctly(_, {_, Btree}) ->
+    ?_assertMatch(nil, Btree#btree.root).
+
+should_create_zero_sized_btree(_, {_, Btree}) ->
+    ?_assertMatch(0, couch_btree:size(Btree)).
+
+should_set_reduce_option(_, {_, Btree}) ->
+    ReduceFun = fun reduce_fun/2,
+    Btree1 = couch_btree:set_options(Btree, [{reduce, ReduceFun}]),
+    ?_assertMatch(ReduceFun, Btree1#btree.reduce).
+
+should_fold_over_empty_btree(_, {_, Btree}) ->
+    {ok, _, EmptyRes} = couch_btree:foldl(Btree, fun(_, X) -> {ok, X+1} end, 0),
+    ?_assertEqual(EmptyRes, 0).
+ +should_add_all_keys(KeyValues, {Fd, Btree}) -> + {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []), + [ + should_return_complete_btree_on_adding_all_keys(KeyValues, Btree1), + should_have_non_zero_size(Btree1), + should_have_lesser_size_than_file(Fd, Btree1), + should_keep_root_pointer_to_kp_node(Fd, Btree1), + should_remove_all_keys(KeyValues, Btree1) + ]. + +should_return_complete_btree_on_adding_all_keys(KeyValues, Btree) -> + ?_assert(test_btree(Btree, KeyValues)). + +should_have_non_zero_size(Btree) -> + ?_assert(couch_btree:size(Btree) > 0). + +should_have_lesser_size_than_file(Fd, Btree) -> + ?_assert((couch_btree:size(Btree) =< couch_file:bytes(Fd))). + +should_keep_root_pointer_to_kp_node(Fd, Btree) -> + ?_assertMatch({ok, {kp_node, _}}, + couch_file:pread_term(Fd, element(1, Btree#btree.root))). + +should_remove_all_keys(KeyValues, Btree) -> + Keys = keys(KeyValues), + {ok, Btree1} = couch_btree:add_remove(Btree, [], Keys), + { + "Should remove all the keys", + [ + should_produce_valid_btree(Btree1, []), + should_be_empty(Btree1) + ] + }. + +should_continuously_add_new_kv(KeyValues, {_, Btree}) -> + {Btree1, _} = lists:foldl( + fun(KV, {BtAcc, PrevSize}) -> + {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []), + ?assert(couch_btree:size(BtAcc2) > PrevSize), + {BtAcc2, couch_btree:size(BtAcc2)} + end, {Btree, couch_btree:size(Btree)}, KeyValues), + { + "Should continuously add key-values to btree", + [ + should_produce_valid_btree(Btree1, KeyValues), + should_not_be_empty(Btree1) + ] + }. + +should_continuously_remove_keys(KeyValues, {_, Btree}) -> + {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []), + {Btree2, _} = lists:foldl( + fun({K, _}, {BtAcc, PrevSize}) -> + {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]), + ?assert(couch_btree:size(BtAcc2) < PrevSize), + {BtAcc2, couch_btree:size(BtAcc2)} + end, {Btree1, couch_btree:size(Btree1)}, KeyValues), + { + "Should continuously remove keys from btree", + [ + should_produce_valid_btree(Btree2, []), + should_be_empty(Btree2) + ] + }. + +should_insert_keys_in_reversed_order(KeyValues, {_, Btree}) -> + KeyValuesRev = lists:reverse(KeyValues), + {Btree1, _} = lists:foldl( + fun(KV, {BtAcc, PrevSize}) -> + {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []), + ?assert(couch_btree:size(BtAcc2) > PrevSize), + {BtAcc2, couch_btree:size(BtAcc2)} + end, {Btree, couch_btree:size(Btree)}, KeyValuesRev), + should_produce_valid_btree(Btree1, KeyValues). + +should_add_every_odd_key_remove_every_even(KeyValues, {_, Btree}) -> + {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []), + {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) -> + case Count rem 2 == 0 of + true -> {Count + 1, [X | Left], Right}; + false -> {Count + 1, Left, [X | Right]} + end + end, {0, [], []}, KeyValues), + {timeout, ?TIMEOUT, + ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1)) + }. + +should_add_every_even_key_remove_every_old(KeyValues, {_, Btree}) -> + {ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []), + {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) -> + case Count rem 2 == 0 of + true -> {Count + 1, [X | Left], Right}; + false -> {Count + 1, Left, [X | Right]} + end + end, {0, [], []}, KeyValues), + {timeout, ?TIMEOUT, + ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0)) + }. + + +should_reduce_without_specified_direction({_, Btree}) -> + ?_assertMatch( + {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]}, + fold_reduce(Btree, [])). 
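The expected counts in the reduction cases follow from setup_red/0: the ?ROWS
inserted keys alternate between the "even" and "odd" groups, every value is 1,
and reduce_fun/2 (defined later in the module) counts on reduce and sums on
rereduce, so each group reduces to ?ROWS div 2. Worked out for a hypothetical
?ROWS of 8, the fold result matches the assertion pattern:

    % inserted: {{"odd",1},1}, {{"even",2},1}, ..., {{"even",8},1}
    % grouped reduce: 4 "even" kvs -> 4, 4 "odd" kvs -> 4
    % consing onto the accumulator leaves the last-visited group first:
    {ok, [{{"odd", _}, 4}, {{"even", _}, 4}]}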
+ +should_reduce_forward({_, Btree}) -> + ?_assertMatch( + {ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]}, + fold_reduce(Btree, [{dir, fwd}])). + +should_reduce_backward({_, Btree}) -> + ?_assertMatch( + {ok, [{{"even", _}, ?ROWS div 2}, {{"odd", _}, ?ROWS div 2}]}, + fold_reduce(Btree, [{dir, rev}])). + +should_reduce_whole_range(fwd, {_, Btree}) -> + {SK, EK} = {{"even", 0}, {"odd", ?ROWS - 1}}, + [ + { + "include endkey", + ?_assertMatch( + {ok, [{{"odd", 1}, ?ROWS div 2}, + {{"even", 2}, ?ROWS div 2}]}, + fold_reduce(Btree, [{dir, fwd}, + {start_key, SK}, + {end_key, EK}])) + }, + { + "exclude endkey", + ?_assertMatch( + {ok, [{{"odd", 1}, (?ROWS div 2) - 1}, + {{"even", 2}, ?ROWS div 2}]}, + fold_reduce(Btree, [{dir, fwd}, + {start_key, SK}, + {end_key_gt, EK}])) + } + ]; +should_reduce_whole_range(rev, {_, Btree}) -> + {SK, EK} = {{"odd", ?ROWS - 1}, {"even", 2}}, + [ + { + "include endkey", + ?_assertMatch( + {ok, [{{"even", ?ROWS}, ?ROWS div 2}, + {{"odd", ?ROWS - 1}, ?ROWS div 2}]}, + fold_reduce(Btree, [{dir, rev}, + {start_key, SK}, + {end_key, EK}])) + }, + { + "exclude endkey", + ?_assertMatch( + {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1}, + {{"odd", ?ROWS - 1}, ?ROWS div 2}]}, + fold_reduce(Btree, [{dir, rev}, + {start_key, SK}, + {end_key_gt, EK}])) + } + ]. + +should_reduce_first_half(fwd, {_, Btree}) -> + {SK, EK} = {{"even", 0}, {"odd", (?ROWS div 2) - 1}}, + [ + { + "include endkey", + ?_assertMatch( + {ok, [{{"odd", 1}, ?ROWS div 4}, + {{"even", 2}, ?ROWS div 2}]}, + fold_reduce(Btree, [{dir, fwd}, + {start_key, SK}, {end_key, EK}])) + }, + { + "exclude endkey", + ?_assertMatch( + {ok, [{{"odd", 1}, (?ROWS div 4) - 1}, + {{"even", 2}, ?ROWS div 2}]}, + fold_reduce(Btree, [{dir, fwd}, + {start_key, SK}, + {end_key_gt, EK}])) + } + ]; +should_reduce_first_half(rev, {_, Btree}) -> + {SK, EK} = {{"odd", ?ROWS - 1}, {"even", ?ROWS div 2}}, + [ + { + "include endkey", + ?_assertMatch( + {ok, [{{"even", ?ROWS}, (?ROWS div 4) + 1}, + {{"odd", ?ROWS - 1}, ?ROWS div 2}]}, + fold_reduce(Btree, [{dir, rev}, + {start_key, SK}, + {end_key, EK}])) + }, + { + "exclude endkey", + ?_assertMatch( + {ok, [{{"even", ?ROWS}, ?ROWS div 4}, + {{"odd", ?ROWS - 1}, ?ROWS div 2}]}, + fold_reduce(Btree, [{dir, rev}, + {start_key, SK}, + {end_key_gt, EK}])) + } + ]. + +should_reduce_second_half(fwd, {_, Btree}) -> + {SK, EK} = {{"even", ?ROWS div 2}, {"odd", ?ROWS - 1}}, + [ + { + "include endkey", + ?_assertMatch( + {ok, [{{"odd", 1}, ?ROWS div 2}, + {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]}, + fold_reduce(Btree, [{dir, fwd}, + {start_key, SK}, + {end_key, EK}])) + }, + { + "exclude endkey", + ?_assertMatch( + {ok, [{{"odd", 1}, (?ROWS div 2) - 1}, + {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]}, + fold_reduce(Btree, [{dir, fwd}, + {start_key, SK}, + {end_key_gt, EK}])) + } + ]; +should_reduce_second_half(rev, {_, Btree}) -> + {SK, EK} = {{"odd", (?ROWS div 2) + 1}, {"even", 2}}, + [ + { + "include endkey", + ?_assertMatch( + {ok, [{{"even", ?ROWS}, ?ROWS div 2}, + {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]}, + fold_reduce(Btree, [{dir, rev}, + {start_key, SK}, + {end_key, EK}])) + }, + { + "exclude endkey", + ?_assertMatch( + {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1}, + {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]}, + fold_reduce(Btree, [{dir, rev}, + {start_key, SK}, + {end_key_gt, EK}])) + } + ]. + +should_produce_valid_btree(Btree, KeyValues) -> + ?_assert(test_btree(Btree, KeyValues)). + +should_be_empty(Btree) -> + ?_assertEqual(couch_btree:size(Btree), 0). 
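The half-range expectations above are the same arithmetic restricted to a key
range: a forward fold from {"even", 0} to {"odd", ?ROWS div 2 - 1} still spans
every "even" key (they all sort before the "odd" ones) but only the first
quarter's worth of "odd" keys. For ?ROWS = 1000:

    % "even" keys in range: 2, 4, ..., 1000  -> 500 = ?ROWS div 2
    % "odd" keys in range:  1, 3, ..., 499   -> 250 = ?ROWS div 4
    % with end_key_gt, the bound {"odd", 499} itself is excluded -> 249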
+
+should_not_be_empty(Btree) ->
+    ?_assert(couch_btree:size(Btree) > 0).
+
+fold_reduce(Btree, Opts) ->
+    GroupFun = fun({K1, _}, {K2, _}) ->
+        K1 == K2
+    end,
+    FoldFun = fun(GroupedKey, Unreduced, Acc) ->
+        {ok, [{GroupedKey, couch_btree:final_reduce(Btree, Unreduced)} | Acc]}
+    end,
+    couch_btree:fold_reduce(Btree, FoldFun, [],
+        [{key_group_fun, GroupFun}] ++ Opts).
+
+
+keys(KVs) ->
+    [K || {K, _} <- KVs].
+
+reduce_fun(reduce, KVs) ->
+    length(KVs);
+reduce_fun(rereduce, Reds) ->
+    lists:sum(Reds).
+
+
+shuffle(List) ->
+    randomize(round(math:log(length(List)) + 0.5), List).
+
+randomize(1, List) ->
+    randomize(List);
+randomize(T, List) ->
+    lists:foldl(
+        fun(_E, Acc) ->
+            randomize(Acc)
+        end, randomize(List), lists:seq(1, (T - 1))).
+
+randomize(List) ->
+    D = lists:map(fun(A) -> {couch_rand:uniform(), A} end, List),
+    {_, D1} = lists:unzip(lists:keysort(1, D)),
+    D1.
+
+test_btree(Btree, KeyValues) ->
+    ok = test_key_access(Btree, KeyValues),
+    ok = test_lookup_access(Btree, KeyValues),
+    ok = test_final_reductions(Btree, KeyValues),
+    ok = test_traversal_callbacks(Btree, KeyValues),
+    true.
+
+test_add_remove(Btree, OutKeyValues, RemainingKeyValues) ->
+    Btree2 = lists:foldl(
+        fun({K, _}, BtAcc) ->
+            {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
+            BtAcc2
+        end, Btree, OutKeyValues),
+    true = test_btree(Btree2, RemainingKeyValues),
+
+    Btree3 = lists:foldl(
+        fun(KV, BtAcc) ->
+            {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
+            BtAcc2
+        end, Btree2, OutKeyValues),
+    true = test_btree(Btree3, OutKeyValues ++ RemainingKeyValues).
+
+test_key_access(Btree, List) ->
+    % Consume the expected elements in order; any mismatch aborts the fold.
+    FoldFun = fun(Element, {[HAcc|TAcc], Count}) ->
+        case Element == HAcc of
+            true -> {ok, {TAcc, Count + 1}};
+            false -> erlang:error({unexpected_element, Element, HAcc})
+        end
+    end,
+    Length = length(List),
+    Sorted = lists:sort(List),
+    {ok, _, {[], Length}} = couch_btree:foldl(Btree, FoldFun, {Sorted, 0}),
+    % The reverse fold visits keys in descending order, hence the
+    % reversed list of expected elements.
+    {ok, _, {[], Length}} = couch_btree:fold(Btree, FoldFun,
+        {lists:reverse(Sorted), 0}, [{dir, rev}]),
+    ok.
+
+test_lookup_access(Btree, KeyValues) ->
+    FoldFun = fun({Key, Value}, {Key, Value}) -> {stop, true} end,
+    lists:foreach(
+        fun({Key, Value}) ->
+            [{ok, {Key, Value}}] = couch_btree:lookup(Btree, [Key]),
+            {ok, _, true} = couch_btree:foldl(Btree, FoldFun,
+                {Key, Value}, [{start_key, Key}])
+        end, KeyValues).
+
+test_final_reductions(Btree, KeyValues) ->
+    KVLen = length(KeyValues),
+    FoldLFun = fun(_X, LeadingReds, Acc) ->
+        CountToStart = KVLen div 3 + Acc,
+        CountToStart = couch_btree:final_reduce(Btree, LeadingReds),
+        {ok, Acc + 1}
+    end,
+    FoldRFun = fun(_X, LeadingReds, Acc) ->
+        CountToEnd = KVLen - KVLen div 3 + Acc,
+        CountToEnd = couch_btree:final_reduce(Btree, LeadingReds),
+        {ok, Acc + 1}
+    end,
+    {LStartKey, _} = case KVLen of
+        0 -> {nil, nil};
+        _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues))
+    end,
+    {RStartKey, _} = case KVLen of
+        0 -> {nil, nil};
+        _ -> lists:nth(KVLen div 3, lists:sort(KeyValues))
+    end,
+    {ok, _, FoldLRed} = couch_btree:foldl(Btree, FoldLFun, 0,
+        [{start_key, LStartKey}]),
+    {ok, _, FoldRRed} = couch_btree:fold(Btree, FoldRFun, 0,
+        [{dir, rev}, {start_key, RStartKey}]),
+    KVLen = FoldLRed + FoldRRed,
+    ok.
+
+test_traversal_callbacks(Btree, _KeyValues) ->
+    FoldFun = fun
+        (visit, _GroupedKey, _Unreduced, Acc) ->
+            {ok, Acc andalso false};
+        (traverse, _LK, _Red, Acc) ->
+            {skip, Acc andalso true}
+    end,
+    % With 250 items the root is a kp. Always skipping should reduce to true.
+    {ok, _, true} = couch_btree:fold(Btree, FoldFun, true, [{dir, fwd}]),
+    ok.
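shuffle/1 above is a decorate-sort-undecorate shuffle: each element is paired
with a random float, the pairs are sorted on the float, and the tags are
stripped; randomize/2 then repeats the pass a few times. One pass in
isolation:

    shuffle_once(List) ->
        Decorated = [{couch_rand:uniform(), X} || X <- List],
        {_, Shuffled} = lists:unzip(lists:keysort(1, Decorated)),
        Shuffled.    % e.g. [c, a, d, b] for [a, b, c, d]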
diff --git a/src/couch/test/eunit/couch_changes_tests.erl b/src/couch/test/eunit/couch_changes_tests.erl new file mode 100644 index 000000000..0c2f5f91f --- /dev/null +++ b/src/couch/test/eunit/couch_changes_tests.erl @@ -0,0 +1,1001 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_changes_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 6000). +-define(TEST_TIMEOUT, 10000). + +-record(row, { + id, + seq, + deleted = false, + doc = nil +}). + +setup() -> + DbName = ?tempdb(), + {ok, Db} = create_db(DbName), + Revs = [R || {ok, R} <- [ + save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}), + save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}), + save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}), + save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}), + save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}) + ]], + Rev = lists:nth(3, Revs), + couch_db:ensure_full_commit(Db), + {ok, Db1} = couch_db:reopen(Db), + + {ok, Rev1} = save_doc(Db1, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev}]}), + Revs1 = Revs ++ [Rev1], + Revs2 = Revs1 ++ [R || {ok, R} <- [ + save_doc(Db1, {[{<<"_id">>, <<"doc6">>}]}), + save_doc(Db1, {[{<<"_id">>, <<"_design/foo">>}]}), + save_doc(Db1, {[{<<"_id">>, <<"doc7">>}]}), + save_doc(Db1, {[{<<"_id">>, <<"doc8">>}]}) + ]], + config:set("native_query_servers", "erlang", "{couch_native_process, start_link, []}", _Persist=false), + {DbName, list_to_tuple(Revs2)}. + +teardown({DbName, _}) -> + config:delete("native_query_servers", "erlang", _Persist=false), + delete_db(DbName), + ok. + + +changes_test_() -> + { + "Changes feed", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + [ + filter_by_selector(), + filter_by_doc_id(), + filter_by_design(), + continuous_feed(), + %%filter_by_custom_function() + filter_by_filter_function(), + filter_by_view() + ] + } + }. + +filter_by_doc_id() -> + { + "Filter _doc_id", + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_filter_by_specific_doc_ids/1, + fun should_filter_by_specific_doc_ids_descending/1, + fun should_filter_by_specific_doc_ids_with_since/1, + fun should_filter_by_specific_doc_ids_no_result/1, + fun should_handle_deleted_docs/1 + ] + } + }. + +filter_by_selector() -> + { + "Filter _selector", + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_select_basic/1, + fun should_select_with_since/1, + fun should_select_when_no_result/1, + fun should_select_with_deleted_docs/1, + fun should_select_with_continuous/1, + fun should_stop_selector_when_db_deleted/1, + fun should_select_with_empty_fields/1, + fun should_select_with_fields/1 + ] + } + }. + + +filter_by_design() -> + { + "Filter _design", + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_emit_only_design_documents/1 + ] + } + }. + +%% filter_by_custom_function() -> +%% { +%% "Filter function", +%% { +%% foreach, +%% fun setup/0, fun teardown/1, +%% [ +%% fun should_receive_heartbeats/1 +%% ] +%% } +%% }. 
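The sequence numbers asserted in the cases below all derive from setup/0
above: doc1 through doc5 take update seqs 1 through 5, the doc3 update
re-emits doc3 at seq 6 (superseding its seq 3 entry), and doc6, _design/foo,
doc7 and doc8 land at seqs 7 through 10. In outline:

    % seq 1..5  : doc1..doc5 created
    % seq 6     : doc3 updated (its seq 3 entry is superseded)
    % seq 7..10 : doc6, _design/foo, doc7, doc8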
+ +filter_by_filter_function() -> + { + "Filter by filters", + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_filter_by_doc_attribute/1, + fun should_filter_by_user_ctx/1 + ] + } + }. + +filter_by_view() -> + { + "Filter _view", + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_filter_by_view/1, + fun should_filter_by_fast_view/1, + fun should_filter_by_erlang_view/1 + ] + } + }. + +continuous_feed() -> + { + "Continuous Feed", + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_filter_continuous_feed_by_specific_doc_ids/1, + fun should_end_changes_when_db_deleted/1 + ] + } + }. + + +should_filter_by_specific_doc_ids({DbName, _}) -> + ?_test( + begin + ChArgs = #changes_args{ + filter = "_doc_ids" + }, + DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], + Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + + ?assertEqual(2, length(Rows)), + [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows, + ?assertEqual(<<"doc4">>, Id1), + ?assertEqual(4, Seq1), + ?assertEqual(<<"doc3">>, Id2), + ?assertEqual(6, Seq2), + ?assertEqual(UpSeq, LastSeq) + end). + +should_filter_by_specific_doc_ids_descending({DbName, _}) -> + ?_test( + begin + ChArgs = #changes_args{ + filter = "_doc_ids", + dir = rev + }, + DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], + Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, + {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req), + + ?assertEqual(2, length(Rows)), + [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows, + ?assertEqual(<<"doc3">>, Id1), + ?assertEqual(6, Seq1), + ?assertEqual(<<"doc4">>, Id2), + ?assertEqual(4, Seq2), + ?assertEqual(4, LastSeq) + end). + +should_filter_by_specific_doc_ids_with_since({DbName, _}) -> + ?_test( + begin + ChArgs = #changes_args{ + filter = "_doc_ids", + since = 5 + }, + DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], + Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + + ?assertEqual(1, length(Rows)), + [#row{seq = Seq1, id = Id1}] = Rows, + ?assertEqual(<<"doc3">>, Id1), + ?assertEqual(6, Seq1), + ?assertEqual(UpSeq, LastSeq) + end). + +should_filter_by_specific_doc_ids_no_result({DbName, _}) -> + ?_test( + begin + ChArgs = #changes_args{ + filter = "_doc_ids", + since = 6 + }, + DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], + Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + + ?assertEqual(0, length(Rows)), + ?assertEqual(UpSeq, LastSeq) + end). + +should_handle_deleted_docs({DbName, Revs}) -> + ?_test( + begin + Rev3_2 = element(6, Revs), + {ok, Db} = couch_db:open_int(DbName, []), + {ok, _} = save_doc( + Db, + {[{<<"_id">>, <<"doc3">>}, + {<<"_deleted">>, true}, + {<<"_rev">>, Rev3_2}]}), + + ChArgs = #changes_args{ + filter = "_doc_ids", + since = 9 + }, + DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], + Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, + {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req), + + ?assertEqual(1, length(Rows)), + ?assertMatch( + [#row{seq = LastSeq, id = <<"doc3">>, deleted = true}], + Rows + ), + ?assertEqual(11, LastSeq) + end). 
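For orientation, the {json_req, ...} terms in these tests stand in for an HTTP
request body. A rough node-local equivalent of the _doc_ids cases above, under
the assumption that Url names a running database and that test_request:post/3
mirrors the put/3 helper used later in this series:

    Body = "{\"doc_ids\": [\"doc3\", \"doc4\", \"doc9999\"]}",
    {ok, 200, _, _} = test_request:post(Url ++ "/_changes?filter=_doc_ids",
        [{"Content-Type", "application/json"}], Body).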
+ +should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) -> + ?_test( + begin + {ok, Db} = couch_db:open_int(DbName, []), + ChangesArgs = #changes_args{ + filter = "_doc_ids", + feed = "continuous" + }, + DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], + Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, + reset_row_notifications(), + Consumer = spawn_consumer(DbName, ChangesArgs, Req), + ?assertEqual(ok, wait_row_notifications(2)), + ok = pause(Consumer), + + Rows = get_rows(Consumer), + ?assertEqual(2, length(Rows)), + [#row{seq = Seq1, id = Id1}, #row{seq = Seq2, id = Id2}] = Rows, + ?assertEqual(<<"doc4">>, Id1), + ?assertEqual(4, Seq1), + ?assertEqual(<<"doc3">>, Id2), + ?assertEqual(6, Seq2), + + clear_rows(Consumer), + {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}), + {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}), + ok = unpause(Consumer), + timer:sleep(100), + ok = pause(Consumer), + ?assertEqual([], get_rows(Consumer)), + + Rev4 = element(4, Revs), + Rev3_2 = element(6, Revs), + {ok, Rev4_2} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}, + {<<"_rev">>, Rev4}]}), + {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}), + {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}, + {<<"_rev">>, Rev4_2}]}), + {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}), + {ok, Rev3_3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, + {<<"_rev">>, Rev3_2}]}), + reset_row_notifications(), + ok = unpause(Consumer), + ?assertEqual(ok, wait_row_notifications(2)), + ok = pause(Consumer), + + NewRows = get_rows(Consumer), + ?assertEqual(2, length(NewRows)), + [Row14, Row16] = NewRows, + ?assertEqual(<<"doc4">>, Row14#row.id), + ?assertEqual(15, Row14#row.seq), + ?assertEqual(<<"doc3">>, Row16#row.id), + ?assertEqual(17, Row16#row.seq), + + clear_rows(Consumer), + {ok, _Rev3_4} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}, + {<<"_rev">>, Rev3_3}]}), + reset_row_notifications(), + ok = unpause(Consumer), + ?assertEqual(ok, wait_row_notifications(1)), + ok = pause(Consumer), + + FinalRows = get_rows(Consumer), + + ok = unpause(Consumer), + stop_consumer(Consumer), + + ?assertMatch([#row{seq = 18, id = <<"doc3">>}], FinalRows) + end). + + +should_end_changes_when_db_deleted({DbName, _Revs}) -> + ?_test(begin + {ok, _Db} = couch_db:open_int(DbName, []), + ChangesArgs = #changes_args{ + filter = "_doc_ids", + feed = "continuous" + }, + DocIds = [<<"doc3">>, <<"doc4">>, <<"doc9999">>], + Req = {json_req, {[{<<"doc_ids">>, DocIds}]}}, + Consumer = spawn_consumer(DbName, ChangesArgs, Req), + ok = pause(Consumer), + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + ok = unpause(Consumer), + {_Rows, _LastSeq} = wait_finished(Consumer), + stop_consumer(Consumer), + ok + end). + + +should_select_basic({DbName, _}) -> + ?_test( + begin + ChArgs = #changes_args{filter = "_selector"}, + Selector = {[{<<"_id">>, <<"doc3">>}]}, + Req = {json_req, {[{<<"selector">>, Selector}]}}, + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + ?assertEqual(1, length(Rows)), + [#row{seq = Seq, id = Id}] = Rows, + ?assertEqual(<<"doc3">>, Id), + ?assertEqual(6, Seq), + ?assertEqual(UpSeq, LastSeq) + end). 
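The _selector cases beginning above and continuing below use the same request
shape with a Mango selector in the body; roughly, under the same assumptions
as the previous sketch (add feed=continuous for the continuous variants):

    Body = "{\"selector\": {\"_id\": \"doc3\"}}",
    {ok, 200, _, _} = test_request:post(Url ++ "/_changes?filter=_selector",
        [{"Content-Type", "application/json"}], Body).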
+ +should_select_with_since({DbName, _}) -> + ?_test( + begin + ChArgs = #changes_args{filter = "_selector", since = 9}, + GteDoc2 = {[{<<"$gte">>, <<"doc1">>}]}, + Selector = {[{<<"_id">>, GteDoc2}]}, + Req = {json_req, {[{<<"selector">>, Selector}]}}, + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + ?assertEqual(1, length(Rows)), + [#row{seq = Seq, id = Id}] = Rows, + ?assertEqual(<<"doc8">>, Id), + ?assertEqual(10, Seq), + ?assertEqual(UpSeq, LastSeq) + end). + +should_select_when_no_result({DbName, _}) -> + ?_test( + begin + ChArgs = #changes_args{filter = "_selector"}, + Selector = {[{<<"_id">>, <<"nopers">>}]}, + Req = {json_req, {[{<<"selector">>, Selector}]}}, + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + ?assertEqual(0, length(Rows)), + ?assertEqual(UpSeq, LastSeq) + end). + +should_select_with_deleted_docs({DbName, Revs}) -> + ?_test( + begin + Rev3_2 = element(6, Revs), + {ok, Db} = couch_db:open_int(DbName, []), + {ok, _} = save_doc( + Db, + {[{<<"_id">>, <<"doc3">>}, + {<<"_deleted">>, true}, + {<<"_rev">>, Rev3_2}]}), + ChArgs = #changes_args{filter = "_selector"}, + Selector = {[{<<"_id">>, <<"doc3">>}]}, + Req = {json_req, {[{<<"selector">>, Selector}]}}, + {Rows, LastSeq, _} = run_changes_query(DbName, ChArgs, Req), + ?assertMatch( + [#row{seq = LastSeq, id = <<"doc3">>, deleted = true}], + Rows + ), + ?assertEqual(11, LastSeq) + end). + +should_select_with_continuous({DbName, Revs}) -> + ?_test( + begin + {ok, Db} = couch_db:open_int(DbName, []), + ChArgs = #changes_args{filter = "_selector", feed = "continuous"}, + GteDoc8 = {[{<<"$gte">>, <<"doc8">>}]}, + Selector = {[{<<"_id">>, GteDoc8}]}, + Req = {json_req, {[{<<"selector">>, Selector}]}}, + reset_row_notifications(), + Consumer = spawn_consumer(DbName, ChArgs, Req), + ?assertEqual(ok, wait_row_notifications(1)), + ok = pause(Consumer), + Rows = get_rows(Consumer), + ?assertMatch( + [#row{seq = 10, id = <<"doc8">>, deleted = false}], + Rows + ), + clear_rows(Consumer), + {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc01">>}]}), + ok = unpause(Consumer), + timer:sleep(100), + ok = pause(Consumer), + ?assertEqual([], get_rows(Consumer)), + Rev4 = element(4, Revs), + Rev8 = element(10, Revs), + {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}, + {<<"_rev">>, Rev8}]}), + {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}, + {<<"_rev">>, Rev4}]}), + reset_row_notifications(), + ok = unpause(Consumer), + ?assertEqual(ok, wait_row_notifications(1)), + ok = pause(Consumer), + NewRows = get_rows(Consumer), + ?assertMatch( + [#row{seq = _, id = <<"doc8">>, deleted = false}], + NewRows + ) + end). + +should_stop_selector_when_db_deleted({DbName, _Revs}) -> + ?_test( + begin + {ok, _Db} = couch_db:open_int(DbName, []), + ChArgs = #changes_args{filter = "_selector", feed = "continuous"}, + Selector = {[{<<"_id">>, <<"doc3">>}]}, + Req = {json_req, {[{<<"selector">>, Selector}]}}, + Consumer = spawn_consumer(DbName, ChArgs, Req), + ok = pause(Consumer), + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + ok = unpause(Consumer), + {_Rows, _LastSeq} = wait_finished(Consumer), + stop_consumer(Consumer), + ok + end). 
+ + +should_select_with_empty_fields({DbName, _}) -> + ?_test( + begin + ChArgs = #changes_args{filter = "_selector", include_docs=true}, + Selector = {[{<<"_id">>, <<"doc3">>}]}, + Req = {json_req, {[{<<"selector">>, Selector}, + {<<"fields">>, []}]}}, + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + ?assertEqual(1, length(Rows)), + [#row{seq = Seq, id = Id, doc = Doc}] = Rows, + ?assertEqual(<<"doc3">>, Id), + ?assertEqual(6, Seq), + ?assertEqual(UpSeq, LastSeq), + ?assertMatch({[{_K1, _V1}, {_K2, _V2}]}, Doc) + end). + +should_select_with_fields({DbName, _}) -> + ?_test( + begin + ChArgs = #changes_args{filter = "_selector", include_docs=true}, + Selector = {[{<<"_id">>, <<"doc3">>}]}, + Req = {json_req, {[{<<"selector">>, Selector}, + {<<"fields">>, [<<"_id">>, <<"nope">>]}]}}, + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + ?assertEqual(1, length(Rows)), + [#row{seq = Seq, id = Id, doc = Doc}] = Rows, + ?assertEqual(<<"doc3">>, Id), + ?assertEqual(6, Seq), + ?assertEqual(UpSeq, LastSeq), + ?assertMatch(Doc, {[{<<"_id">>, <<"doc3">>}]}) + end). + + +should_emit_only_design_documents({DbName, Revs}) -> + ?_test( + begin + ChArgs = #changes_args{ + filter = "_design" + }, + Req = {json_req, null}, + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + + ?assertEqual(1, length(Rows)), + ?assertEqual(UpSeq, LastSeq), + ?assertEqual([#row{seq = 8, id = <<"_design/foo">>}], Rows), + + + {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), + {ok, _} = save_doc(Db, {[{<<"_id">>, <<"_design/foo">>}, + {<<"_rev">>, element(8, Revs)}, + {<<"_deleted">>, true}]}), + + couch_db:close(Db), + {Rows2, LastSeq2, _} = run_changes_query(DbName, ChArgs, Req), + + UpSeq2 = UpSeq + 1, + + ?assertEqual(1, length(Rows2)), + ?assertEqual(UpSeq2, LastSeq2), + ?assertEqual([#row{seq = 11, + id = <<"_design/foo">>, + deleted = true}], + Rows2) + end). 
+ +%% should_receive_heartbeats(_) -> +%% {timeout, ?TEST_TIMEOUT div 1000, +%% ?_test( +%% begin +%% DbName = ?tempdb(), +%% Timeout = 100, +%% {ok, Db} = create_db(DbName), + +%% {ok, _} = save_doc(Db, {[ +%% {<<"_id">>, <<"_design/filtered">>}, +%% {<<"language">>, <<"javascript">>}, +%% {<<"filters">>, {[ +%% {<<"foo">>, <<"function(doc) { +%% return ['doc10', 'doc11', 'doc12'].indexOf(doc._id) != -1;}">> +%% }]}} +%% ]}), + +%% ChangesArgs = #changes_args{ +%% filter = "filtered/foo", +%% feed = "continuous", +%% timeout = 10000, +%% heartbeat = 1000 +%% }, +%% Consumer = spawn_consumer(DbName, ChangesArgs, {json_req, null}), + +%% {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}), +%% timer:sleep(Timeout), +%% {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}), +%% timer:sleep(Timeout), +%% {ok, _Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}), +%% timer:sleep(Timeout), +%% {ok, _Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}), +%% timer:sleep(Timeout), +%% {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}), +%% timer:sleep(Timeout), +%% {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}), +%% timer:sleep(Timeout), +%% {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}), +%% timer:sleep(Timeout), +%% {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}), +%% timer:sleep(Timeout), +%% {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}), + +%% Heartbeats = get_heartbeats(Consumer), +%% ?assert(Heartbeats > 0), + +%% {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}), +%% timer:sleep(Timeout), +%% {ok, _Rev11} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}), +%% timer:sleep(Timeout), +%% {ok, _Rev12} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}), + +%% Heartbeats2 = get_heartbeats(Consumer), +%% ?assert(Heartbeats2 > Heartbeats), + +%% Rows = get_rows(Consumer), +%% ?assertEqual(3, length(Rows)), + +%% {ok, _Rev13} = save_doc(Db, {[{<<"_id">>, <<"doc13">>}]}), +%% timer:sleep(Timeout), +%% {ok, _Rev14} = save_doc(Db, {[{<<"_id">>, <<"doc14">>}]}), +%% timer:sleep(Timeout), + +%% Heartbeats3 = get_heartbeats(Consumer), +%% ?assert(Heartbeats3 > Heartbeats2) +%% end)}. + +should_filter_by_doc_attribute({DbName, _}) -> + ?_test( + begin + DDocId = <<"_design/app">>, + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DDocId}, + {<<"language">>, <<"javascript">>}, + {<<"filters">>, {[ + {<<"valid">>, <<"function(doc, req) {" + " if (doc._id == 'doc3') {" + " return true; " + "} }">>} + ]}} + ]}), + ChArgs = #changes_args{filter = "app/valid"}, + Req = {json_req, null}, + ok = update_ddoc(DbName, DDoc), + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + ?assertEqual(1, length(Rows)), + [#row{seq = Seq, id = Id}] = Rows, + ?assertEqual(<<"doc3">>, Id), + ?assertEqual(6, Seq), + ?assertEqual(UpSeq, LastSeq) + end). 
+ +should_filter_by_user_ctx({DbName, _}) -> + ?_test( + begin + DDocId = <<"_design/app">>, + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DDocId}, + {<<"language">>, <<"javascript">>}, + {<<"filters">>, {[ + {<<"valid">>, <<"function(doc, req) {" + " if (req.userCtx.name == doc._id) {" + " return true; " + "} }">>} + ]}} + ]}), + ChArgs = #changes_args{filter = "app/valid"}, + UserCtx = #user_ctx{name = <<"doc3">>, roles = []}, + {ok, DbRec} = couch_db:clustered_db(DbName, UserCtx), + Req = {json_req, {[{ + <<"userCtx">>, couch_util:json_user_ctx(DbRec) + }]}}, + ok = update_ddoc(DbName, DDoc), + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + ?assertEqual(1, length(Rows)), + [#row{seq = Seq, id = Id}] = Rows, + ?assertEqual(<<"doc3">>, Id), + ?assertEqual(6, Seq), + ?assertEqual(UpSeq, LastSeq) + end). + +should_filter_by_view({DbName, _}) -> + ?_test( + begin + DDocId = <<"_design/app">>, + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DDocId}, + {<<"language">>, <<"javascript">>}, + {<<"views">>, {[ + {<<"valid">>, {[ + {<<"map">>, <<"function(doc) {" + " if (doc._id == 'doc3') {" + " emit(doc); " + "} }">>} + ]}} + ]}} + ]}), + ChArgs = #changes_args{filter = "_view"}, + Req = {json_req, {[{ + <<"query">>, {[ + {<<"view">>, <<"app/valid">>} + ]} + }]}}, + ok = update_ddoc(DbName, DDoc), + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + ?assertEqual(1, length(Rows)), + [#row{seq = Seq, id = Id}] = Rows, + ?assertEqual(<<"doc3">>, Id), + ?assertEqual(6, Seq), + ?assertEqual(UpSeq, LastSeq) + end). + +should_filter_by_fast_view({DbName, _}) -> + ?_test( + begin + DDocId = <<"_design/app">>, + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DDocId}, + {<<"language">>, <<"javascript">>}, + {<<"options">>, {[{<<"seq_indexed">>, true}]}}, + {<<"views">>, {[ + {<<"valid">>, {[ + {<<"map">>, <<"function(doc) {" + " if (doc._id == 'doc3') {" + " emit(doc); " + "} }">>} + ]}} + ]}} + ]}), + ChArgs = #changes_args{filter = "_view"}, + Req = {json_req, {[{ + <<"query">>, {[ + {<<"view">>, <<"app/valid">>} + ]} + }]}}, + ok = update_ddoc(DbName, DDoc), + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + {ok, Db} = couch_db:open_int(DbName, []), + {ok, ViewInfo} = couch_mrview:get_view_info(Db, DDoc, <<"valid">>), + {update_seq, ViewUpSeq} = lists:keyfind(update_seq, 1, ViewInfo), + couch_db:close(Db), + ?assertEqual(1, length(Rows)), + [#row{seq = Seq, id = Id}] = Rows, + ?assertEqual(<<"doc3">>, Id), + ?assertEqual(6, Seq), + ?assertEqual(LastSeq, Seq), + ?assertEqual(UpSeq, ViewUpSeq) + end). + +should_filter_by_erlang_view({DbName, _}) -> + ?_test( + begin + DDocId = <<"_design/app">>, + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DDocId}, + {<<"language">>, <<"erlang">>}, + {<<"views">>, {[ + {<<"valid">>, {[ + {<<"map">>, <<"fun({Doc}) ->" + " case lists:keyfind(<<\"_id\">>, 1, Doc) of" + " {<<\"_id\">>, <<\"doc3\">>} -> Emit(Doc, null); " + " false -> ok" + " end " + "end.">>} + ]}} + ]}} + ]}), + ChArgs = #changes_args{filter = "_view"}, + Req = {json_req, {[{ + <<"query">>, {[ + {<<"view">>, <<"app/valid">>} + ]} + }]}}, + ok = update_ddoc(DbName, DDoc), + {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req), + ?assertEqual(1, length(Rows)), + [#row{seq = Seq, id = Id}] = Rows, + ?assertEqual(<<"doc3">>, Id), + ?assertEqual(6, Seq), + ?assertEqual(UpSeq, LastSeq) + end). 
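The design-document filters above need no request body; a sketch of the rough
HTTP equivalents, again assuming the test_request helpers and a database bound
to Url:

    {ok, 200, _, _} = test_request:get(Url ++ "/_changes?filter=app/valid"),
    {ok, 200, _, _} = test_request:get(
        Url ++ "/_changes?filter=_view&view=app/valid").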
+ +update_ddoc(DbName, DDoc) -> + {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), + {ok, _} = couch_db:update_doc(Db, DDoc, []), + couch_db:close(Db). + +run_changes_query(DbName, ChangesArgs, Opts) -> + Consumer = spawn_consumer(DbName, ChangesArgs, Opts), + {Rows, LastSeq} = wait_finished(Consumer), + {ok, Db} = couch_db:open_int(DbName, []), + UpSeq = couch_db:get_update_seq(Db), + couch_db:close(Db), + stop_consumer(Consumer), + {Rows, LastSeq, UpSeq}. + +save_doc(Db, Json) -> + Doc = couch_doc:from_json_obj(Json), + {ok, Rev} = couch_db:update_doc(Db, Doc, []), + {ok, couch_doc:rev_to_str(Rev)}. + +get_rows({Consumer, _}) -> + Ref = make_ref(), + Consumer ! {get_rows, Ref}, + Resp = receive + {rows, Ref, Rows} -> + Rows + after ?TIMEOUT -> + timeout + end, + ?assertNotEqual(timeout, Resp), + Resp. + +%% get_heartbeats({Consumer, _}) -> +%% Ref = make_ref(), +%% Consumer ! {get_heartbeats, Ref}, +%% Resp = receive +%% {hearthbeats, Ref, HeartBeats} -> +%% HeartBeats +%% after ?TIMEOUT -> +%% timeout +%% end, +%% ?assertNotEqual(timeout, Resp), +%% Resp. + +clear_rows({Consumer, _}) -> + Ref = make_ref(), + Consumer ! {reset, Ref}, + Resp = receive + {ok, Ref} -> + ok + after ?TIMEOUT -> + timeout + end, + ?assertNotEqual(timeout, Resp), + Resp. + +stop_consumer({Consumer, _}) -> + Ref = make_ref(), + Consumer ! {stop, Ref}, + Resp = receive + {ok, Ref} -> + ok + after ?TIMEOUT -> + timeout + end, + ?assertNotEqual(timeout, Resp), + Resp. + +pause({Consumer, _}) -> + Ref = make_ref(), + Consumer ! {pause, Ref}, + Resp = receive + {paused, Ref} -> + ok + after ?TIMEOUT -> + timeout + end, + ?assertNotEqual(timeout, Resp), + Resp. + +unpause({Consumer, _}) -> + Ref = make_ref(), + Consumer ! {continue, Ref}, + Resp = receive + {ok, Ref} -> + ok + after ?TIMEOUT -> + timeout + end, + ?assertNotEqual(timeout, Resp), + Resp. + +wait_finished({_, ConsumerRef}) -> + receive + {consumer_finished, Rows, LastSeq} -> + {Rows, LastSeq}; + {'DOWN', ConsumerRef, _, _, Msg} when Msg == normal; Msg == ok -> + ok; + {'DOWN', ConsumerRef, _, _, Msg} -> + erlang:error({consumer_died, [ + {module, ?MODULE}, + {line, ?LINE}, + {value, Msg} + ]}) + after ?TIMEOUT -> + erlang:error({consumer_died, [ + {module, ?MODULE}, + {line, ?LINE}, + {value, timeout} + ]}) + end. + + +reset_row_notifications() -> + receive + row -> + reset_row_notifications() + after 0 -> + ok + end. + + +wait_row_notifications(N) -> + receive + row when N == 1 -> + ok; + row when N > 1 -> + wait_row_notifications(N - 1) + after ?TIMEOUT -> + timeout + end. + + +spawn_consumer(DbName, ChangesArgs0, Req) -> + Parent = self(), + spawn_monitor(fun() -> + put(heartbeat_count, 0), + Callback = fun + ({change, {Change}, _}, _, Acc) -> + Id = couch_util:get_value(<<"id">>, Change), + Seq = couch_util:get_value(<<"seq">>, Change), + Del = couch_util:get_value(<<"deleted">>, Change, false), + Doc = couch_util:get_value(doc, Change, nil), + Parent ! row, + [#row{id = Id, seq = Seq, deleted = Del, doc = Doc} | Acc]; + ({stop, LastSeq}, _, Acc) -> + Parent ! 
{consumer_finished, lists:reverse(Acc), LastSeq}, + stop_loop(Parent, Acc); + (timeout, _, Acc) -> + put(heartbeat_count, get(heartbeat_count) + 1), + maybe_pause(Parent, Acc); + (_, _, Acc) -> + maybe_pause(Parent, Acc) + end, + {ok, Db} = couch_db:open_int(DbName, []), + ChangesArgs = case (ChangesArgs0#changes_args.timeout =:= undefined) + andalso (ChangesArgs0#changes_args.heartbeat =:= undefined) of + true -> + ChangesArgs0#changes_args{timeout = 1000, heartbeat = 100}; + false -> + ChangesArgs0 + end, + FeedFun = couch_changes:handle_db_changes(ChangesArgs, Req, Db), + try + FeedFun({Callback, []}) + catch + throw:{stop, _} -> ok; + _:Error -> exit(Error) + after + couch_db:close(Db) + end + end). + +maybe_pause(Parent, Acc) -> + receive + {get_rows, Ref} -> + Parent ! {rows, Ref, lists:reverse(Acc)}, + maybe_pause(Parent, Acc); + {get_heartbeats, Ref} -> + Parent ! {hearthbeats, Ref, get(heartbeat_count)}, + maybe_pause(Parent, Acc); + {reset, Ref} -> + Parent ! {ok, Ref}, + maybe_pause(Parent, []); + {pause, Ref} -> + Parent ! {paused, Ref}, + pause_loop(Parent, Acc); + {stop, Ref} -> + Parent ! {ok, Ref}, + throw({stop, Acc}); + V when V /= updated -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {value, V}, + {reason, "Received unexpected message"}]}) + after 0 -> + Acc + end. + +pause_loop(Parent, Acc) -> + receive + {stop, Ref} -> + Parent ! {ok, Ref}, + throw({stop, Acc}); + {reset, Ref} -> + Parent ! {ok, Ref}, + pause_loop(Parent, []); + {continue, Ref} -> + Parent ! {ok, Ref}, + Acc; + {get_rows, Ref} -> + Parent ! {rows, Ref, lists:reverse(Acc)}, + pause_loop(Parent, Acc) + end. + +stop_loop(Parent, Acc) -> + receive + {get_rows, Ref} -> + Parent ! {rows, Ref, lists:reverse(Acc)}, + stop_loop(Parent, Acc); + {stop, Ref} -> + Parent ! {ok, Ref}, + Acc + end. + +create_db(DbName) -> + couch_db:create(DbName, [?ADMIN_CTX, overwrite]). + +delete_db(DbName) -> + couch_server:delete(DbName, [?ADMIN_CTX]). diff --git a/src/couch/test/eunit/couch_compress_tests.erl b/src/couch/test/eunit/couch_compress_tests.erl new file mode 100644 index 000000000..addb9a0e2 --- /dev/null +++ b/src/couch/test/eunit/couch_compress_tests.erl @@ -0,0 +1,85 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_compress_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TERM, {[{a, 1}, {b, 2}, {c, 3}, {d, 4}, {e, 5}]}). +-define(NONE, <<131,104,1,108,0,0,0,5,104,2,100,0,1,97,97,1, + 104,2,100,0,1,98,97,2,104,2,100,0,1,99,97,3,104,2,100,0, + 1,100,97,4,104,2,100,0,1,101,97,5,106>>). +-define(DEFLATE, <<131,80,0,0,0,48,120,218,203,96,204,97,96, + 96,96,205,96,74,97,96,76,76,100,4,211,73,137,76,96,58,57, + 145,25,76,167,36,178,128,233,212,68,214,44,0,212,169,9,51>>). +-define(SNAPPY, <<1,49,64,131,104,1,108,0,0,0,5,104,2,100,0, + 1,97,97,1,104,1,8,8,98,97,2,5,8,8,99,97,3,5,8,44,100,97, + 4,104,2,100,0,1,101,97,5,106>>). +-define(CORRUPT, <<2,12,85,06>>). 
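Before the cases below, a round-trip sketch of the API these byte vectors pin
down (a hypothetical shell session on a node with the snappy NIF loaded):

    1> Term = {[{a, 1}, {b, 2}]}.
    2> Bin = couch_compress:compress(Term, snappy).
    3> couch_compress:is_compressed(Bin, snappy).
    true
    4> couch_compress:decompress(Bin).
    {[{a,1},{b,2}]}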
+ + +compress_test_() -> + [ + ?_assertEqual(?NONE, couch_compress:compress(?TERM, none)), + ?_assertEqual(?DEFLATE, couch_compress:compress(?TERM, {deflate, 9})), + ?_assertEqual(?SNAPPY, couch_compress:compress(?TERM, snappy)) + ]. + +decompress_test_() -> + [ + ?_assertEqual(?TERM, couch_compress:decompress(?NONE)), + ?_assertEqual(?TERM, couch_compress:decompress(?DEFLATE)), + ?_assertEqual(?TERM, couch_compress:decompress(?SNAPPY)), + ?_assertError(invalid_compression, couch_compress:decompress(?CORRUPT)) + ]. + +recompress_test_() -> + [ + ?_assertEqual(?DEFLATE, couch_compress:compress(?NONE, {deflate, 9})), + ?_assertEqual(?SNAPPY, couch_compress:compress(?NONE, snappy)), + ?_assertEqual(?NONE, couch_compress:compress(?DEFLATE, none)), + ?_assertEqual(?SNAPPY, couch_compress:compress(?DEFLATE, snappy)), + ?_assertEqual(?NONE, couch_compress:compress(?SNAPPY, none)), + ?_assertEqual(?DEFLATE, couch_compress:compress(?SNAPPY, {deflate, 9})) + ]. + +is_compressed_test_() -> + [ + ?_assert(couch_compress:is_compressed(?NONE, none)), + ?_assert(couch_compress:is_compressed(?DEFLATE, {deflate, 9})), + ?_assert(couch_compress:is_compressed(?SNAPPY, snappy)), + ?_assertNot(couch_compress:is_compressed(?NONE, {deflate, 0})), + ?_assertNot(couch_compress:is_compressed(?NONE, {deflate, 9})), + ?_assertNot(couch_compress:is_compressed(?NONE, snappy)), + ?_assertNot(couch_compress:is_compressed(?DEFLATE, none)), + ?_assertNot(couch_compress:is_compressed(?DEFLATE, snappy)), + ?_assertNot(couch_compress:is_compressed(?SNAPPY, none)), + ?_assertNot(couch_compress:is_compressed(?SNAPPY, {deflate, 9})), + ?_assertError(invalid_compression, + couch_compress:is_compressed(?CORRUPT, none)), + ?_assertError(invalid_compression, + couch_compress:is_compressed(?CORRUPT, {deflate, 9})), + ?_assertError(invalid_compression, + couch_compress:is_compressed(?CORRUPT, snappy)) + ]. + +uncompressed_size_test_() -> + [ + ?_assertEqual(49, couch_compress:uncompressed_size(?NONE)), + ?_assertEqual(49, couch_compress:uncompressed_size(?DEFLATE)), + ?_assertEqual(49, couch_compress:uncompressed_size(?SNAPPY)), + ?_assertEqual(5, couch_compress:uncompressed_size( + couch_compress:compress(x, {deflate, 9}))), + ?_assertError(invalid_compression, + couch_compress:uncompressed_size(?CORRUPT)) + ]. diff --git a/src/couch/test/eunit/couch_db_doc_tests.erl b/src/couch/test/eunit/couch_db_doc_tests.erl new file mode 100644 index 000000000..cdcf81d15 --- /dev/null +++ b/src/couch/test/eunit/couch_db_doc_tests.erl @@ -0,0 +1,113 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_db_doc_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +start() -> + test_util:start_couch([ioq]). + + +setup() -> + DbName = ?tempdb(), + config:set("couchdb", "stem_interactive_updates", "false", false), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + couch_db:close(Db), + DbName. + + +teardown(DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + ok. 
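The first test below leans on revision stemming: with
couch_db:set_revs_limit(Db, 5), a document created once and updated ten more
times still reports revision position 11, but only the five most recent
revision ids are kept. In terms of the helpers defined below:

    %% After create_doc/2 plus add_revisions(Db, DocId, Rev, 10):
    {ok, [{ok, #doc{revs = {Pos, Ids}}}]} = open_doc_rev(Db, DocId, Rev10),
    11 = Pos,         % the position counts every update
    5 = length(Ids).  % but only revs_limit ids survive stemming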
+ + +couch_db_doc_test_() -> + { + "CouchDB doc tests", + { + setup, + fun start/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_truncate_number_of_revisions/1, + fun should_raise_bad_request_on_invalid_rev/1 + ] + } + } + }. + + +should_truncate_number_of_revisions(DbName) -> + DocId = <<"foo">>, + Db = open_db(DbName), + couch_db:set_revs_limit(Db, 5), + Rev = create_doc(Db, DocId), + Rev10 = add_revisions(Db, DocId, Rev, 10), + {ok, [{ok, #doc{revs = {11, Revs}}}]} = open_doc_rev(Db, DocId, Rev10), + ?_assertEqual(5, length(Revs)). + + +should_raise_bad_request_on_invalid_rev(DbName) -> + DocId = <<"foo">>, + InvalidRev1 = <<"foo">>, + InvalidRev2 = <<"a-foo">>, + InvalidRev3 = <<"1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>, + Expect = {bad_request, <<"Invalid rev format">>}, + Db = open_db(DbName), + create_doc(Db, DocId), + [ + {InvalidRev1, + ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev1, 1))}, + {InvalidRev2, + ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev2, 1))}, + {InvalidRev3, + ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev3, 1))} + ]. + + +open_db(DbName) -> + {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), + Db. + + +create_doc(Db, DocId) -> + add_revision(Db, DocId, undefined). + + +open_doc_rev(Db0, DocId, Rev) -> + {ok, Db} = couch_db:reopen(Db0), + couch_db:open_doc_revs(Db, DocId, [couch_doc:parse_rev(Rev)], []). + + +add_revision(Db, DocId, undefined) -> + add_revision(Db, DocId, []); +add_revision(Db, DocId, Rev) when is_binary(Rev) -> + add_revision(Db, DocId, [{<<"_rev">>, Rev}]); +add_revision(Db0, DocId, Rev) -> + {ok, Db} = couch_db:reopen(Db0), + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, DocId}, + {<<"value">>, DocId} + ] ++ Rev}), + {ok, NewRev} = couch_db:update_doc(Db, Doc, []), + {ok, _} = couch_db:ensure_full_commit(Db), + couch_doc:rev_to_str(NewRev). + + +add_revisions(Db, DocId, Rev, N) -> + lists:foldl(fun(_, OldRev) -> + add_revision(Db, DocId, OldRev) + end, Rev, lists:seq(1, N)). diff --git a/src/couch/test/eunit/couch_db_mpr_tests.erl b/src/couch/test/eunit/couch_db_mpr_tests.erl new file mode 100644 index 000000000..bb97c66d7 --- /dev/null +++ b/src/couch/test/eunit/couch_db_mpr_tests.erl @@ -0,0 +1,137 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_db_mpr_tests). + + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 30). + +-define(USER, "couch_db_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(CONTENT_JSON, {"Content-Type", "application/json"}). +-define(JSON_BODY, "{\"foo\": \"bar\"}"). +-define(CONTENT_MULTI_RELATED, + {"Content-Type", "multipart/related;boundary=\"bound\""}). 
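The mpr/0 helper near the end of this module builds the multipart/related body
these tests upload. In that body, "follows": true tells CouchDB that the
attachment bytes arrive as the next MIME part rather than as inline base64,
and "length" must match those bytes exactly:

    %% Anatomy of the body mpr/0 produces (boundary "bound"):
    %%   part 1: {"body":"stuff","_attachments":{"foo.txt":{
    %%             "follows":true,"content_type":"text/plain","length":21}}}
    %%   part 2: this is 21 chars long    (exactly 21 bytes)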
+ + +setup() -> + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + TmpDb = ?tempdb(), + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(couch_httpd, port), + Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]), + Url. + + +teardown(Url) -> + catch delete_db(Url), + ok = config:delete("admins", ?USER, _Persist=false). + + +create_db(Url) -> + {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), + ?assert(Status =:= 201 orelse Status =:= 202). + + +delete_db(Url) -> + {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). + + +create_doc(Url, Id, Body, Type) -> + test_request:put(Url ++ "/" ++ Id, [Type, ?AUTH], Body). + + +delete_doc(Url, Id, Rev) -> + test_request:delete(Url ++ "/" ++ Id ++ "?rev=" ++ ?b2l(Rev)). + + +couch_db_mpr_test_() -> + { + "multi-part attachment tests", + { + setup, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, + { + foreach, + fun setup/0, + fun teardown/1, + [ + fun recreate_with_mpr/1 + ] + } + } + }. + + +recreate_with_mpr(Url) -> + {timeout, ?TIMEOUT, ?_test(begin + DocId1 = "foo", + DocId2 = "bar", + + create_db(Url), + create_and_delete_doc(Url, DocId1), + Rev1 = create_with_mpr(Url, DocId1), + delete_db(Url), + + create_db(Url), + create_and_delete_doc(Url, DocId1), + % We create a second unrelated doc to change the + % position on disk where the attachment is written + % so that we can assert that the position on disk + % is not included when calculating a revision. + create_and_delete_doc(Url, DocId2), + Rev2 = create_with_mpr(Url, DocId1), + delete_db(Url), + + ?assertEqual(Rev1, Rev2) + end)}. + + +create_and_delete_doc(Url, DocId) -> + {ok, _, _, Resp} = create_doc(Url, DocId, ?JSON_BODY, ?CONTENT_JSON), + {Props} = ?JSON_DECODE(Resp), + Rev = couch_util:get_value(<<"rev">>, Props, undefined), + ?assert(is_binary(Rev)), + {ok, _, _, _} = delete_doc(Url, DocId, Rev). + + +create_with_mpr(Url, DocId) -> + {ok, _, _, Resp} = create_doc(Url, DocId, mpr(), ?CONTENT_MULTI_RELATED), + {Props} = ?JSON_DECODE(Resp), + Rev = couch_util:get_value(<<"rev">>, Props, undefined), + ?assert(is_binary(Rev)), + Rev. + + +mpr() -> + lists:concat([ + "--bound\r\n", + "Content-Type: application/json\r\n\r\n", + "{", + "\"body\":\"stuff\"," + "\"_attachments\":", + "{\"foo.txt\":{", + "\"follows\":true,", + "\"content_type\":\"text/plain\"," + "\"length\":21", + "}}" + "}", + "\r\n--bound\r\n\r\n", + "this is 21 chars long", + "\r\n--bound--epilogue" + ]). diff --git a/src/couch/test/eunit/couch_db_plugin_tests.erl b/src/couch/test/eunit/couch_db_plugin_tests.erl new file mode 100644 index 000000000..93551adbc --- /dev/null +++ b/src/couch/test/eunit/couch_db_plugin_tests.erl @@ -0,0 +1,205 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_db_plugin_tests). + +-export([ + validate_dbname/2, + before_doc_update/3, + after_doc_read/2, + validate_docid/1, + check_is_admin/1, + on_delete/2 +]). 
+ +-export([ %% couch_epi_plugin behaviour + app/0, + providers/0, + services/0, + data_providers/0, + data_subscriptions/0, + processes/0, + notify/3 +]). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +%% couch_epi_plugin behaviour + +app() -> test_app. +providers() -> [{couch_db, ?MODULE}]. +services() -> []. +data_providers() -> []. +data_subscriptions() -> []. +processes() -> []. +notify(_, _, _) -> ok. +fake_db() -> test_util:fake_db([]). + +setup() -> + couch_tests:setup([ + couch_epi_dispatch:dispatch(chttpd, ?MODULE) + ]). + +teardown(Ctx) -> + couch_tests:teardown(Ctx). + +validate_dbname({true, _Db}, _) -> {decided, true}; +validate_dbname({false, _Db}, _) -> {decided, false}; +validate_dbname({fail, _Db}, _) -> throw(validate_dbname); +validate_dbname({pass, _Db}, _) -> no_decision. + +before_doc_update({fail, _Doc}, _Db, interactive_edit) -> throw(before_doc_update); +before_doc_update({true, Doc}, Db, interactive_edit) -> [{true, [before_doc_update|Doc]}, Db, interactive_edit]; +before_doc_update({false, Doc}, Db, interactive_edit) -> [{false, Doc}, Db, interactive_edit]. + +after_doc_read({fail, _Doc}, _Db) -> throw(after_doc_read); +after_doc_read({true, Doc}, Db) -> [{true, [after_doc_read|Doc]}, Db]; +after_doc_read({false, Doc}, Db) -> [{false, Doc}, Db]. + +validate_docid({true, _Id}) -> true; +validate_docid({false, _Id}) -> false; +validate_docid({fail, _Id}) -> throw(validate_docid). + +check_is_admin({true, _Db}) -> true; +check_is_admin({false, _Db}) -> false; +check_is_admin({fail, _Db}) -> throw(check_is_admin). + +on_delete(true, _Opts) -> true; +on_delete(false, _Opts) -> false; +on_delete(fail, _Opts) -> throw(on_delete). + +callback_test_() -> + { + "callback tests", + { + setup, fun setup/0, fun teardown/1, + [ + {"validate_dbname_match", fun validate_dbname_match/0}, + {"validate_dbname_no_match", fun validate_dbname_no_match/0}, + {"validate_dbname_throw", fun validate_dbname_throw/0}, + {"validate_dbname_pass", fun validate_dbname_pass/0}, + + {"before_doc_update_match", fun before_doc_update_match/0}, + {"before_doc_update_no_match", fun before_doc_update_no_match/0}, + {"before_doc_update_throw", fun before_doc_update_throw/0}, + + {"after_doc_read_match", fun after_doc_read_match/0}, + {"after_doc_read_no_match", fun after_doc_read_no_match/0}, + {"after_doc_read_throw", fun after_doc_read_throw/0}, + + {"validate_docid_match", fun validate_docid_match/0}, + {"validate_docid_no_match", fun validate_docid_no_match/0}, + {"validate_docid_throw", fun validate_docid_throw/0}, + + {"check_is_admin_match", fun check_is_admin_match/0}, + {"check_is_admin_no_match", fun check_is_admin_no_match/0}, + {"check_is_admin_throw", fun check_is_admin_throw/0}, + + {"on_delete_match", fun on_delete_match/0}, + {"on_delete_no_match", fun on_delete_no_match/0}, + {"on_delete_throw", fun on_delete_throw/0} + ] + } + }. + + +validate_dbname_match() -> + ?assert(couch_db_plugin:validate_dbname( + {true, [db]}, db, fun(_, _) -> pass end)). + +validate_dbname_no_match() -> + ?assertNot(couch_db_plugin:validate_dbname( + {false, [db]}, db, fun(_, _) -> pass end)). + +validate_dbname_throw() -> + ?assertThrow( + validate_dbname, + couch_db_plugin:validate_dbname( + {fail, [db]}, db, fun(_, _) -> pass end)). + +validate_dbname_pass() -> + ?assertEqual(pass, couch_db_plugin:validate_dbname( + {pass, [db]}, db, fun(_, _) -> pass end)). 
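The four validate_dbname cases above pin down the decision protocol assumed
for couch_epi providers: a provider may answer {decided, Bool} to settle the
question outright, or no_decision to defer to the caller-supplied default.
Restated as a sketch (semantics inferred from the tests, not a definitive
dispatch spec):

    DefaultFun = fun(_, _) -> pass end,
    %% A {decided, Bool} from a provider settles the question:
    true = couch_db_plugin:validate_dbname({true, [db]}, db, DefaultFun),
    %% no_decision from every provider falls through to the default:
    pass = couch_db_plugin:validate_dbname({pass, [db]}, db, DefaultFun).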
+ +before_doc_update_match() -> + ?assertMatch( + {true, [before_doc_update, doc]}, + couch_db_plugin:before_doc_update( + fake_db(), {true, [doc]}, interactive_edit)). + +before_doc_update_no_match() -> + ?assertMatch( + {false, [doc]}, + couch_db_plugin:before_doc_update( + fake_db(), {false, [doc]}, interactive_edit)). + +before_doc_update_throw() -> + ?assertThrow( + before_doc_update, + couch_db_plugin:before_doc_update( + fake_db(), {fail, [doc]}, interactive_edit)). + + +after_doc_read_match() -> + ?assertMatch( + {true, [after_doc_read, doc]}, + couch_db_plugin:after_doc_read(fake_db(), {true, [doc]})). + +after_doc_read_no_match() -> + ?assertMatch( + {false, [doc]}, + couch_db_plugin:after_doc_read(fake_db(), {false, [doc]})). + +after_doc_read_throw() -> + ?assertThrow( + after_doc_read, + couch_db_plugin:after_doc_read(fake_db(), {fail, [doc]})). + + +validate_docid_match() -> + ?assert(couch_db_plugin:validate_docid({true, [doc]})). + +validate_docid_no_match() -> + ?assertNot(couch_db_plugin:validate_docid({false, [doc]})). + +validate_docid_throw() -> + ?assertThrow( + validate_docid, + couch_db_plugin:validate_docid({fail, [doc]})). + + +check_is_admin_match() -> + ?assert(couch_db_plugin:check_is_admin({true, [db]})). + +check_is_admin_no_match() -> + ?assertNot(couch_db_plugin:check_is_admin({false, [db]})). + +check_is_admin_throw() -> + ?assertThrow( + check_is_admin, + couch_db_plugin:check_is_admin({fail, [db]})). + +on_delete_match() -> + ?assertMatch( + [true], + couch_db_plugin:on_delete(true, [])). + +on_delete_no_match() -> + ?assertMatch( + [false], + couch_db_plugin:on_delete(false, [])). + +on_delete_throw() -> + ?assertThrow( + on_delete, + couch_db_plugin:on_delete(fail, [])). diff --git a/src/couch/test/eunit/couch_db_props_upgrade_tests.erl b/src/couch/test/eunit/couch_db_props_upgrade_tests.erl new file mode 100644 index 000000000..40ad283cf --- /dev/null +++ b/src/couch/test/eunit/couch_db_props_upgrade_tests.erl @@ -0,0 +1,83 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_db_props_upgrade_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch_mrview/include/couch_mrview.hrl"). + + +setup() -> + DbName = <<"test">>, + DbFileName = "test.couch", + OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]), + + DbDir = config:get("couchdb", "database_dir"), + NewDbFilePath = filename:join([DbDir, DbFileName]), + + file:delete(NewDbFilePath), + {ok, _} = file:copy(OldDbFilePath, NewDbFilePath), + + DbName. + + +teardown(DbName) when is_binary(DbName) -> + couch_server:delete(DbName, [?ADMIN_CTX]), + ok. + + +old_db_info_test_() -> + { + "Old database versions work", + { + setup, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, + { + foreach, + fun setup/0, + fun teardown/1, + [ + fun can_get_props/1, + fun can_get_db_info/1, + fun can_compact_db/1 + ] + } + } + }. 
+ + +can_get_props(DbName) -> + ?_test(begin + {ok, Db} = couch_db:open_int(DbName, []), + Props = couch_db_engine:get_props(Db), + ?assert(is_list(Props)) + end). + + +can_get_db_info(DbName) -> + ?_test(begin + {ok, Db} = couch_db:open_int(DbName, []), + {ok, Info} = couch_db:get_db_info(Db), + Props = couch_util:get_value(props, Info), + ?assertEqual({[]}, Props) + end). + + +can_compact_db(DbName) -> + ?_test(begin + couch_util:with_db(DbName, fun(Db) -> + couch_db:start_compact(Db), + couch_db:wait_for_compaction(Db) + end) + end). diff --git a/src/couch/test/eunit/couch_db_split_tests.erl b/src/couch/test/eunit/couch_db_split_tests.erl new file mode 100644 index 000000000..7d2bb4006 --- /dev/null +++ b/src/couch/test/eunit/couch_db_split_tests.erl @@ -0,0 +1,302 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_db_split_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(RINGTOP, 2 bsl 31). + + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + DbName. + + +teardown(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + FilePath = couch_db:get_filepath(Db), + ok = couch_db:close(Db), + ok = file:delete(FilePath). + + +split_test_() -> + Cases = [ + {"Should split an empty shard", 0, 2}, + {"Should split shard in half", 100, 2}, + {"Should split shard in three", 99, 3}, + {"Should split shard in four", 100, 4} + ], + { + setup, + fun test_util:start_couch/0, fun test_util:stop/1, + [ + { + foreachx, + fun(_) -> setup() end, fun(_, St) -> teardown(St) end, + [{Case, fun should_split_shard/2} || Case <- Cases] + }, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_fail_on_missing_source/1, + fun should_fail_on_existing_target/1, + fun should_fail_on_invalid_target_name/1, + fun should_crash_on_invalid_tmap/1 + ] + } + ] + }. + + +should_split_shard({Desc, TotalDocs, Q}, DbName) -> + {ok, ExpectSeq} = create_docs(DbName, TotalDocs), + Ranges = make_ranges(Q), + TMap = make_targets(Ranges), + DocsPerRange = TotalDocs div Q, + PickFun = make_pickfun(DocsPerRange), + {Desc, ?_test(begin + {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun), + ?assertEqual(ExpectSeq, UpdateSeq), + maps:map(fun(Range, Name) -> + {ok, Db} = couch_db:open_int(Name, []), + FilePath = couch_db:get_filepath(Db), + %% target actually exists + ?assertMatch({ok, _}, file:read_file_info(FilePath)), + %% target's update seq is the same as source's update seq + USeq = couch_db:get_update_seq(Db), + ?assertEqual(ExpectSeq, USeq), + %% target shard has all the expected in its range docs + {ok, DocsInShard} = couch_db:fold_docs(Db, fun(FDI, Acc) -> + DocId = FDI#full_doc_info.id, + ExpectedRange = PickFun(DocId, Ranges, undefined), + ?assertEqual(ExpectedRange, Range), + {ok, Acc + 1} + end, 0), + ?assertEqual(DocsPerRange, DocsInShard), + ok = couch_db:close(Db), + ok = file:delete(FilePath) + end, TMap) + end)}. 
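For the split cases above, the make_ranges/1 helper near the end of this file
carves the 2^32 hash ring into equal, inclusive ranges. A worked two-way
example (hex for readability):

    %% With ?RINGTOP = 2 bsl 31, i.e. 2^32:
    [[16#00000000, 16#7FFFFFFF],
     [16#80000000, 16#FFFFFFFF]] = make_ranges(2).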
+
+
+should_fail_on_missing_source(_DbName) ->
+    DbName = ?tempdb(),
+    Ranges = make_ranges(2),
+    TMap = make_targets(Ranges),
+    Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
+    ?_assertEqual({error, missing_source}, Response).
+
+
+should_fail_on_existing_target(DbName) ->
+    Ranges = make_ranges(2),
+    TMap = maps:map(fun(_, _) -> DbName end, make_targets(Ranges)),
+    Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
+    ?_assertMatch({error, {target_create_error, DbName, eexist}}, Response).
+
+
+should_fail_on_invalid_target_name(DbName) ->
+    Ranges = make_ranges(2),
+    TMap = maps:map(fun([B, _], _) ->
+        iolist_to_binary(["_$", couch_util:to_hex(<<B:32/integer>>)])
+    end, make_targets(Ranges)),
+    Expect = {error, {target_create_error, <<"_$00000000">>,
+        {illegal_database_name, <<"_$00000000">>}}},
+    Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
+    ?_assertMatch(Expect, Response).
+
+
+should_crash_on_invalid_tmap(DbName) ->
+    Ranges = make_ranges(1),
+    TMap = make_targets(Ranges),
+    ?_assertError(function_clause,
+        couch_db_split:split(DbName, TMap, fun fake_pickfun/3)).
+
+
+copy_local_docs_test_() ->
+    Cases = [
+        {"Should work with no docs", 0, 2},
+        {"Should copy local docs after split in two", 100, 2},
+        {"Should copy local docs after split in three", 99, 3},
+        {"Should copy local docs after split in four", 100, 4}
+    ],
+    {
+        setup,
+        fun test_util:start_couch/0, fun test_util:stop/1,
+        [
+            {
+                foreachx,
+                fun(_) -> setup() end, fun(_, St) -> teardown(St) end,
+                [{Case, fun should_copy_local_docs/2} || Case <- Cases]
+            },
+            {"Should return error on missing source",
+                fun should_fail_copy_local_on_missing_source/0}
+        ]
+    }.
+
+
+should_copy_local_docs({Desc, TotalDocs, Q}, DbName) ->
+    {ok, ExpectSeq} = create_docs(DbName, TotalDocs),
+    Ranges = make_ranges(Q),
+    TMap = make_targets(Ranges),
+    DocsPerRange = TotalDocs div Q,
+    PickFun = make_pickfun(DocsPerRange),
+    {Desc, ?_test(begin
+        {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
+        ?assertEqual(ExpectSeq, UpdateSeq),
+        Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
+        ?assertEqual(ok, Response),
+        maps:map(fun(Range, Name) ->
+            {ok, Db} = couch_db:open_int(Name, []),
+            FilePath = couch_db:get_filepath(Db),
+            %% target shard has all the expected in its range docs
+            {ok, DocsInShard} = couch_db:fold_local_docs(Db, fun(Doc, Acc) ->
+                DocId = Doc#doc.id,
+                ExpectedRange = PickFun(DocId, Ranges, undefined),
+                ?assertEqual(ExpectedRange, Range),
+                {ok, Acc + 1}
+            end, 0, []),
+            ?assertEqual(DocsPerRange, DocsInShard),
+            ok = couch_db:close(Db),
+            ok = file:delete(FilePath)
+        end, TMap)
+    end)}.
+
+
+should_fail_copy_local_on_missing_source() ->
+    DbName = ?tempdb(),
+    Ranges = make_ranges(2),
+    TMap = make_targets(Ranges),
+    PickFun = fun fake_pickfun/3,
+    Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
+    ?assertEqual({error, missing_source}, Response).
+
+
+cleanup_target_test_() ->
+    {
+        setup,
+        fun test_util:start_couch/0, fun test_util:stop/1,
+        [
+            {
+                setup,
+                fun setup/0, fun teardown/1,
+                fun should_delete_existing_targets/1
+            },
+            {"Should return error on missing source",
+                fun should_fail_cleanup_target_on_missing_source/0}
+        ]
+    }.
+ + +should_delete_existing_targets(SourceName) -> + {ok, ExpectSeq} = create_docs(SourceName, 100), + Ranges = make_ranges(2), + TMap = make_targets(Ranges), + PickFun = make_pickfun(50), + ?_test(begin + {ok, UpdateSeq} = couch_db_split:split(SourceName, TMap, PickFun), + ?assertEqual(ExpectSeq, UpdateSeq), + maps:map(fun(_Range, TargetName) -> + FilePath = couch_util:with_db(TargetName, fun(Db) -> + couch_db:get_filepath(Db) + end), + ?assertMatch({ok, _}, file:read_file_info(FilePath)), + Response = couch_db_split:cleanup_target(SourceName, TargetName), + ?assertEqual(ok, Response), + ?assertEqual({error, enoent}, file:read_file_info(FilePath)) + end, TMap) + end). + + +should_fail_cleanup_target_on_missing_source() -> + SourceName = ?tempdb(), + TargetName = ?tempdb(), + Response = couch_db_split:cleanup_target(SourceName, TargetName), + ?assertEqual({error, missing_source}, Response). + + +make_pickfun(DocsPerRange) -> + fun(DocId, Ranges, _HashFun) -> + Id = docid_to_integer(DocId), + case {Id div DocsPerRange, Id rem DocsPerRange} of + {N, 0} -> + lists:nth(N, Ranges); + {N, _} -> + lists:nth(N + 1, Ranges) + end + end. + + +fake_pickfun(_, Ranges, _) -> + hd(Ranges). + + +make_targets([]) -> + maps:new(); +make_targets(Ranges) -> + Targets = lists:map(fun(Range) -> + {Range, ?tempdb()} + end, Ranges), + maps:from_list(Targets). + + +make_ranges(Q) when Q > 0 -> + Incr = (2 bsl 31) div Q, + lists:map(fun + (End) when End >= ?RINGTOP - 1 -> + [End - Incr, ?RINGTOP - 1]; + (End) -> + [End - Incr, End - 1] + end, lists:seq(Incr, ?RINGTOP, Incr)); +make_ranges(_) -> + []. + + +create_docs(DbName, 0) -> + couch_util:with_db(DbName, fun(Db) -> + UpdateSeq = couch_db:get_update_seq(Db), + {ok, UpdateSeq} + end); +create_docs(DbName, DocNum) -> + Docs = lists:foldl(fun(I, Acc) -> + [create_doc(I), create_local_doc(I) | Acc] + end, [], lists:seq(DocNum, 1, -1)), + couch_util:with_db(DbName, fun(Db) -> + {ok, _Result} = couch_db:update_docs(Db, Docs), + {ok, _StartTime} = couch_db:ensure_full_commit(Db), + {ok, Db1} = couch_db:reopen(Db), + UpdateSeq = couch_db:get_update_seq(Db1), + {ok, UpdateSeq} + end). + + +create_doc(I) -> + create_prefix_id_doc(I, ""). + + +create_local_doc(I) -> + create_prefix_id_doc(I, "_local/"). + + +create_prefix_id_doc(I, Prefix) -> + Id = iolist_to_binary(io_lib:format(Prefix ++ "~3..0B", [I])), + couch_doc:from_json_obj({[{<<"_id">>, Id}, {<<"value">>, I}]}). + + +docid_to_integer(<<"_local/", DocId/binary>>) -> + docid_to_integer(DocId); +docid_to_integer(DocId) -> + list_to_integer(binary_to_list(DocId)). diff --git a/src/couch/test/eunit/couch_db_tests.erl b/src/couch/test/eunit/couch_db_tests.erl new file mode 100644 index 000000000..d64f7c640 --- /dev/null +++ b/src/couch/test/eunit/couch_db_tests.erl @@ -0,0 +1,171 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_db_tests). + +-include_lib("couch/include/couch_eunit.hrl"). + +-define(TIMEOUT, 120). 
+
+
+
+create_delete_db_test_()->
+    {
+        "Database create/delete tests",
+        {
+            setup,
+            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            {
+                foreach,
+                fun() -> ?tempdb() end,
+                [
+                    fun should_create_db/1,
+                    fun should_delete_db/1
+                ]
+            }
+        }
+    }.
+
+create_delete_multiple_dbs_test_()->
+    {
+        "Multiple database create/delete tests",
+        {
+            setup,
+            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            {
+                foreach,
+                fun() -> [?tempdb() || _ <- lists:seq(1, 6)] end,
+                [
+                    fun should_create_multiple_dbs/1,
+                    fun should_delete_multiple_dbs/1
+                ]
+            }
+        }
+    }.
+
+create_delete_database_continuously_test_() ->
+    {
+        "Continuous database create/delete tests",
+        {
+            setup,
+            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            {
+                foreachx,
+                fun(_) -> ?tempdb() end,
+                [
+                    {10, fun should_create_delete_database_continuously/2},
+                    {100, fun should_create_delete_database_continuously/2}
+                ]
+            }
+        }
+    }.
+
+open_db_test_()->
+    {
+        "Database open tests",
+        {
+            setup,
+            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            {
+                foreach,
+                fun() -> ?tempdb() end,
+                [
+                    fun should_create_db_if_missing/1,
+                    fun should_open_db_if_exists/1
+                ]
+            }
+        }
+    }.
+
+
+should_create_db(DbName) ->
+    ?_test(begin
+        {ok, Before} = couch_server:all_databases(),
+        ?assertNot(lists:member(DbName, Before)),
+        ?assert(create_db(DbName)),
+        {ok, After} = couch_server:all_databases(),
+        ?assert(lists:member(DbName, After))
+    end).
+
+should_delete_db(DbName) ->
+    ?_test(begin
+        ?assert(create_db(DbName)),
+        {ok, Before} = couch_server:all_databases(),
+        ?assert(lists:member(DbName, Before)),
+        couch_server:delete(DbName, []),
+        {ok, After} = couch_server:all_databases(),
+        ?assertNot(lists:member(DbName, After))
+    end).
+
+should_create_multiple_dbs(DbNames) ->
+    ?_test(begin
+        gen_server:call(couch_server, {set_max_dbs_open, 3}),
+        {ok, Before} = couch_server:all_databases(),
+        [?assertNot(lists:member(DbName, Before)) || DbName <- DbNames],
+        [?assert(create_db(DbName)) || DbName <- DbNames],
+        {ok, After} = couch_server:all_databases(),
+        [?assert(lists:member(DbName, After)) || DbName <- DbNames]
+    end).
+
+should_delete_multiple_dbs(DbNames) ->
+    ?_test(begin
+        [?assert(create_db(DbName)) || DbName <- DbNames],
+        {ok, Before} = couch_server:all_databases(),
+        [?assert(lists:member(DbName, Before)) || DbName <- DbNames],
+        [?assert(delete_db(DbName)) || DbName <- DbNames],
+        {ok, After} = couch_server:all_databases(),
+        [?assertNot(lists:member(DbName, After)) || DbName <- DbNames]
+    end).
+
+should_create_delete_database_continuously(Times, DbName) ->
+    {lists:flatten(io_lib:format("~b times", [Times])),
+    {timeout, ?TIMEOUT, ?_test(begin
+        ?assert(create_db(DbName)),
+        lists:foreach(fun(_) ->
+            ?assert(delete_db(DbName)),
+            ?assert(create_db(DbName))
+        end, lists:seq(1, Times))
+    end)}}.
+
+should_create_db_if_missing(DbName) ->
+    ?_test(begin
+        {ok, Before} = couch_server:all_databases(),
+        ?assertNot(lists:member(DbName, Before)),
+        {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
+        ok = couch_db:close(Db),
+        {ok, After} = couch_server:all_databases(),
+        ?assert(lists:member(DbName, After))
+    end).
+
+should_open_db_if_exists(DbName) ->
+    ?_test(begin
+        ?assert(create_db(DbName)),
+        {ok, Before} = couch_server:all_databases(),
+        ?assert(lists:member(DbName, Before)),
+        {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
+        ok = couch_db:close(Db),
+        {ok, After} = couch_server:all_databases(),
+        ?assert(lists:member(DbName, After))
+    end).
+ + +create_db(DbName) -> + create_db(DbName, []). + +create_db(DbName, Opts) -> + {ok, Db} = couch_db:create(DbName, Opts), + ok = couch_db:close(Db), + true. + +delete_db(DbName) -> + ok = couch_server:delete(DbName, []), + true. diff --git a/src/couch/test/eunit/couch_doc_json_tests.erl b/src/couch/test/eunit/couch_doc_json_tests.erl new file mode 100644 index 000000000..51f228900 --- /dev/null +++ b/src/couch/test/eunit/couch_doc_json_tests.erl @@ -0,0 +1,493 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_doc_json_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + + +setup() -> + mock(couch_log), + mock(config), + mock(couch_db_plugin), + ok. + +teardown(_) -> + meck:unload(couch_log), + meck:unload(config), + meck:unload(couch_db_plugin), + ok. + +mock(couch_db_plugin) -> + ok = meck:new(couch_db_plugin, [passthrough]), + ok = meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end), + ok; +mock(couch_log) -> + ok = meck:new(couch_log, [passthrough]), + ok = meck:expect(couch_log, debug, fun(_, _) -> ok end), + ok; +mock(config) -> + meck:new(config, [passthrough]), + meck:expect(config, get_integer, + fun("couchdb", "max_document_size", 4294967296) -> 1024 end), + meck:expect(config, get, fun(_, _) -> undefined end), + meck:expect(config, get, fun(_, _, Default) -> Default end), + ok. + + +json_doc_test_() -> + { + setup, + fun setup/0, fun teardown/1, + fun(_) -> + [{"Document from JSON", [ + from_json_with_dbname_error_cases(), + from_json_with_db_name_success_cases(), + from_json_success_cases(), + from_json_error_cases() + ]}, + {"Document to JSON", [ + to_json_success_cases() + ]}] + end + }. + +from_json_success_cases() -> + Cases = [ + { + {[]}, + #doc{}, + "Return an empty document for an empty JSON object." + }, + { + {[{<<"_id">>, <<"zing!">>}]}, + #doc{id = <<"zing!">>}, + "Parses document ids." + }, + { + {[{<<"_id">>, <<"_design/foo">>}]}, + #doc{id = <<"_design/foo">>}, + "_design/document ids." + }, + { + {[{<<"_id">>, <<"_local/bam">>}]}, + #doc{id = <<"_local/bam">>}, + "_local/document ids." + }, + { + {[{<<"_rev">>, <<"4-230234">>}]}, + #doc{revs = {4, [<<"230234">>]}}, + "_rev stored in revs." + }, + { + {[{<<"soap">>, 35}]}, + #doc{body = {[{<<"soap">>, 35}]}}, + "Non underscore prefixed fields stored in body." 
+        },
+        {
+            {[{<<"_attachments">>, {[
+                {<<"my_attachment.fu">>, {[
+                    {<<"stub">>, true},
+                    {<<"content_type">>, <<"application/awesome">>},
+                    {<<"length">>, 45}
+                ]}},
+                {<<"noahs_private_key.gpg">>, {[
+                    {<<"data">>, <<"SSBoYXZlIGEgcGV0IGZpc2gh">>},
+                    {<<"content_type">>, <<"application/pgp-signature">>}
+                ]}}
+            ]}}]},
+            #doc{atts = [
+                couch_att:new([
+                    {name, <<"my_attachment.fu">>},
+                    {data, stub},
+                    {type, <<"application/awesome">>},
+                    {att_len, 45},
+                    {disk_len, 45},
+                    {revpos, undefined}
+                ]),
+                couch_att:new([
+                    {name, <<"noahs_private_key.gpg">>},
+                    {data, <<"I have a pet fish!">>},
+                    {type, <<"application/pgp-signature">>},
+                    {att_len, 18},
+                    {disk_len, 18},
+                    {revpos, 0}
+                ])
+            ]},
+            "Attachments are parsed correctly."
+        },
+        {
+            {[{<<"_deleted">>, true}]},
+            #doc{deleted = true},
+            "_deleted controls the deleted field."
+        },
+        {
+            {[{<<"_deleted">>, false}]},
+            #doc{},
+            "{\"_deleted\": false} is ok."
+        },
+        {
+            {[
+                {<<"_revisions">>,
+                    {[{<<"start">>, 4},
+                        {<<"ids">>, [<<"foo1">>, <<"phi3">>, <<"omega">>]}]}},
+                {<<"_rev">>, <<"6-something">>}
+            ]},
+            #doc{revs = {4, [<<"foo1">>, <<"phi3">>, <<"omega">>]}},
+            "The _revisions attribute is preferred to _rev."
+        },
+        {
+            {[{<<"_revs_info">>, dropping}]},
+            #doc{},
+            "Drops _revs_info."
+        },
+        {
+            {[{<<"_local_seq">>, dropping}]},
+            #doc{},
+            "Drops _local_seq."
+        },
+        {
+            {[{<<"_conflicts">>, dropping}]},
+            #doc{},
+            "Drops _conflicts."
+        },
+        {
+            {[{<<"_deleted_conflicts">>, dropping}]},
+            #doc{},
+            "Drops _deleted_conflicts."
+        }
+    ],
+    lists:map(
+        fun({EJson, Expect, Msg}) ->
+            {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson))}
+        end,
+        Cases).
+
+from_json_with_db_name_success_cases() ->
+    Cases = [
+        {
+            {[]},
+            <<"_dbs">>,
+            #doc{},
+            "DbName _dbs is acceptable with no docid"
+        },
+        {
+            {[{<<"_id">>, <<"zing!">>}]},
+            <<"_dbs">>,
+            #doc{id = <<"zing!">>},
+            "DbName _dbs is acceptable with a normal docid"
+        },
+        {
+            {[{<<"_id">>, <<"_users">>}]},
+            <<"_dbs">>,
+            #doc{id = <<"_users">>},
+            "_dbs/_users is acceptable"
+        },
+        {
+            {[{<<"_id">>, <<"_replicator">>}]},
+            <<"_dbs">>,
+            #doc{id = <<"_replicator">>},
+            "_dbs/_replicator is acceptable"
+        },
+        {
+            {[{<<"_id">>, <<"_global_changes">>}]},
+            <<"_dbs">>,
+            #doc{id = <<"_global_changes">>},
+            "_dbs/_global_changes is acceptable"
+        }
+    ],
+    lists:map(
+        fun({EJson, DbName, Expect, Msg}) ->
+            {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson, DbName))}
+        end,
+        Cases).
+
+from_json_error_cases() ->
+    Cases = [
+        {
+            [],
+            {bad_request, "Document must be a JSON object"},
+            "arrays are invalid"
+        },
+        {
+            4,
+            {bad_request, "Document must be a JSON object"},
+            "integers are invalid"
+        },
+        {
+            true,
+            {bad_request, "Document must be a JSON object"},
+            "literals are invalid"
+        },
+        {
+            {[{<<"_id">>, {[{<<"foo">>, 5}]}}]},
+            {illegal_docid, <<"Document id must be a string">>},
+            "Document id must be a string."
+        },
+        {
+            {[{<<"_id">>, <<"_random">>}]},
+            {illegal_docid,
+                <<"Only reserved document ids may start with underscore.">>},
+            "Disallow arbitrary underscore prefixed docids."
+        },
+        {
+            {[{<<"_rev">>, 5}]},
+            {bad_request, <<"Invalid rev format">>},
+            "_rev must be a string"
+        },
+        {
+            {[{<<"_rev">>, "foobar"}]},
+            {bad_request, <<"Invalid rev format">>},
+            "_rev must be %d-%s"
+        },
+        {
+            {[{<<"_rev">>, "foo-bar"}]},
+            "Error if _rev's integer expectation is broken."
+        },
+        {
+            {[{<<"_revisions">>, {[{<<"start">>, true}]}}]},
+            {doc_validation, "_revisions.start isn't an integer."},
+            "_revisions.start must be an integer."
+        },
+        {
+            {[{<<"_revisions">>, {[{<<"start">>, 0}, {<<"ids">>, 5}]}}]},
+            {doc_validation, "_revisions.ids isn't a array."},
+            "_revisions.ids must be a list."
+        },
+        {
+            {[{<<"_revisions">>, {[{<<"start">>, 0}, {<<"ids">>, [5]}]}}]},
+            {doc_validation, "RevId isn't a string"},
+            "Revision ids must be strings."
+        },
+        {
+            {[{<<"_revisions">>, {[{<<"start">>, 0},
+                {<<"ids">>, [<<"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>]}]}}]},
+            {doc_validation, "RevId isn't a valid hexadecimal"},
+            "Revision ids must be a valid hex."
+        },
+        {
+            {[{<<"_something">>, 5}]},
+            {doc_validation, <<"Bad special document member: _something">>},
+            "Underscore-prefixed fields are reserved."
+        },
+        {
+            fun() ->
+                {[
+                    {<<"_id">>, <<"large_doc">>},
+                    {<<"x">> , << <<"x">> || _ <- lists:seq(1,1025) >>}
+                ]}
+            end,
+            {request_entity_too_large, <<"large_doc">>},
+            "Document too large."
+        }
+    ],
+
+    lists:map(fun
+        ({Fun, Expect, Msg}) when is_function(Fun, 0) ->
+            {Msg,
+                ?_assertThrow(Expect, couch_doc:from_json_obj_validate(Fun()))};
+        ({EJson, Expect, Msg}) ->
+            {Msg,
+                ?_assertThrow(Expect, couch_doc:from_json_obj_validate(EJson))};
+        ({EJson, Msg}) ->
+            {Msg,
+                ?_assertThrow(_, couch_doc:from_json_obj_validate(EJson))}
+    end, Cases).
+
+from_json_with_dbname_error_cases() ->
+    Cases = [
+        {
+            {[{<<"_id">>, <<"_random">>}]},
+            <<"_dbs">>,
+            {illegal_docid,
+                <<"Only reserved document ids may start with underscore.">>},
+            "Disallow non-system-DB underscore prefixed docids in _dbs database."
+        },
+        {
+            {[{<<"_id">>, <<"_random">>}]},
+            <<"foobar">>,
+            {illegal_docid,
+                <<"Only reserved document ids may start with underscore.">>},
+            "Disallow arbitrary underscore prefixed docids in regular database."
+        },
+        {
+            {[{<<"_id">>, <<"_users">>}]},
+            <<"foobar">>,
+            {illegal_docid,
+                <<"Only reserved document ids may start with underscore.">>},
+            "Disallow system-DB docid _users in regular database."
+        }
+    ],
+
+    lists:map(
+        fun({EJson, DbName, Expect, Msg}) ->
+            Error = (catch couch_doc:from_json_obj_validate(EJson, DbName)),
+            {Msg, ?_assertMatch(Expect, Error)}
+        end,
+        Cases).
+
+to_json_success_cases() ->
+    Cases = [
+        {
+            #doc{},
+            {[{<<"_id">>, <<"">>}]},
+            "Empty docs are {\"_id\": \"\"}"
+        },
+        {
+            #doc{id = <<"foo">>},
+            {[{<<"_id">>, <<"foo">>}]},
+            "_id is added."
+        },
+        {
+            #doc{revs = {5, ["foo"]}},
+            {[{<<"_id">>, <<>>}, {<<"_rev">>, <<"5-foo">>}]},
+            "_rev is added."
+        },
+        {
+            [revs],
+            #doc{revs = {5, [<<"first">>, <<"second">>]}},
+            {[
+                {<<"_id">>, <<>>},
+                {<<"_rev">>, <<"5-first">>},
+                {<<"_revisions">>, {[
+                    {<<"start">>, 5},
+                    {<<"ids">>, [<<"first">>, <<"second">>]}
+                ]}}
+            ]},
+            "_revisions included with the revs option"
+        },
+        {
+            #doc{body = {[{<<"foo">>, <<"bar">>}]}},
+            {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}]},
+            "Arbitrary fields are added."
+        },
+        {
+            #doc{deleted = true, body = {[{<<"foo">>, <<"bar">>}]}},
+            {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}, {<<"_deleted">>, true}]},
+            "Deleted docs no longer drop body members."
+        },
+        {
+            #doc{meta = [
+                {revs_info, 4, [{<<"fin">>, deleted}, {<<"zim">>, missing}]}
+            ]},
+            {[
+                {<<"_id">>, <<>>},
+                {<<"_revs_info">>, [
+                    {[{<<"rev">>, <<"4-fin">>}, {<<"status">>, <<"deleted">>}]},
+                    {[{<<"rev">>, <<"3-zim">>}, {<<"status">>, <<"missing">>}]}
+                ]}
+            ]},
+            "_revs_info field is added correctly."
+        },
+        {
+            #doc{meta = [{local_seq, 5}]},
+            {[{<<"_id">>, <<>>}, {<<"_local_seq">>, 5}]},
+            "_local_seq is added as an integer."
+        },
+        {
+            #doc{meta = [{conflicts, [{3, <<"yep">>}, {1, <<"snow">>}]}]},
+            {[
+                {<<"_id">>, <<>>},
+                {<<"_conflicts">>, [<<"3-yep">>, <<"1-snow">>]}
+            ]},
+            "_conflicts is added as an array of strings."
+        },
+        {
+            #doc{meta = [{deleted_conflicts, [{10923, <<"big_cowboy_hat">>}]}]},
+            {[
+                {<<"_id">>, <<>>},
+                {<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]}
+            ]},
+            "_deleted_conflicts is added as an array of strings."
+        },
+        {
+            #doc{atts = [
+                couch_att:new([
+                    {name, <<"big.xml">>},
+                    {type, <<"xml/sucks">>},
+                    {data, fun() -> ok end},
+                    {revpos, 1},
+                    {att_len, 400},
+                    {disk_len, 400}
+                ]),
+                couch_att:new([
+                    {name, <<"fast.json">>},
+                    {type, <<"json/ftw">>},
+                    {data, <<"{\"so\": \"there!\"}">>},
+                    {revpos, 1},
+                    {att_len, 16},
+                    {disk_len, 16}
+                ])
+            ]},
+            {[
+                {<<"_id">>, <<>>},
+                {<<"_attachments">>, {[
+                    {<<"big.xml">>, {[
+                        {<<"content_type">>, <<"xml/sucks">>},
+                        {<<"revpos">>, 1},
+                        {<<"length">>, 400},
+                        {<<"stub">>, true}
+                    ]}},
+                    {<<"fast.json">>, {[
+                        {<<"content_type">>, <<"json/ftw">>},
+                        {<<"revpos">>, 1},
+                        {<<"length">>, 16},
+                        {<<"stub">>, true}
+                    ]}}
+                ]}}
+            ]},
+            "Attachments attached as stubs only include a length."
+        },
+        {
+            [attachments],
+            #doc{atts = [
+                couch_att:new([
+                    {name, <<"stuff.txt">>},
+                    {type, <<"text/plain">>},
+                    {data, fun() -> <<"diet pepsi">> end},
+                    {revpos, 1},
+                    {att_len, 10},
+                    {disk_len, 10}
+                ]),
+                couch_att:new([
+                    {name, <<"food.now">>},
+                    {type, <<"application/food">>},
+                    {revpos, 1},
+                    {data, <<"sammich">>}
+                ])
+            ]},
+            {[
+                {<<"_id">>, <<>>},
+                {<<"_attachments">>, {[
+                    {<<"stuff.txt">>, {[
+                        {<<"content_type">>, <<"text/plain">>},
+                        {<<"revpos">>, 1},
+                        {<<"data">>, <<"ZGlldCBwZXBzaQ==">>}
+                    ]}},
+                    {<<"food.now">>, {[
+                        {<<"content_type">>, <<"application/food">>},
+                        {<<"revpos">>, 1},
+                        {<<"data">>, <<"c2FtbWljaA==">>}
+                    ]}}
+                ]}}
+            ]},
+            "Attachments included inline with attachments option."
+        }
+    ],
+
+    lists:map(fun
+        ({Doc, EJson, Msg}) ->
+            {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, []))};
+        ({Options, Doc, EJson, Msg}) ->
+            {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, Options))}
+    end, Cases).
diff --git a/src/couch/test/eunit/couch_doc_tests.erl b/src/couch/test/eunit/couch_doc_tests.erl
new file mode 100644
index 000000000..cf41df61d
--- /dev/null
+++ b/src/couch/test/eunit/couch_doc_tests.erl
@@ -0,0 +1,145 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_doc_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(REQUEST_FIXTURE,
+    filename:join([?FIXTURESDIR, "multipart.http"])).
+
+parse_rev_test() ->
+    ?assertEqual({1, <<"123">>}, couch_doc:parse_rev("1-123")),
+    ?assertEqual({1, <<"123">>}, couch_doc:parse_rev(<<"1-123">>)),
+    ?assertException(throw, {bad_request, _}, couch_doc:parse_rev("1f-123")),
+    ?assertException(throw, {bad_request, _}, couch_doc:parse_rev("bar")).
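parse_rev_test above pins down the "<pos>-<revid>" revision format. A rough
illustration of the split it expects (a hypothetical helper, not couch_doc's
actual implementation, which also validates the hex id and throws
bad_request on malformed input):

    %% Hypothetical sketch of the "<pos>-<revid>" split the test expects;
    %% binary_to_integer/1 rejects non-decimal positions such as "1f".
    parse_rev_sketch(Rev) when is_list(Rev) ->
        parse_rev_sketch(list_to_binary(Rev));
    parse_rev_sketch(Rev) when is_binary(Rev) ->
        [PosBin, IdBin] = binary:split(Rev, <<"-">>),
        {binary_to_integer(PosBin), IdBin}.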
+ +doc_from_multi_part_stream_test() -> + ContentType = "multipart/related;boundary=multipart_related_boundary~~~~~~~~~~~~~~~~~~~~", + DataFun = fun() -> request(start) end, + + mock_config(), + {ok, #doc{id = <<"doc0">>, atts = [_]}, _Fun, _Parser} = + couch_doc:doc_from_multi_part_stream(ContentType, DataFun), + meck:unload(config), + ok. + +doc_to_multi_part_stream_test() -> + Boundary = <<"multipart_related_boundary~~~~~~~~~~~~~~~~~~~~">>, + JsonBytes = <<"{\n \"_id\": \"our document goes here\"\n}\n\n">>, + AttData = <<"Hello my important document">>, + AttLength = size(AttData), + Atts = [couch_att:new([ + {name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>}, + {att_len, AttLength}, {disk_len, AttLength}])], + couch_doc:doc_to_multi_part_stream(Boundary, JsonBytes, Atts, fun send/1, true), + AttLengthStr = integer_to_binary(AttLength), + BoundaryLen = size(Boundary), + [ + <<"--", Boundary/binary>>, + <<"Content-Type: application/json">>, + <<>>, + JsonBytes, + <<"--", Boundary/binary>>, + <<"Content-Disposition: attachment; filename=\"test\"">>, + <<"Content-Type: text/plain">>, + <<"Content-Length: ", AttLengthStr/binary>>, + <<>>, + AttData, + <<"--", Boundary:BoundaryLen/binary, "--">> + ] = collected(), + ok. + +len_doc_to_multi_part_stream_test() -> + Boundary = <<"simple_boundary">>, + JsonBytes = <<"{\n \"_id\": \"our document goes here\"\n}\n\n">>, + ContentType = <<"multipart/related; boundary=\"", Boundary/binary, "\"">>, + AttData = <<"Hello my important document">>, + AttLength = size(AttData), + Atts = [couch_att:new([ + {name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>}, + {att_len, AttLength}, {disk_len, AttLength}])], + {ContentType, 258} = %% 258 is expected size of the document + couch_doc:len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, true), + ok. + +validate_docid_test_() -> + {setup, + fun() -> + mock_config(), + ok = meck:new(couch_db_plugin, [passthrough]), + meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end) + end, + fun(_) -> + meck:unload(config), + meck:unload(couch_db_plugin) + end, + [ + ?_assertEqual(ok, couch_doc:validate_docid(<<"idx">>)), + ?_assertEqual(ok, couch_doc:validate_docid(<<"_design/idx">>)), + ?_assertEqual(ok, couch_doc:validate_docid(<<"_local/idx">>)), + ?_assertEqual(ok, couch_doc:validate_docid(large_id(1024))), + ?_assertEqual(ok, couch_doc:validate_docid(<<"_users">>, <<"_dbs">>)), + ?_assertEqual(ok, couch_doc:validate_docid(<<"_replicator">>, <<"_dbs">>)), + ?_assertEqual(ok, couch_doc:validate_docid(<<"_global_changes">>, <<"_dbs">>)), + ?_assertThrow({illegal_docid, _}, + couch_doc:validate_docid(<<>>)), + ?_assertThrow({illegal_docid, _}, + couch_doc:validate_docid(<<16#80>>)), + ?_assertThrow({illegal_docid, _}, + couch_doc:validate_docid(<<"_idx">>)), + ?_assertThrow({illegal_docid, _}, + couch_doc:validate_docid(<<"_">>)), + ?_assertThrow({illegal_docid, _}, + couch_doc:validate_docid(<<"_design/">>)), + ?_assertThrow({illegal_docid, _}, + couch_doc:validate_docid(<<"_local/">>)), + ?_assertThrow({illegal_docid, _}, + couch_doc:validate_docid(large_id(1025))), + ?_assertThrow({illegal_docid, _}, + couch_doc:validate_docid(<<"_users">>, <<"foo">>)), + ?_assertThrow({illegal_docid, _}, + couch_doc:validate_docid(<<"_weeee">>, <<"_dbs">>)) + ] + }. + +large_id(N) -> + << <<"x">> || _ <- lists:seq(1, N) >>. + +request(start) -> + {ok, Doc} = file:read_file(?REQUEST_FIXTURE), + {Doc, fun() -> request(stop) end}; +request(stop) -> + {"", fun() -> request(stop) end}. 
+ +send(Data) -> + send(Data, get(data)). +send(Data, undefined) -> + send(Data, []); +send(Data, Acc) -> + put(data, [Acc|Data]). + +collected() -> + B = binary:replace(iolist_to_binary(get(data)), <<"\r\n">>, <<0>>, [global]), + binary:split(B, [<<0>>], [global]). + +mock_config() -> + ok = meck:new(config, [passthrough]), + meck:expect(config, get, + fun("couchdb", "max_document_id_length", "infinity") -> "1024"; + ("couchdb", "max_attachment_size", "infinity") -> "infinity"; + ("mem3", "shards_db", "_dbs") -> "_dbs"; + (Key, Val, Default) -> meck:passthrough([Key, Val, Default]) + end + ). diff --git a/src/couch/test/eunit/couch_ejson_size_tests.erl b/src/couch/test/eunit/couch_ejson_size_tests.erl new file mode 100644 index 000000000..df9168ed1 --- /dev/null +++ b/src/couch/test/eunit/couch_ejson_size_tests.erl @@ -0,0 +1,72 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_ejson_size_tests). + +-include_lib("eunit/include/eunit.hrl"). + +-define(HWAIR, $\x{10348}). % 4 byte utf8 encoding +-define(EURO, $\x{20ac}). % 3 byte utf8 encoding +-define(CENT, $\x{a2}). % 2 byte utf8 encoding + + +ejson_size_test_() -> + [?_assertEqual(R, couch_ejson_size:encoded_size(Input)) || {R, Input} <- [ + {1, 1}, {1, 1}, {2, -1}, {1, 9}, {2, 10}, {3, -10}, + {2, 11}, {2, 99}, {3, 100}, {3, 999}, {4, 1000}, {4, 9999}, + {5, 10000}, + + {3, 0.0}, {3, 0.1}, {3, 1.0}, {4, -1.0}, {3, 1.0e9}, + {4, 1.0e10}, {5, 1.0e-10}, {5, 1.0e-99}, {6, 1.0e-100}, {3, 1.0e-323}, + + {2, arr_nested(0)}, {22, arr_nested(10)}, {2002, arr_nested(1000)}, + {9, obj_nested(0)}, {69, obj_nested(10)}, {6009, obj_nested(1000)}, + + {4, null}, {4, true}, {5, false}, + + {3, str(1, $x)}, {4, str(1, ?CENT)}, {5, str(1, ?EURO)}, + {6, str(1, ?HWAIR)}, {3, str(1, $\x{1})}, {12, str(10, $x)}, + {22, str(10, ?CENT)}, {32, str(10, ?EURO)}, {42, str(10, ?HWAIR)}, + {12, str(10, $\x{1})} + ]]. + + +%% Helper functions + +arr_nested(MaxDepth) -> + arr_nested(MaxDepth, 0). + + +obj_nested(MaxDepth) -> + obj_nested(MaxDepth, 0). + + +obj(N, K, V) -> + {[{K, V} || _ <- lists:seq(1, N)]}. + + +str(N, C) -> + unicode:characters_to_binary([C || _ <- lists:seq(1, N)]). + + +arr_nested(MaxDepth, MaxDepth) -> + []; + +arr_nested(MaxDepth, Depth) -> + [arr_nested(MaxDepth, Depth + 1)]. + + +obj_nested(MaxDepth, MaxDepth) -> + obj(1, <<"k">>, <<"v">>); + +obj_nested(MaxDepth, Depth) -> + {[{<<"k">>, obj_nested(MaxDepth, Depth + 1)}]}. diff --git a/src/couch/test/eunit/couch_etag_tests.erl b/src/couch/test/eunit/couch_etag_tests.erl new file mode 100644 index 000000000..9d15e483f --- /dev/null +++ b/src/couch/test/eunit/couch_etag_tests.erl @@ -0,0 +1,30 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_etag_tests). + +-include_lib("eunit/include/eunit.hrl"). + +local_with_empty_body_test() -> + Etag = couch_httpd:doc_etag(<<"_local/local-and-empty">>, {[]}, {0, <<"1">>}), + ?assertEqual(Etag, <<"\"5ZVXQYO7VLEOU0TL9VXDNP5PV\"">>). + + +local_with_body_test() -> + DocBody = {[{<<"hello">>,<<"world">>},{<<"relax">>,true}]}, + Etag = couch_httpd:doc_etag(<<"_local/local-with-body">>, DocBody, {0, <<"1">>}), + ?assertEqual(Etag, <<"\"CEFXP6WH8OKYIWO1GLGBHKCCA\"">>). + +normal_doc_uses_rev_test() -> + DocBody = {[{<<"hello">>,<<"world">>},{<<"relax">>,true}]}, + Etag = couch_httpd:doc_etag(<<"nomal-doc">>, DocBody, {1, <<"efda11e34e88ebe31a2f83e84a0435b6">>}), + ?assertEqual(Etag, <<"\"1-efda11e34e88ebe31a2f83e84a0435b6\"">>). diff --git a/src/couch/test/eunit/couch_file_tests.erl b/src/couch/test/eunit/couch_file_tests.erl new file mode 100644 index 000000000..e9806c09a --- /dev/null +++ b/src/couch/test/eunit/couch_file_tests.erl @@ -0,0 +1,533 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_file_tests). + +-include_lib("couch/include/couch_eunit.hrl"). + +-define(BLOCK_SIZE, 4096). +-define(setup(F), {setup, fun setup/0, fun teardown/1, F}). +-define(foreach(Fs), {foreach, fun setup/0, fun teardown/1, Fs}). + + +setup() -> + {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]), + Fd. + +teardown(Fd) -> + case is_process_alive(Fd) of + true -> ok = couch_file:close(Fd); + false -> ok + end. + +open_close_test_() -> + { + "Test for proper file open and close", + { + setup, + fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, + [ + should_return_enoent_if_missed(), + should_ignore_invalid_flags_with_open(), + ?setup(fun should_return_pid_on_file_open/1), + should_close_file_properly(), + ?setup(fun should_create_empty_new_files/1) + ] + } + }. + +should_return_enoent_if_missed() -> + ?_assertEqual({error, enoent}, couch_file:open("not a real file")). + +should_ignore_invalid_flags_with_open() -> + ?_assertMatch({ok, _}, + couch_file:open(?tempfile(), [create, invalid_option])). + +should_return_pid_on_file_open(Fd) -> + ?_assert(is_pid(Fd)). + +should_close_file_properly() -> + {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]), + ok = couch_file:close(Fd), + ?_assert(true). + +should_create_empty_new_files(Fd) -> + ?_assertMatch({ok, 0}, couch_file:bytes(Fd)). 
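For orientation, the couch_file calls exercised by the open/close fixtures
above map onto a short shell session (the path is hypothetical; return
shapes are the ones these tests assert):

    %% Hypothetical shell session using the calls tested above:
    %% 1> {ok, Fd} = couch_file:open("/tmp/demo.couch", [create, overwrite]).
    %% 2> couch_file:bytes(Fd).   %% {ok, 0} for a freshly created file
    %% 3> couch_file:close(Fd).   %% ok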
+
+
+read_write_test_() ->
+    {
+        "Common file read/write tests",
+        {
+            setup,
+            fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+            ?foreach([
+                fun should_increase_file_size_on_write/1,
+                fun should_return_current_file_size_on_write/1,
+                fun should_write_and_read_term/1,
+                fun should_write_and_read_binary/1,
+                fun should_write_and_read_large_binary/1,
+                fun should_return_term_as_binary_for_reading_binary/1,
+                fun should_read_term_written_as_binary/1,
+                fun should_read_iolist/1,
+                fun should_fsync/1,
+                fun should_not_read_beyond_eof/1,
+                fun should_truncate/1
+            ])
+        }
+    }.
+
+
+should_increase_file_size_on_write(Fd) ->
+    {ok, 0, _} = couch_file:append_term(Fd, foo),
+    {ok, Size} = couch_file:bytes(Fd),
+    ?_assert(Size > 0).
+
+should_return_current_file_size_on_write(Fd) ->
+    {ok, 0, _} = couch_file:append_term(Fd, foo),
+    {ok, Size} = couch_file:bytes(Fd),
+    ?_assertMatch({ok, Size, _}, couch_file:append_term(Fd, bar)).
+
+should_write_and_read_term(Fd) ->
+    {ok, Pos, _} = couch_file:append_term(Fd, foo),
+    ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
+
+should_write_and_read_binary(Fd) ->
+    {ok, Pos, _} = couch_file:append_binary(Fd, <<"fancy!">>),
+    ?_assertMatch({ok, <<"fancy!">>}, couch_file:pread_binary(Fd, Pos)).
+
+should_return_term_as_binary_for_reading_binary(Fd) ->
+    {ok, Pos, _} = couch_file:append_term(Fd, foo),
+    Foo = couch_compress:compress(foo, snappy),
+    ?_assertMatch({ok, Foo}, couch_file:pread_binary(Fd, Pos)).
+
+should_read_term_written_as_binary(Fd) ->
+    {ok, Pos, _} = couch_file:append_binary(Fd, <<131,100,0,3,102,111,111>>),
+    ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
+
+should_write_and_read_large_binary(Fd) ->
+    BigBin = list_to_binary(lists:duplicate(100000, 0)),
+    {ok, Pos, _} = couch_file:append_binary(Fd, BigBin),
+    ?_assertMatch({ok, BigBin}, couch_file:pread_binary(Fd, Pos)).
+
+should_read_iolist(Fd) ->
+    %% append_binary == append_iolist?
+    %% Possible bug in pread_iolist or iolist() -> append_binary
+    {ok, Pos, _} = couch_file:append_binary(Fd, ["foo", $m, <<"bam">>]),
+    {ok, IoList} = couch_file:pread_iolist(Fd, Pos),
+    ?_assertMatch(<<"foombam">>, iolist_to_binary(IoList)).
+
+should_fsync(Fd) ->
+    {"How does one test fsync?", ?_assertMatch(ok, couch_file:sync(Fd))}.
+
+should_not_read_beyond_eof(Fd) ->
+    BigBin = list_to_binary(lists:duplicate(100000, 0)),
+    DoubleBin = round(byte_size(BigBin) * 2),
+    {ok, Pos, _Size} = couch_file:append_binary(Fd, BigBin),
+    {_, Filepath} = couch_file:process_info(Fd),
+    %% corrupt db file
+    {ok, Io} = file:open(Filepath, [read, write, binary]),
+    ok = file:pwrite(Io, Pos, <<0:1/integer, DoubleBin:31/integer>>),
+    file:close(Io),
+    unlink(Fd),
+    ExpectedError = {badmatch, {'EXIT', {bad_return_value,
+        {read_beyond_eof, Filepath}}}},
+    ?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)).
+
+should_truncate(Fd) ->
+    {ok, 0, _} = couch_file:append_term(Fd, foo),
+    {ok, Size} = couch_file:bytes(Fd),
+    BigBin = list_to_binary(lists:duplicate(100000, 0)),
+    {ok, _, _} = couch_file:append_binary(Fd, BigBin),
+    ok = couch_file:truncate(Fd, Size),
+    ?_assertMatch({ok, foo}, couch_file:pread_term(Fd, 0)).
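The magic bytes in should_read_term_written_as_binary above are simply the
Erlang external term format for the atom foo, which can be verified in any
shell:

    %% 1> term_to_binary(foo).
    %% <<131,100,0,3,102,111,111>>
    %% 131 is the format version, 100 the ATOM_EXT tag, then a 2-byte
    %% length (3) and the bytes of "foo".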
+ +pread_limit_test_() -> + { + "Read limit tests", + { + setup, + fun() -> + Ctx = test_util:start(?MODULE), + config:set("couchdb", "max_pread_size", "50000"), + Ctx + end, + fun(Ctx) -> + config:delete("couchdb", "max_pread_size"), + test_util:stop(Ctx) + end, + ?foreach([ + fun should_increase_file_size_on_write/1, + fun should_return_current_file_size_on_write/1, + fun should_write_and_read_term/1, + fun should_write_and_read_binary/1, + fun should_not_read_more_than_pread_limit/1 + ]) + } + }. + +should_not_read_more_than_pread_limit(Fd) -> + {_, Filepath} = couch_file:process_info(Fd), + BigBin = list_to_binary(lists:duplicate(100000, 0)), + {ok, Pos, _Size} = couch_file:append_binary(Fd, BigBin), + unlink(Fd), + ExpectedError = {badmatch, {'EXIT', {bad_return_value, + {exceed_pread_limit, Filepath, 50000}}}}, + ?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)). + + +header_test_() -> + { + "File header read/write tests", + { + setup, + fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, + [ + ?foreach([ + fun should_write_and_read_atom_header/1, + fun should_write_and_read_tuple_header/1, + fun should_write_and_read_second_header/1, + fun should_truncate_second_header/1, + fun should_produce_same_file_size_on_rewrite/1, + fun should_save_headers_larger_than_block_size/1 + ]), + should_recover_header_marker_corruption(), + should_recover_header_size_corruption(), + should_recover_header_md5sig_corruption(), + should_recover_header_data_corruption() + ] + } + }. + + +should_write_and_read_atom_header(Fd) -> + ok = couch_file:write_header(Fd, hello), + ?_assertMatch({ok, hello}, couch_file:read_header(Fd)). + +should_write_and_read_tuple_header(Fd) -> + ok = couch_file:write_header(Fd, {<<"some_data">>, 32}), + ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)). + +should_write_and_read_second_header(Fd) -> + ok = couch_file:write_header(Fd, {<<"some_data">>, 32}), + ok = couch_file:write_header(Fd, [foo, <<"more">>]), + ?_assertMatch({ok, [foo, <<"more">>]}, couch_file:read_header(Fd)). + +should_truncate_second_header(Fd) -> + ok = couch_file:write_header(Fd, {<<"some_data">>, 32}), + {ok, Size} = couch_file:bytes(Fd), + ok = couch_file:write_header(Fd, [foo, <<"more">>]), + ok = couch_file:truncate(Fd, Size), + ?_assertMatch({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd)). + +should_produce_same_file_size_on_rewrite(Fd) -> + ok = couch_file:write_header(Fd, {<<"some_data">>, 32}), + {ok, Size1} = couch_file:bytes(Fd), + ok = couch_file:write_header(Fd, [foo, <<"more">>]), + {ok, Size2} = couch_file:bytes(Fd), + ok = couch_file:truncate(Fd, Size1), + ok = couch_file:write_header(Fd, [foo, <<"more">>]), + ?_assertMatch({ok, Size2}, couch_file:bytes(Fd)). + +should_save_headers_larger_than_block_size(Fd) -> + Header = erlang:make_tuple(5000, <<"CouchDB">>), + couch_file:write_header(Fd, Header), + {"COUCHDB-1319", ?_assertMatch({ok, Header}, couch_file:read_header(Fd))}. + + +should_recover_header_marker_corruption() -> + ?_assertMatch( + ok, + check_header_recovery( + fun(CouchFd, RawFd, Expect, HeaderPos) -> + ?assertNotMatch(Expect, couch_file:read_header(CouchFd)), + file:pwrite(RawFd, HeaderPos, <<0>>), + ?assertMatch(Expect, couch_file:read_header(CouchFd)) + end) + ). 
+ +should_recover_header_size_corruption() -> + ?_assertMatch( + ok, + check_header_recovery( + fun(CouchFd, RawFd, Expect, HeaderPos) -> + ?assertNotMatch(Expect, couch_file:read_header(CouchFd)), + % +1 for 0x1 byte marker + file:pwrite(RawFd, HeaderPos + 1, <<10/integer>>), + ?assertMatch(Expect, couch_file:read_header(CouchFd)) + end) + ). + +should_recover_header_md5sig_corruption() -> + ?_assertMatch( + ok, + check_header_recovery( + fun(CouchFd, RawFd, Expect, HeaderPos) -> + ?assertNotMatch(Expect, couch_file:read_header(CouchFd)), + % +5 = +1 for 0x1 byte and +4 for term size. + file:pwrite(RawFd, HeaderPos + 5, <<"F01034F88D320B22">>), + ?assertMatch(Expect, couch_file:read_header(CouchFd)) + end) + ). + +should_recover_header_data_corruption() -> + ?_assertMatch( + ok, + check_header_recovery( + fun(CouchFd, RawFd, Expect, HeaderPos) -> + ?assertNotMatch(Expect, couch_file:read_header(CouchFd)), + % +21 = +1 for 0x1 byte, +4 for term size and +16 for MD5 sig + file:pwrite(RawFd, HeaderPos + 21, <<"some data goes here!">>), + ?assertMatch(Expect, couch_file:read_header(CouchFd)) + end) + ). + + +check_header_recovery(CheckFun) -> + Path = ?tempfile(), + {ok, Fd} = couch_file:open(Path, [create, overwrite]), + {ok, RawFd} = file:open(Path, [read, write, raw, binary]), + + {ok, _} = write_random_data(Fd), + ExpectHeader = {some_atom, <<"a binary">>, 756}, + ok = couch_file:write_header(Fd, ExpectHeader), + + {ok, HeaderPos} = write_random_data(Fd), + ok = couch_file:write_header(Fd, {2342, <<"corruption! greed!">>}), + + CheckFun(Fd, RawFd, {ok, ExpectHeader}, HeaderPos), + + ok = file:close(RawFd), + ok = couch_file:close(Fd), + ok. + +write_random_data(Fd) -> + write_random_data(Fd, 100 + couch_rand:uniform(1000)). + +write_random_data(Fd, 0) -> + {ok, Bytes} = couch_file:bytes(Fd), + {ok, (1 + Bytes div ?BLOCK_SIZE) * ?BLOCK_SIZE}; +write_random_data(Fd, N) -> + Choices = [foo, bar, <<"bizzingle">>, "bank", ["rough", stuff]], + Term = lists:nth(couch_rand:uniform(4) + 1, Choices), + {ok, _, _} = couch_file:append_term(Fd, Term), + write_random_data(Fd, N - 1). + + +delete_test_() -> + { + "File delete tests", + { + foreach, + fun() -> + meck:new(config, [passthrough]), + File = ?tempfile() ++ ".couch", + RootDir = filename:dirname(File), + ok = couch_file:init_delete_dir(RootDir), + ok = file:write_file(File, <<>>), + {RootDir, File} + end, + fun({_, File}) -> + meck:unload(config), + file:delete(File) + end, + [ + fun(Cfg) -> + {"enable_database_recovery = false, context = delete", + make_enable_recovery_test_case(Cfg, false, delete)} + end, + fun(Cfg) -> + {"enable_database_recovery = true, context = delete", + make_enable_recovery_test_case(Cfg, true, delete)} + end, + fun(Cfg) -> + {"enable_database_recovery = false, context = compaction", + make_enable_recovery_test_case(Cfg, false, compaction)} + end, + fun(Cfg) -> + {"enable_database_recovery = true, context = compaction", + make_enable_recovery_test_case(Cfg, true, compaction)} + end, + fun(Cfg) -> + {"delete_after_rename = true", + make_delete_after_rename_test_case(Cfg, true)} + end, + fun(Cfg) -> + {"delete_after_rename = false", + make_delete_after_rename_test_case(Cfg, false)} + end + ] + } + }. 
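The header-corruption cases above poke bytes at fixed offsets from the
header position: +0 is a one-byte 0x1 marker, +1 a four-byte term size, +5 a
16-byte MD5 signature, and +21 the term data itself. A rough decoder for
that implied layout (a sketch under those assumptions only; couch_file's
real reader also handles block-boundary prefixes, which this ignores):

    %% Sketch of the header layout implied by the corruption offsets.
    parse_header_sketch(<<1, Size:32/integer, Md5:16/binary, Rest/binary>>) ->
        <<TermBin:Size/binary, _/binary>> = Rest,
        case crypto:hash(md5, TermBin) of
            Md5 -> {ok, binary_to_term(TermBin)};   %% signature matches
            _ -> {error, header_corruption}         %% fall back to older header
        end.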
+ + +make_enable_recovery_test_case({RootDir, File}, EnableRecovery, Context) -> + meck:expect(config, get_boolean, fun + ("couchdb", "enable_database_recovery", _) -> EnableRecovery; + ("couchdb", "delete_after_rename", _) -> false + end), + FileExistsBefore = filelib:is_regular(File), + couch_file:delete(RootDir, File, [{context, Context}]), + FileExistsAfter = filelib:is_regular(File), + RenamedFiles = filelib:wildcard(filename:rootname(File) ++ "*.deleted.*"), + DeletedFiles = filelib:wildcard(RootDir ++ "/.delete/*"), + {ExpectRenamedCount, ExpectDeletedCount} = if + EnableRecovery andalso Context =:= delete -> {1, 0}; + true -> {0, 1} + end, + [ + ?_assert(FileExistsBefore), + ?_assertNot(FileExistsAfter), + ?_assertEqual(ExpectRenamedCount, length(RenamedFiles)), + ?_assertEqual(ExpectDeletedCount, length(DeletedFiles)) + ]. + +make_delete_after_rename_test_case({RootDir, File}, DeleteAfterRename) -> + meck:expect(config, get_boolean, fun + ("couchdb", "enable_database_recovery", _) -> false; + ("couchdb", "delete_after_rename", _) -> DeleteAfterRename + end), + FileExistsBefore = filelib:is_regular(File), + couch_file:delete(RootDir, File), + FileExistsAfter = filelib:is_regular(File), + RenamedFiles = filelib:wildcard(filename:join([RootDir, ".delete", "*"])), + ExpectRenamedCount = if DeleteAfterRename -> 0; true -> 1 end, + [ + ?_assert(FileExistsBefore), + ?_assertNot(FileExistsAfter), + ?_assertEqual(ExpectRenamedCount, length(RenamedFiles)) + ]. + + +nuke_dir_test_() -> + { + "Nuke directory tests", + { + foreach, + fun() -> + meck:new(config, [passthrough]), + File0 = ?tempfile() ++ ".couch", + RootDir = filename:dirname(File0), + BaseName = filename:basename(File0), + Seed = couch_rand:uniform(8999999999) + 999999999, + DDocDir = io_lib:format("db.~b_design", [Seed]), + ViewDir = filename:join([RootDir, DDocDir]), + file:make_dir(ViewDir), + File = filename:join([ViewDir, BaseName]), + file:rename(File0, File), + ok = couch_file:init_delete_dir(RootDir), + ok = file:write_file(File, <<>>), + {RootDir, ViewDir} + end, + fun({RootDir, ViewDir}) -> + meck:unload(config), + remove_dir(ViewDir), + Ext = filename:extension(ViewDir), + case filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext) of + [DelDir] -> remove_dir(DelDir); + _ -> ok + end + end, + [ + fun(Cfg) -> + {"enable_database_recovery = false", + make_rename_dir_test_case(Cfg, false)} + end, + fun(Cfg) -> + {"enable_database_recovery = true", + make_rename_dir_test_case(Cfg, true)} + end, + fun(Cfg) -> + {"delete_after_rename = true", + make_delete_dir_test_case(Cfg, true)} + end, + fun(Cfg) -> + {"delete_after_rename = false", + make_delete_dir_test_case(Cfg, false)} + end + ] + } + }. + + +make_rename_dir_test_case({RootDir, ViewDir}, EnableRecovery) -> + meck:expect(config, get_boolean, fun + ("couchdb", "enable_database_recovery", _) -> EnableRecovery; + ("couchdb", "delete_after_rename", _) -> true + end), + DirExistsBefore = filelib:is_dir(ViewDir), + couch_file:nuke_dir(RootDir, ViewDir), + DirExistsAfter = filelib:is_dir(ViewDir), + Ext = filename:extension(ViewDir), + RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext), + ExpectRenamedCount = if EnableRecovery -> 1; true -> 0 end, + [ + ?_assert(DirExistsBefore), + ?_assertNot(DirExistsAfter), + ?_assertEqual(ExpectRenamedCount, length(RenamedDirs)) + ]. 
+ +make_delete_dir_test_case({RootDir, ViewDir}, DeleteAfterRename) -> + meck:expect(config, get_boolean, fun + ("couchdb", "enable_database_recovery", _) -> false; + ("couchdb", "delete_after_rename", _) -> DeleteAfterRename + end), + DirExistsBefore = filelib:is_dir(ViewDir), + couch_file:nuke_dir(RootDir, ViewDir), + DirExistsAfter = filelib:is_dir(ViewDir), + Ext = filename:extension(ViewDir), + RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext), + RenamedFiles = filelib:wildcard(RootDir ++ "/.delete/*"), + ExpectRenamedCount = if DeleteAfterRename -> 0; true -> 1 end, + [ + ?_assert(DirExistsBefore), + ?_assertNot(DirExistsAfter), + ?_assertEqual(0, length(RenamedDirs)), + ?_assertEqual(ExpectRenamedCount, length(RenamedFiles)) + ]. + +remove_dir(Dir) -> + [file:delete(File) || File <- filelib:wildcard(filename:join([Dir, "*"]))], + file:del_dir(Dir). + + +fsync_error_test_() -> + { + "Test fsync raises errors", + { + setup, + fun() -> + test_util:start(?MODULE, [ioq]) + end, + fun(Ctx) -> + test_util:stop(Ctx) + end, + [ + fun fsync_raises_errors/0 + ] + } + }. + + +fsync_raises_errors() -> + Fd = spawn(fun() -> fake_fsync_fd() end), + ?assertError({fsync_error, eio}, couch_file:sync(Fd)). + + +fake_fsync_fd() -> + % Mocking gen_server did not go very + % well so faking the couch_file pid + % will have to do. + receive + {'$gen_call', From, sync} -> + gen:reply(From, {error, eio}) + end. diff --git a/src/couch/test/eunit/couch_flags_config_tests.erl b/src/couch/test/eunit/couch_flags_config_tests.erl new file mode 100644 index 000000000..1a66cdcff --- /dev/null +++ b/src/couch/test/eunit/couch_flags_config_tests.erl @@ -0,0 +1,116 @@ +-module(couch_flags_config_tests). +-include_lib("eunit/include/eunit.hrl"). + +%% value copied from couch_flags_config +-define(MAX_FLAG_NAME_LENGTH, 256). + +setup() -> + meck:new(couch_log), + meck:expect(couch_log, error, ['_', '_'], meck:val(ok)), + ok. + +teardown(_) -> + meck:unload(). + +couch_flags_config_test_() -> + { + "test couch_flags_config", + { + setup, fun setup/0, fun teardown/1, + all_combinations_return_same_result() + ++ latest_overide_wins() + ++ [ + {"rules_are_sorted", fun rules_are_sorted/0} + ] + } + }. + +all_combinations_return_same_result() -> + Config = [ + {"foo, bar||*", "true"}, + {"baz, qux||*", "false"}, + {"baz||shards/test*", "true"}, + {"baz||shards/blacklist*", "false"}, + {"bar||shards/test*", "false"}, + {"bar||shards/test/blacklist*", "true"} + ], + Expected = [ + {{<<"shards/test/blacklist*">>},{<<"shards/test/blacklist*">>,22,[bar, baz, foo]}}, + {{<<"shards/test*">>},{<<"shards/test*">>, 12, [baz, foo]}}, + {{<<"shards/blacklist*">>},{<<"shards/blacklist*">>, 17, [bar, foo]}}, + {{<<"*">>},{<<"*">>, 1, [bar, foo]}} + ], + Combinations = couch_tests_combinatorics:permutations(Config), + [{test_id(Items), ?_assertEqual(Expected, couch_flags_config:data(Items))} + || Items <- Combinations]. + +rules_are_sorted() -> + Expected = [ + {{<<"shards/test/exact">>},{<<"shards/test/exact">>, 17, [baz,flag_bar,flag_foo]}}, + {{<<"shards/test/blacklist*">>},{<<"shards/test/blacklist*">>,22,[flag_foo]}}, + {{<<"shards/test*">>},{<<"shards/test*">>, 12, [baz,flag_bar,flag_foo]}}, + {{<<"shards/exact">>},{<<"shards/exact">>, 12, [flag_bar,flag_foo]}}, + {{<<"shards/blacklist*">>},{<<"shards/blacklist*">>, 17, []}}, + {{<<"*">>},{<<"*">>, 1, [flag_foo]}} + ], + ?assertEqual(Expected, couch_flags_config:data(test_config())). 
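The configuration entries fed to couch_flags_config:data/1 above follow a
"flag1, flag2||pattern" key syntax with a "true"/"false" value. A
hypothetical helper showing just the key split (illustrative only; the
module's actual parser also validates flag names and lengths):

    %% Illustration of the "flags||pattern" key syntax used in the cases.
    parse_rule_key(Key) ->
        [FlagsStr, Pattern] = string:split(Key, "||"),
        Flags = [list_to_atom(string:trim(F))
            || F <- string:split(FlagsStr, ",", all)],
        {lists:usort(Flags), list_to_binary(Pattern)}.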
+ +latest_overide_wins() -> + Cases = [ + {[ + {"flag||*", "false"}, {"flag||a*", "true"}, + {"flag||ab*", "true"}, {"flag||abc*", "true"} + ], true}, + {[ + {"flag||*", "true"}, {"flag||a*", "false"}, + {"flag||ab*", "true"}, {"flag||abc*", "false"} + ], false} + ], + [{test_id(Rules, Expected), + ?_assertEqual(Expected, lists:member(flag, + flags(hd(couch_flags_config:data(Rules)))))} + || {Rules, Expected} <- Cases]. + +flags({{_Pattern}, {_Pattern, _Size, Flags}}) -> + Flags. + +test_id(Items, ExpectedResult) -> + lists:flatten(io_lib:format("~p -> ~p", [[P || {P, _} <- Items], ExpectedResult])). + + +test_id(Items) -> + lists:flatten(io_lib:format("~p", [[P || {P, _} <- Items]])). + +test_config() -> + [ + {"flag_foo||*", "true"}, + {"flag_bar||*", "false"}, + {"flag_bar||shards/test*", "true"}, + {"flag_foo||shards/blacklist*", "false"}, + {"baz||shards/test*", "true"}, + {"baz||shards/test/blacklist*", "false"}, + {"flag_bar||shards/exact", "true"}, + {"flag_bar||shards/test/exact", "true"} + ]. + +parse_flags_term_test_() -> + LongBinary = binary:copy(<<"a">>, ?MAX_FLAG_NAME_LENGTH + 1), + ExpectedError = {error, {"Cannot parse list of tags: ~n~p", + [{too_long, LongBinary}]}}, + ExpectedUnknownError = {error,{"Cannot parse list of tags: ~n~p", + [{invalid_flag,<<"dddddddd">>}]}}, + [ + {"empty binary", ?_assertEqual( + [], couch_flags_config:parse_flags_term(<<>>))}, + {"single flag", ?_assertEqual( + [fff], couch_flags_config:parse_flags_term(<<"fff">>))}, + {"sorted", ?_assertEqual( + [aaa,bbb,fff], couch_flags_config:parse_flags_term(<<"fff,aaa,bbb">>))}, + {"whitespace", ?_assertEqual( + [aaa,bbb,fff], couch_flags_config:parse_flags_term(<<"fff , aaa, bbb ">>))}, + {"error", ?_assertEqual( + ExpectedError, couch_flags_config:parse_flags_term(LongBinary))}, + {"unknown_flag", ?_assertEqual( + ExpectedUnknownError, couch_flags_config:parse_flags_term(<<"dddddddd">>))} + ]. + diff --git a/src/couch/test/eunit/couch_flags_tests.erl b/src/couch/test/eunit/couch_flags_tests.erl new file mode 100644 index 000000000..32ec57b77 --- /dev/null +++ b/src/couch/test/eunit/couch_flags_tests.erl @@ -0,0 +1,150 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_flags_tests). + +-include_lib("couch/include/couch_eunit.hrl"). + +%% couch_epi_plugin behaviour callbacks +-export([ + app/0, + providers/0, + services/0, + data_providers/0, + data_subscriptions/0, + processes/0, + notify/3 +]). + +-export([ + rules/0 +]). + +app() -> + test_app. + +providers() -> + [{feature_flags, ?MODULE}]. + +services() -> + []. + +data_providers() -> + []. + +data_subscriptions() -> + []. + +processes() -> + []. + +notify(_, _, _) -> + ok. + +rules() -> + test_config(). + +setup() -> + %% FIXME after we upgrade couch_epi + application:stop(couch_epi), % in case it's already running from other tests... 
+    application:unload(couch_epi),
+
+    application:load(couch_epi),
+    application:set_env(couch_epi, plugins, [couch_db_epi, ?MODULE]),
+    meck:expect(config, get, 1, []),
+
+    Ctx = test_util:start_couch([couch_epi]),
+    Ctx.
+
+
+teardown(Ctx) ->
+    test_util:stop_couch(Ctx),
+    ok = application:unload(couch_epi),
+    meck:unload(),
+    ok.
+
+couch_flags_test_() ->
+    {
+        "test couch_flags",
+        {
+            setup, fun setup/0, fun teardown/1,
+            enabled_flags_tests()
+            ++ is_enabled()
+%%            ++ match_performance()
+        }
+    }.
+
+enabled_flags_tests() ->
+
+    [{"enabled_flags_tests", [
+        {"flags_default_rule",
+            ?_assertEqual(
+                [foo], couch_flags:enabled("something"))},
+        {"flags_wildcard_rule",
+            ?_assertEqual(
+                [bar, baz, foo],
+                couch_flags:enabled("shards/test/something"))},
+        {"flags_exact_rule",
+            ?_assertEqual(
+                [bar, baz, foo],
+                couch_flags:enabled("shards/test/exact"))},
+        {"flags_blacklist_rule",
+            ?_assertEqual(
+                [],
+                couch_flags:enabled("shards/blacklist/4"))}
+    ]}].
+
+is_enabled() ->
+    [{"is_enabled_tests", [
+        {"flags_default_rule [enabled]",
+            ?_assert(couch_flags:is_enabled(foo, "something"))},
+        {"flags_default_rule [disabled]",
+            ?_assertNot(couch_flags:is_enabled(baz, "something"))},
+        {"flags_default_rule [not_existent]",
+            ?_assertNot(couch_flags:is_enabled(non_existent, "something"))},
+
+        {"flags_wildcard_rule [enabled]",
+            ?_assert(couch_flags:is_enabled(bar, "shards/test/something"))},
+        {"flags_wildcard_rule [not_existent]",
+            ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/something"))},
+
+        {"flags_exact_rule [override_disabled]",
+            ?_assert(couch_flags:is_enabled(bar, "shards/test/exact"))},
+        {"flags_exact_rule [not_existent]",
+            ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/exact"))},
+
+        {"flags_blacklist_rule [override_enabled]",
+            ?_assertNot(couch_flags:is_enabled(foo, "shards/blacklist/4"))},
+        {"flags_blacklist_rule [not_existent]",
+            ?_assertNot(couch_flags:is_enabled(non_existent, "shards/blacklist/4"))}
+    ]}].
+
+%% match_performance() ->
+%%     [{"match_performance", [
+%%         ?_test(begin
+%%             ?debugTime("1 million of operations took", lists:foreach(fun(_) ->
+%%                 couch_flags:is_enabled(bar, "shards/test/exact")
+%%             end, lists:seq(1, 1000000)))
+%%         end)
+%%     ]}].
+
+
+test_config() ->
+    [
+        {"foo||/*", "true"},
+        {"bar||/*", "false"},
+        {"bar||/shards/test*", "true"},
+        {"foo||/shards/blacklist*", "false"},
+        {"baz||/shards/test*", "true"},
+        {"bar||/shards/exact", "true"},
+        {"bar||/shards/test/exact", "true"}
+    ].
diff --git a/src/couch/test/eunit/couch_hotp_tests.erl b/src/couch/test/eunit/couch_hotp_tests.erl
new file mode 100644
index 000000000..fee10ff5e
--- /dev/null
+++ b/src/couch/test/eunit/couch_hotp_tests.erl
@@ -0,0 +1,28 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_hotp_tests).
+
+-include_lib("eunit/include/eunit.hrl").
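The values asserted below are the RFC 4226 Appendix D test vectors for the
key "12345678901234567890". As a sketch of the algorithm they pin down
(assuming OTP 22+'s crypto:mac/4; couch_hotp's internals may differ):

    %% RFC 4226 HOTP sketch: HMAC over the big-endian 8-byte counter,
    %% dynamic truncation via the low nibble of the last MAC byte,
    %% then reduction modulo 10^Digits.
    hotp_sketch(Hash, Key, Counter, Digits) ->
        Mac = crypto:mac(hmac, Hash, Key, <<Counter:64/big>>),
        Offset = binary:last(Mac) band 16#0f,
        <<_:Offset/binary, _:1, Code:31, _/binary>> = Mac,
        Code rem round(math:pow(10, Digits)).

For example, hotp_sketch(sha, <<"12345678901234567890">>, 0, 6) should
yield 755224, the first vector checked below.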
+ +hotp_test() -> + Key = <<"12345678901234567890">>, + ?assertEqual(755224, couch_hotp:generate(sha, Key, 0, 6)), + ?assertEqual(287082, couch_hotp:generate(sha, Key, 1, 6)), + ?assertEqual(359152, couch_hotp:generate(sha, Key, 2, 6)), + ?assertEqual(969429, couch_hotp:generate(sha, Key, 3, 6)), + ?assertEqual(338314, couch_hotp:generate(sha, Key, 4, 6)), + ?assertEqual(254676, couch_hotp:generate(sha, Key, 5, 6)), + ?assertEqual(287922, couch_hotp:generate(sha, Key, 6, 6)), + ?assertEqual(162583, couch_hotp:generate(sha, Key, 7, 6)), + ?assertEqual(399871, couch_hotp:generate(sha, Key, 8, 6)), + ?assertEqual(520489, couch_hotp:generate(sha, Key, 9, 6)). diff --git a/src/couch/test/eunit/couch_index_tests.erl b/src/couch/test/eunit/couch_index_tests.erl new file mode 100644 index 000000000..fab3806d0 --- /dev/null +++ b/src/couch/test/eunit/couch_index_tests.erl @@ -0,0 +1,234 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_index_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch_mrview/include/couch_mrview.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). + +-define(TIMEOUT, 1000). + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + create_design_doc(DbName, <<"_design/foo">>, <<"bar">>), + tracer_new(), + DbName. + +teardown(DbName) -> + tracer_delete(), + couch_server:delete(DbName, [?ADMIN_CTX]). + +couch_index_ioq_priority_test_() -> + { + "Test ioq_priority for views", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun check_io_priority_for_updater/1, + fun check_io_priority_for_compactor/1 + ] + } + } + }. + + +check_io_priority_for_updater(DbName) -> + ?_test(begin + {ok, IndexerPid} = couch_index_server:get_index( + couch_mrview_index, DbName, <<"_design/foo">>), + CouchIndexUpdaterPid = updater_pid(IndexerPid), + tracer_record(CouchIndexUpdaterPid), + + create_docs(DbName), + + CommittedSeq = couch_util:with_db(DbName, fun(Db) -> couch_db:get_update_seq(Db) end), + couch_index:get_state(IndexerPid, CommittedSeq), + [UpdaterPid] = wait_spawn_event_for_pid(CouchIndexUpdaterPid), + + [UpdaterMapProcess] = wait_spawn_by_anonymous_fun( + UpdaterPid, '-start_update/4-fun-0-'), + + ?assert(wait_set_io_priority( + UpdaterMapProcess, {view_update, DbName, <<"_design/foo">>})), + + [UpdaterWriterProcess] = wait_spawn_by_anonymous_fun( + UpdaterPid, '-start_update/4-fun-1-'), + ?assert(wait_set_io_priority( + UpdaterWriterProcess, {view_update, DbName, <<"_design/foo">>})), + + ok + end). 
+ +check_io_priority_for_compactor(DbName) -> + ?_test(begin + {ok, IndexerPid} = couch_index_server:get_index( + couch_mrview_index, DbName, <<"_design/foo">>), + {ok, CompactorPid} = couch_index:get_compactor_pid(IndexerPid), + tracer_record(CompactorPid), + + create_docs(DbName), + + couch_index:compact(IndexerPid), + wait_spawn_event_for_pid(CompactorPid), + + [CompactorProcess] = wait_spawn_by_anonymous_fun( + CompactorPid, '-handle_call/3-fun-0-'), + ?assert(wait_set_io_priority( + CompactorProcess, {view_compact, DbName, <<"_design/foo">>})), + ok + end). + +create_docs(DbName) -> + {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), + Doc1 = couch_doc:from_json_obj({[ + {<<"_id">>, <<"doc1">>}, + {<<"value">>, 1} + + ]}), + Doc2 = couch_doc:from_json_obj({[ + {<<"_id">>, <<"doc2">>}, + {<<"value">>, 2} + + ]}), + Doc3 = couch_doc:from_json_obj({[ + {<<"_id">>, <<"doc3">>}, + {<<"value">>, 3} + + ]}), + {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]), + couch_db:ensure_full_commit(Db), + couch_db:close(Db). + +create_design_doc(DbName, DDName, ViewName) -> + {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DDName}, + {<<"language">>, <<"javascript">>}, + {<<"views">>, {[ + {ViewName, {[ + {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>} + ]}} + ]}} + ]}), + {ok, Rev} = couch_db:update_doc(Db, DDoc, []), + couch_db:ensure_full_commit(Db), + couch_db:close(Db), + Rev. + +wait_set_io_priority(Pid, IOPriority) -> + test_util:wait_value(fun() -> + does_process_set_io_priority(Pid, IOPriority) + end, true). + +does_process_set_io_priority(Pid, IOPriority) -> + PutCallsArgs = find_calls_to_fun(Pid, {erlang, put, 2}), + lists:any(fun([_, Priority]) -> Priority =:= IOPriority end, PutCallsArgs). + +wait_events(MatchSpec) -> + test_util:wait_other_value(fun() -> select(MatchSpec) end, []). + +find_spawned_by_anonymous_fun(ParentPid, Name) -> + AnonymousFuns = select(ets:fun2ms(fun + ({spawned, Pid, _TS, _Name, _Dict, [PPid, {erlang, apply, [Fun, _]}]}) + when is_function(Fun) andalso PPid =:= ParentPid -> {Pid, Fun} + end)), + lists:filtermap(fun({Pid, Fun}) -> + case erlang:fun_info(Fun, name) of + {name, Name} -> {true, Pid}; + _ -> false + end + end, AnonymousFuns). + +find_calls_to_fun(Pid, {Module, Function, Arity}) -> + select(ets:fun2ms(fun + ({call, P, _TS, _Name, _Dict, [{M, F, Args}]}) + when length(Args) =:= Arity + andalso M =:= Module + andalso F =:= Function + andalso P =:= Pid + -> Args + end)). + +wait_spawn_event_for_pid(ParentPid) -> + wait_events(ets:fun2ms(fun + ({spawned, Pid, _TS, _Name, _Dict, [P, _]}) when P =:= ParentPid -> Pid + end)). + +wait_spawn_by_anonymous_fun(ParentPid, Name) -> + test_util:wait_other_value(fun() -> + find_spawned_by_anonymous_fun(ParentPid, Name) + end, []). + +updater_pid(IndexerPid) -> + {links, Links} = process_info(IndexerPid, links), + [Pid] = select_process_by_name_prefix(Links, "couch_index_updater:init/1"), + Pid. + +select_process_by_name_prefix(Pids, Name) -> + lists:filter(fun(Pid) -> + Key = couch_debug:process_name(Pid), + string:str(Key, Name) =:= 1 + end, Pids). + +select(MatchSpec) -> + lists:filtermap(fun(Event) -> + case ets:test_ms(Event, MatchSpec) of + {ok, false} -> false; + {ok, Result} -> {true, Result}; + _ -> false + end + end, tracer_events()). 
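select/1 and the wait_* helpers above lean on ets:fun2ms/1, which the
ms_transform include turns into a compile-time match specification. A small
shell illustration of that expansion:

    %% ets:fun2ms/1 expands a literal fun into a match specification:
    %% 1> ets:fun2ms(fun({K, V}) when V > 1 -> K end).
    %% [{{'$1','$2'},[{'>','$2',1}],['$1']}]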
+
+
+%% ========================
+%% Tracer related functions
+%% ------------------------
+tracer_new() ->
+    ets:new(?MODULE, [public, named_table]),
+    {ok, _Tracer} = dbg:tracer(process, {fun tracer_collector/2, 0}),
+    ok.
+
+tracer_delete() ->
+    dbg:stop_clear(),
+    (catch ets:delete(?MODULE)),
+    ok.
+
+tracer_record(Pid) ->
+    {ok, _} = dbg:tp(erlang, put, x),
+    {ok, _} = dbg:p(Pid, [c, p, sos]),
+    ok.
+
+tracer_events() ->
+    Events = [{Idx, E} || [Idx, E] <- ets:match(?MODULE, {{trace, '$1'}, '$2'})],
+    {_, Sorted} = lists:unzip(lists:keysort(1, Events)),
+    Sorted.
+
+tracer_collector(Msg, Seq) ->
+    ets:insert(?MODULE, {{trace, Seq}, normalize_trace_msg(Msg)}),
+    Seq + 1.
+
+normalize_trace_msg(TraceMsg) ->
+    case tuple_to_list(TraceMsg) of
+        [trace_ts, Pid, Type | Info] ->
+            {TraceInfo, [Timestamp]} = lists:split(length(Info)-1, Info),
+            {Type, Pid, Timestamp, couch_debug:process_name(Pid), process_info(Pid), TraceInfo};
+        [trace, Pid, Type | TraceInfo] ->
+            {Type, Pid, os:timestamp(), couch_debug:process_name(Pid), process_info(Pid), TraceInfo}
+    end.
diff --git a/src/couch/test/eunit/couch_key_tree_prop_tests.erl b/src/couch/test/eunit/couch_key_tree_prop_tests.erl
new file mode 100644
index 000000000..f8146926a
--- /dev/null
+++ b/src/couch/test/eunit/couch_key_tree_prop_tests.erl
@@ -0,0 +1,530 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_key_tree_prop_tests).
+
+-include_lib("triq/include/triq.hrl").
+-triq(eunit).
+
+-define(SIZE_REDUCTION, 3). % How much to reduce size with tree depth.
+-define(MAX_BRANCHES, 4). % Maximum number of branches.
+-define(RAND_SIZE, 1 bsl 64).
+
+
+%
+% Properties
+%
+
+
+% Merge random paths from a revtree into itself. Check that no revisions have
+% been lost in the process and that the result is one of the 3 expected values.
+%
+prop_revtree_merge_with_subset_of_own_nodes() ->
+    ?FORALL(Revs, g_revs(),
+        ?FORALL({RevTree, Branch}, {g_revtree(Revs), g_revtree(Revs, 1)},
+            ?IMPLIES(length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [],
+                begin
+                    {Merged, Result} = couch_key_tree:merge(RevTree, hd(Branch)),
+                    lists:member(Result, [new_leaf, new_branch, internal_node])
+                        andalso same_keys(RevTree ++ Branch, Merged)
+                        andalso valid_revtree(Merged)
+                end
+            )
+        )
+    ).
+
+
+% Merge random trees into revtree.
+%
+prop_revtree_merge_random_nodes() ->
+    ?FORALL({RevTree, Branch}, {g_revtree(), g_revtree([], 1)},
+        ?IMPLIES(length(Branch) > 0,
+            begin
+                {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)),
+                valid_revtree(Merged)
+            end
+        )
+    ).
+
+
+
+% Merge a mix of random and existing revtree paths into revtree
+%
+prop_revtree_merge_some_existing_some_new() ->
+    ?FORALL(RevTree, g_revtree(),
+        ?FORALL(Branch,
+            begin
+                KeyList = keylist(RevTree),
+                Half = lists:sublist(KeyList, length(KeyList) div 2),
+                g_revtree(Half, 1)
+            end,
+            ?IMPLIES(length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [],
+                begin
+                    {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)),
+                    valid_revtree(Merged)
+                end
+            )
+        )
+    ).
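Because of the -triq(eunit). attribute above, each prop_* function in this
module is wrapped into an EUnit test automatically. A single property can
also be checked by hand (hypothetical shell session, assuming triq is on
the code path):

    %% 1> triq:check(couch_key_tree_prop_tests:prop_leaf_count()).
    %% runs the property against a batch of random inputs and reports
    %% true on success or a shrunk counterexample on failure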
+
+
+
+% Stem deeper than the current max level. Expect no changes to the revtree
+%
+prop_no_change_stemming_deeper_than_current_depth() ->
+    ?FORALL(RevTree, g_revtree(),
+        begin
+            StemDepth = depth(RevTree) + 1,
+            Stemmed = couch_key_tree:stem(RevTree, StemDepth),
+            StemmedKeys = lists:usort(keylist(Stemmed)),
+            InputKeys = lists:usort(keylist(RevTree)),
+            StemmedKeys == InputKeys
+        end
+    ).
+
+
+% Stem at a random small depth, make sure that the resulting tree has
+% unique revisions and the same number of revisions as the input or fewer
+%
+prop_stemming_results_in_same_or_less_total_revs() ->
+    ?FORALL({RevTree, StemDepth}, {g_revtree(), choose(1, 20)},
+        begin
+            Stemmed = couch_key_tree:stem(RevTree, StemDepth),
+            OldRealDepth = real_depth(RevTree),
+            StemmedKeys = keylist(Stemmed),
+            UniqueStemmedKeys = lists:usort(StemmedKeys),
+            UniqueInputKeys = lists:usort(keylist(RevTree)),
+            NewRealDepth = real_depth(Stemmed),
+            length(StemmedKeys) == length(UniqueStemmedKeys)
+                andalso length(UniqueStemmedKeys) =< length(UniqueInputKeys)
+                andalso OldRealDepth >= NewRealDepth
+        end
+    ).
+
+
+% Generate a longer path (revtree with no branches) then stem it.
+% Always expect it to shrink to the stemmed depth.
+prop_stem_path_expect_size_to_get_smaller() ->
+    ?FORALL({RevTree, StemDepth},
+        {
+            ?SIZED(Size, resize(Size * 10, g_revtree([], 1))),
+            choose(1,5)
+        },
+        ?IMPLIES(real_depth(RevTree) > 5,
+            begin
+                Stemmed = couch_key_tree:stem(RevTree, StemDepth),
+                StemmedKeys = lists:usort(keylist(Stemmed)),
+                InputKeys = lists:usort(keylist(RevTree)),
+                length(InputKeys) > length(StemmedKeys)
+                    andalso real_depth(Stemmed) == StemDepth
+            end
+        )
+    ).
+
+
+% After stemming all leaves are still present
+prop_after_stemming_all_leaves_are_present() ->
+    ?FORALL({RevTree, StemDepth},
+        {g_revtree(), choose(1,20)},
+        begin
+            OldRealDepth = real_depth(RevTree),
+            OldLeaves = leaves(RevTree),
+            Stemmed = couch_key_tree:stem(RevTree, StemDepth),
+            NewRealDepth = real_depth(Stemmed),
+            NewLeaves = leaves(Stemmed),
+            valid_revtree(Stemmed)
+                andalso OldRealDepth >= NewRealDepth
+                andalso OldLeaves == NewLeaves
+
+        end
+    ).
+
+
+% After stemming paths to root didn't get longer
+prop_after_stemming_paths_are_shorter() ->
+    ?FORALL({StemDepth, RevTree}, {choose(2,10), g_revtree()},
+        begin
+            OldPaths = paths(RevTree),
+            Stemmed = couch_key_tree:stem(RevTree, StemDepth),
+            NewPaths = paths(Stemmed),
+            GrowingPaths = orddict:fold(fun(Rev, Path, Acc) ->
+                OldPath = orddict:fetch(Rev, OldPaths),
+                case length(Path) > length(OldPath) of
+                    true ->
+                        [{Rev, Path, OldPath}| Acc];
+                    false ->
+                        Acc
+                end
+            end, [], NewPaths),
+            valid_revtree(Stemmed) andalso GrowingPaths == []
+        end
+    ).
+
+
+% Check leaf count
+prop_leaf_count() ->
+    ?FORALL(RevTree, g_revtree(),
+        length(leaves(RevTree)) == couch_key_tree:count_leafs(RevTree)
+    ).
+
+
+% Check get leafs
+prop_get_leafs() ->
+    ?FORALL(RevTree, g_revtree(),
+        begin
+            LeafsFull = couch_key_tree:get_all_leafs(RevTree),
+            lists:usort([Rev || {_V, {_D, [Rev | _]}} <- LeafsFull]) == leaves(RevTree)
+        end
+    ).
+
+
+%
+% Generators
+%
+
+% Generate a full rev tree. Most of the forms are just there to set up default
+% parameters; g_revtree/3 does all the heavy lifting.
+%
+
+g_revtree() ->
+    ?SIZED(Size, g_revtree(Size)).
+
+
+g_revtree(Size) when is_integer(Size) ->
+    g_revtree(Size, [], ?MAX_BRANCHES);
+g_revtree(Revs) when is_list(Revs) ->
+    ?SIZED(Size, g_revtree(Size, Revs, ?MAX_BRANCHES)).
+
+
+g_revtree(Size, Revs) when is_integer(Size), is_list(Revs) ->
+    g_revtree(Size, Revs, ?MAX_BRANCHES);
+g_revtree(Revs, MaxBranches) when is_list(Revs), is_integer(MaxBranches) ->
+    ?SIZED(Size, g_revtree(Size, Revs, MaxBranches)).
+
+
+g_revtree(0, _Revs, _MaxBranches) ->
+    [];
+g_revtree(Size, ERevs, MaxBranches) ->
+    ?LET({Depth, Revs}, {g_stem_depth(Size), g_revs(Size, ERevs)},
+        [{Depth, g_treenode(Size, Revs, MaxBranches)}]
+    ).
+
+
+% Generate a tree node and then recursively generate its children.
+%
+g_treenode(0, Revs, _) ->
+    {elements(Revs), x, []};
+g_treenode(Size, Revs, MaxBranches) ->
+    ?DELAY(?LET(N, int(0, MaxBranches),
+        begin
+            [Rev | ChildRevs] = Revs,
+            {Rev, x, g_nodes(Size div ?SIZE_REDUCTION, N, ChildRevs, MaxBranches)}
+        end
+    )).
+
+
+% Generate a list of child nodes. Depending on how many children there are,
+% the pre-generated revision list is split into that many sublists.
+%
+g_nodes(0, _N, _Revs, _MaxBranches) ->
+    [];
+g_nodes(_Size, 0, _Revs, _MaxBranches) ->
+    [];
+g_nodes(Size, ChildCount, Revs, MaxBranches) ->
+    ?LETSHRINK(
+        ChildNodes,
+        begin
+            ChildRevList = child_revs(ChildCount, Revs, Size, MaxBranches),
+            [g_treenode(Size, ChildRevs, MaxBranches) || ChildRevs <- ChildRevList]
+        end,
+        ordered_nodes(ChildNodes)
+    ).
+
+
+% Generate each subtree's stem depth
+%
+g_stem_depth(Size) ->
+    choose(0, expected_height(Size, ?SIZE_REDUCTION) div 2).
+
+
+% Uses the shuffle/1 function to shuffle the input list. The unshuffled list
+% is used as the shrink value.
+%
+g_shuffle(L) when is_list(L) ->
+    triq_dom:domain(g_shuffle,
+        fun(Self, _Size) -> {Self, shuffle(L)} end,
+        fun(Self, _Val) -> {Self, L} end
+    ).
+
+
+% Wrapper to make a list shuffling generator that doesn't shrink
+%
+g_shuffle_noshrink(L) when is_list(L) ->
+    triq_dom:noshrink(g_shuffle(L)).
+
+
+% Generate shuffled sublists up to N items long from a list.
+%
+g_shuffled_sublists(L, N) ->
+    ?LET(Shuffled, g_shuffle_noshrink(L), lists:sublist(Shuffled, N)).
+
+
+% Generate revision lists.
+%
+g_revs() ->
+    ?SIZED(Size, g_revs(Size)).
+
+
+g_revs(Size) when is_integer(Size) ->
+    g_revs(Size, []).
+
+
+g_revs(Size, Existing) when is_integer(Size), is_list(Existing) ->
+    Expected = keys_needed(Size, ?SIZE_REDUCTION, ?MAX_BRANCHES),
+    Revs = revs(Expected, Existing),
+    case length(Revs) > Expected of
+        true -> % have extra, try various sublists
+            g_shuffled_sublists(Revs, Expected);
+        false ->
+            triq_dom:return(Revs)
+    end.
+
+
+%
+% Helper functions
+%
+
+
+valid_revtree(RevTree) ->
+    repeating_revs(levels(RevTree)) == [] andalso children_sorted(RevTree).
+
+
+same_keys(RevTree1, RevTree2) ->
+    Keys1 = lists:usort(keylist(RevTree1)),
+    Keys2 = lists:usort(keylist(RevTree2)),
+    Keys1 == Keys2.
+
+
+all(L) ->
+    lists:all(fun(E) -> E end, L).
+
+
+% Shuffle a list of items. Tag each item with a random number, then sort
+% the list and remove the tags.
+%
+shuffle(L) ->
+    Tagged = [{triq_rnd:uniform(), X} || X <- L],
+    [X || {_, X} <- lists:sort(Tagged)].
+
+
+% Generate a list of relatively unique large random numbers
+rand_list(N) when N =< 0 ->
+    [];
+rand_list(N) ->
+    [triq_rnd:uniform(?RAND_SIZE) || _ <- lists:seq(1, N)].
+
+
+% Generate a list of revisions to be used as keys in revision trees. Expected
+% must be the maximum expected number of nodes in a revision tree. Existing is
+% an optional list of revisions which must be included in the result. The
+% output list is sorted.
+revs(0, _Existing) ->
+    [];
+revs(Expected, Existing) when is_integer(Expected), is_list(Existing) ->
+    Need = Expected - length(Existing),
+    lists:usort(lists:append(Existing, rand_list(Need))).
+
+
+% Get the list of all the keys in a revision tree. The input can also be an
+% individual tree (tagged with the depth to virtual root) or a node.
+% Yes, this is not tail recursive but the idea is to keep it simple.
+%
+keylist({_D, Node}) when is_tuple(Node) ->
+    keylist(Node);
+keylist({K, _V, Nodes}) ->
+    [K | keylist(Nodes)];
+keylist(Nodes) ->
+    lists:append([keylist(Node) || Node <- Nodes]).
+
+
+% Get the list of leaves from a revision tree.
+leaves([]) ->
+    [];
+leaves({_D, Node}) when is_tuple(Node) ->
+    leaves(Node);
+leaves({K, _V, []}) ->
+    [K];
+leaves({_K, _V, Nodes}) ->
+    leaves(Nodes);
+leaves(Nodes) ->
+    lists:usort(lists:append([leaves(N) || N <- Nodes])).
+
+
+% Get paths from leaf to root. Result is an orddict of [{LeafRev, [Rev]}]
+%
+paths([]) ->
+    orddict:new();
+paths(RevTree) when is_list(RevTree) ->
+    paths_merge_dicts([paths(T) || T <- RevTree]);
+paths({_Depth, Node}) when is_tuple(Node) ->
+    paths(Node);
+paths({K, _V, []}) ->
+    orddict:store(K, [], orddict:new());
+paths({K, _V, Nodes}) ->
+    CombinedDict = paths_merge_dicts([paths(N) || N <- Nodes]),
+    orddict:map(fun(_LeafKey, Path) -> Path ++ [K] end, CombinedDict).
+
+
+paths_merge_dicts(Dicts) ->
+    lists:foldl(fun(D, AccD) ->
+        orddict:merge(fun(K, V1, V2) ->
+            throw({found_duplicates, K, V1, V2})
+        end, D, AccD)
+    end, orddict:new(), Dicts).
+
+
+% Get lists of all the keys at each depth level. Result is an orddict that
+% looks like [{depth, [key]}]. The depth used here is the "virtual" depth as
+% indicated by the stemmed depth tag that goes with every top level subtree.
+%
+levels([]) ->
+    orddict:new();
+levels(RevTree) when is_list(RevTree) ->
+    lists:foldl(fun(T, Dict) -> levels(T, Dict) end, orddict:new(), RevTree).
+
+
+levels({Depth, Node}, Dict) when is_tuple(Node) ->
+    levels(Node, Depth, Dict).
+
+
+levels({K, _V, Nodes}, Depth, Dict) ->
+    Dict1 = case orddict:is_key(Depth, Dict) of
+        true -> orddict:append(Depth, K, Dict);
+        false -> orddict:store(Depth, [K], Dict)
+    end,
+    levels(Nodes, Depth + 1, Dict1);
+levels(Nodes, Depth, Dict) ->
+    lists:foldl(fun(Node, AccDict) ->
+        levels(Node, Depth, AccDict)
+    end, Dict, Nodes).
+
+
+% Using the output of levels/1 as input, return any repeating revisions if
+% there are any at a particular level. Levels which have no revisions are
+% not returned.
+%
+repeating_revs(Dict) ->
+    orddict:filter(fun(_Depth, Revs) ->
+        length(lists:usort(Revs)) =/= length(Revs)
+    end, Dict).
+
+
+% Check that children of all nodes are sorted
+children_sorted([]) ->
+    true;
+children_sorted(Nodes) when is_list(Nodes) ->
+    all([children_sorted(N) || N <- Nodes]);
+children_sorted({_D, Node}) when is_tuple(Node) ->
+    children_sorted(Node);
+children_sorted({_K, _V, Nodes}) ->
+    children_sorted(Nodes).
+
+
+% Get the maximum depth of a revtree. The depth is "virtual" as it takes into
+% account the distance to the now stemmed root node as indicated by the top
+% level subtrees.
+%
+depth([]) ->
+    0;
+depth(RevTree) when is_list(RevTree) ->
+    lists:max([depth(T) || T <- RevTree]);
+depth({Depth, Node}) when is_tuple(Node) ->
+    depth(Node, Depth - 1).
+
+
+depth({_K, _V, Nodes}, Depth) ->
+    depth(Nodes, Depth + 1);
+depth([], Depth) ->
+    Depth;
+depth(Nodes, Depth) ->
+    lists:max([depth(Node, Depth) || Node <- Nodes]).
+
+
+% Get the "real" tree depth, not the virtual one. As revtrees get stemmed,
+% they keep their virtual depth but the actual number of nodes in the tree
+% could be reduced.
+%
+real_depth([]) ->
+    0;
+real_depth(RevTree) when is_list(RevTree) ->
+    lists:max([real_depth(T) || T <- RevTree]);
+real_depth({_Depth, Node}) when is_tuple(Node) ->
+    depth(Node, 0). % Note from here on use the depth/3 function
+
+
+% Return an ordered list of revtree nodes. When sorting, only immediate keys
+% (revisions) are looked at and the comparison doesn't descend into the tree.
+%
+ordered_nodes(Nodes) ->
+    lists:sort(fun({K1, _, _}, {K2, _, _}) -> K1 =< K2 end, Nodes).
+
+
+% Calculate the maximum number of rev tree nodes needed for a tree of a given
+% height and branchiness. Height is derived from Size and LevelReductionFactor,
+% that is, how big the sample should be and how quickly the size parameter
+% shrinks on each level.
+%
+keys_needed(0, _, _) ->
+    0;
+keys_needed(Size, LevelReductionFactor, 1) ->
+    expected_height(Size, LevelReductionFactor);
+keys_needed(Size, LevelReductionFactor, Branches) ->
+    Height = expected_height(Size, LevelReductionFactor),
+    trunc(math:pow(Branches, Height + 1)) + 1.
+
+
+% Calculate the expected tree height for a given sample size and branchiness.
+% At each step the size is divided by the reduction factor.
+expected_height(Size, LevelReductionFactor) ->
+    trunc(log(LevelReductionFactor, Size)) + 1.
+
+
+log(B, X) ->
+    math:log(X) / math:log(B).
+
+
+% Distribute items in a list into roughly equal chunks of a given size.
+%
+distribute(_ChunkSize, []) ->
+    [];
+distribute(ChunkSize, L) when ChunkSize >= length(L) ->
+    [L];
+distribute(ChunkSize, L) ->
+    {L1, L2} = lists:split(ChunkSize, L),
+    [L1 | distribute(ChunkSize, L2)].
+
+
+% Split a single (parent) revision list into chunks (sub-lists), one for each
+% child. Also, for safety, double check that at this point in the process the
+% list of revisions is sufficiently large. If it isn't, something went wrong
+% and a specific exception is thrown ({not_enough_revisions, Got, Needed}).
+%
+child_revs(ChildCount, Revs, Size, MaxBranches) ->
+    NeedKeys = keys_needed(Size, ?SIZE_REDUCTION, MaxBranches),
+    case length(Revs) >= NeedKeys of
+        true ->
+            ChunkSize = trunc(length(Revs) / ChildCount) + 1,
+            distribute(ChunkSize, Revs);
+        false ->
+            throw({not_enough_revisions, length(Revs), NeedKeys})
+    end.
diff --git a/src/couch/test/eunit/couch_key_tree_tests.erl b/src/couch/test/eunit/couch_key_tree_tests.erl
new file mode 100644
index 000000000..5d9cc8372
--- /dev/null
+++ b/src/couch/test/eunit/couch_key_tree_tests.erl
@@ -0,0 +1,413 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_key_tree_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+
+-define(DEPTH, 10).
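+
+
+% The trees below are written as {StemDepth, {Key, Value, ChildNodes}}. As a
+% quick orientation (mirroring the stemming assertions at the end of this
+% module), stemming a three-revision path to depth 1 keeps only the deepest
+% node, tagged with its distance from the original root:
+%
+%   couch_key_tree:stem([{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], 1)
+%   %% => [{2, {"1aa", "bar", []}}]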
+ + +key_tree_merge_test_()-> + { + "Key tree merge", + [ + should_merge_with_empty_tree(), + should_merge_reflexive(), + should_merge_prefix_of_a_tree_with_tree(), + should_produce_conflict_on_merge_with_unrelated_branch(), + should_merge_reflexive_for_child_nodes(), + should_merge_tree_to_itself(), + should_merge_tree_of_odd_length(), + should_merge_tree_with_stem(), + should_merge_with_stem_at_deeper_level(), + should_merge_with_stem_at_deeper_level_with_deeper_paths(), + should_merge_single_tree_with_deeper_stem(), + should_merge_tree_with_large_stem(), + should_merge_stems(), + should_create_conflicts_on_merge(), + should_create_no_conflicts_on_merge(), + should_ignore_conflicting_branch() + ] + }. + +key_tree_missing_leaves_test_()-> + { + "Missing tree leaves", + [ + should_not_find_missing_leaves(), + should_find_missing_leaves() + ] + }. + +key_tree_remove_leaves_test_()-> + { + "Remove tree leaves", + [ + should_have_no_effect_on_removing_no_leaves(), + should_have_no_effect_on_removing_non_existant_branch(), + should_remove_leaf(), + should_produce_empty_tree_on_removing_all_leaves(), + should_have_no_effect_on_removing_non_existant_node(), + should_produce_empty_tree_on_removing_last_leaf() + ] + }. + +key_tree_get_leaves_test_()-> + { + "Leaves retrieving", + [ + should_extract_subtree(), + should_extract_subsubtree(), + should_gather_non_existant_leaf(), + should_gather_leaf(), + shoul_gather_multiple_leaves(), + should_gather_single_leaf_for_multiple_revs(), + should_gather_multiple_for_multiple_revs(), + should_retrieve_full_key_path(), + should_retrieve_full_key_path_for_node(), + should_retrieve_leaves_with_parent_node(), + should_retrieve_all_leaves() + ] + }. + +key_tree_leaf_counting_test_()-> + { + "Leaf counting", + [ + should_have_no_leaves_for_empty_tree(), + should_have_single_leaf_for_tree_with_single_node(), + should_have_two_leaves_for_tree_with_chindler_siblings(), + should_not_affect_on_leaf_counting_for_stemmed_tree() + ] + }. + +key_tree_stemming_test_()-> + { + "Stemming", + [ + should_have_no_effect_for_stemming_more_levels_than_exists(), + should_return_one_deepest_node(), + should_return_two_deepest_nodes() + ] + }. + + +should_merge_with_empty_tree()-> + One = {1, {"1","foo",[]}}, + ?_assertEqual({[One], new_leaf}, + merge_and_stem([], One)). + +should_merge_reflexive()-> + One = {1, {"1","foo",[]}}, + ?_assertEqual({[One], internal_node}, + merge_and_stem([One], One)). + +should_merge_prefix_of_a_tree_with_tree()-> + One = {1, {"1","foo",[]}}, + TwoSibs = [{1, {"1","foo",[]}}, + {1, {"2","foo",[]}}], + ?_assertEqual({TwoSibs, internal_node}, + merge_and_stem(TwoSibs, One)). + +should_produce_conflict_on_merge_with_unrelated_branch()-> + TwoSibs = [{1, {"1","foo",[]}}, + {1, {"2","foo",[]}}], + Three = {1, {"3","foo",[]}}, + ThreeSibs = [{1, {"1","foo",[]}}, + {1, {"2","foo",[]}}, + {1, {"3","foo",[]}}], + ?_assertEqual({ThreeSibs, new_branch}, + merge_and_stem(TwoSibs, Three)). + +should_merge_reflexive_for_child_nodes()-> + TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, + ?_assertEqual({[TwoChild], internal_node}, + merge_and_stem([TwoChild], TwoChild)). 
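+
+% The second element returned by couch_key_tree:merge/2, asserted throughout
+% this module, reads as follows (inferred from the cases in this file):
+% new_leaf when the merged path extends an existing branch with a new leaf,
+% new_branch when it introduces a conflicting sibling branch, and
+% internal_node when every key in the path was already present in the tree.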
+ +should_merge_tree_to_itself()-> + TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []}, + {"1b", "bar", []}]}}, + Leafs = couch_key_tree:get_all_leafs([TwoChildSibs]), + Paths = lists:map(fun leaf_to_path/1, Leafs), + FinalTree = lists:foldl(fun(Path, TreeAcc) -> + {NewTree, internal_node} = merge_and_stem(TreeAcc, Path), + NewTree + end, [TwoChildSibs], Paths), + ?_assertEqual([TwoChildSibs], FinalTree). + +leaf_to_path({Value, {Start, Keys}}) -> + [Branch] = to_branch(Value, lists:reverse(Keys)), + {Start - length(Keys) + 1, Branch}. + +to_branch(Value, [Key]) -> + [{Key, Value, []}]; +to_branch(Value, [Key | RestKeys]) -> + [{Key, [], to_branch(Value, RestKeys)}]. + + +should_merge_tree_of_odd_length()-> + TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, + TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []}, + {"1b", "bar", []}]}}, + TwoChildPlusSibs = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}, + {"1b", "bar", []}]}}, + ?_assertEqual({[TwoChildPlusSibs], new_leaf}, + merge_and_stem([TwoChildSibs], TwoChild)). + +should_merge_tree_with_stem()-> + Stemmed = {2, {"1a", "bar", []}}, + TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []}, + {"1b", "bar", []}]}}, + + ?_assertEqual({[TwoChildSibs], internal_node}, + merge_and_stem([TwoChildSibs], Stemmed)). + +should_merge_with_stem_at_deeper_level()-> + Stemmed = {3, {"1bb", "boo", []}}, + TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []}, + {"1b", "bar", [{"1bb", "boo", []}]}]}}, + ?_assertEqual({[TwoChildSibs], internal_node}, + merge_and_stem([TwoChildSibs], Stemmed)). + +should_merge_with_stem_at_deeper_level_with_deeper_paths()-> + Stemmed = {3, {"1bb", "boo", []}}, + StemmedTwoChildSibs = [{2,{"1a", "bar", []}}, + {2,{"1b", "bar", [{"1bb", "boo", []}]}}], + ?_assertEqual({StemmedTwoChildSibs, internal_node}, + merge_and_stem(StemmedTwoChildSibs, Stemmed)). + +should_merge_single_tree_with_deeper_stem()-> + Stemmed = {3, {"1aa", "bar", []}}, + TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, + ?_assertEqual({[TwoChild], internal_node}, + merge_and_stem([TwoChild], Stemmed)). + +should_merge_tree_with_large_stem()-> + Stemmed = {2, {"1a", "bar", [{"1aa", "bar", []}]}}, + TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, + ?_assertEqual({[TwoChild], internal_node}, + merge_and_stem([TwoChild], Stemmed)). + +should_merge_stems()-> + StemmedA = {2, {"1a", "bar", [{"1aa", "bar", []}]}}, + StemmedB = {3, {"1aa", "bar", []}}, + ?_assertEqual({[StemmedA], internal_node}, + merge_and_stem([StemmedA], StemmedB)). + +should_create_conflicts_on_merge()-> + OneChild = {1, {"1","foo",[{"1a", "bar", []}]}}, + Stemmed = {3, {"1aa", "bar", []}}, + ?_assertEqual({[OneChild, Stemmed], new_branch}, + merge_and_stem([OneChild], Stemmed)). + +should_create_no_conflicts_on_merge()-> + OneChild = {1, {"1","foo",[{"1a", "bar", []}]}}, + Stemmed = {3, {"1aa", "bar", []}}, + TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, + ?_assertEqual({[TwoChild], new_leaf}, + merge_and_stem([OneChild, Stemmed], TwoChild)). 
+
+should_ignore_conflicting_branch()->
+    %% this test is based on couch-902-test-case2.py
+    %% foo has conflicts from replication at depth two
+    %% foo3 is the current value
+    Foo = {1, {"foo",
+               "val1",
+               [{"foo2","val2",[]},
+                {"foo3", "val3", []}
+               ]}},
+    %% foo now has an attachment added, which leads to foo4 and val4
+    %% off foo3
+    Bar = {1, {"foo",
+               [],
+               [{"foo3",
+                 [],
+                 [{"foo4","val4",[]}
+                 ]}]}},
+    %% this is what the merge returns
+    %% note that it ignores the conflicting branch as there's no match
+    FooBar = {1, {"foo",
+                  "val1",
+                  [{"foo2","val2",[]},
+                   {"foo3", "val3", [{"foo4","val4",[]}]}
+                  ]}},
+    {
+        "COUCHDB-902",
+        ?_assertEqual({[FooBar], new_leaf},
+                      merge_and_stem([Foo], Bar))
+    }.
+
+should_not_find_missing_leaves()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual([],
+                  couch_key_tree:find_missing(TwoChildSibs,
+                                              [{0,"1"}, {1,"1a"}])).
+
+should_find_missing_leaves()->
+    Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+    Stemmed2 = [{2, {"1aa", "bar", []}}],
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    [
+        ?_assertEqual(
+            [{0, "10"}, {100, "x"}],
+            couch_key_tree:find_missing(
+                TwoChildSibs,
+                [{0,"1"}, {0, "10"}, {1,"1a"}, {100, "x"}])),
+        ?_assertEqual(
+            [{0, "1"}, {100, "x"}],
+            couch_key_tree:find_missing(
+                Stemmed1,
+                [{0,"1"}, {1,"1a"}, {100, "x"}])),
+        ?_assertEqual(
+            [{0, "1"}, {1,"1a"}, {100, "x"}],
+            couch_key_tree:find_missing(
+                Stemmed2,
+                [{0,"1"}, {1,"1a"}, {100, "x"}]))
+    ].
+
+should_have_no_effect_on_removing_no_leaves()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({TwoChildSibs, []},
+                  couch_key_tree:remove_leafs(TwoChildSibs,
+                                              [])).
+
+should_have_no_effect_on_removing_non_existant_branch()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({TwoChildSibs, []},
+                  couch_key_tree:remove_leafs(TwoChildSibs,
+                                              [{0, "1"}])).
+
+should_remove_leaf()->
+    OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}],
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({OneChild, [{1, "1b"}]},
+                  couch_key_tree:remove_leafs(TwoChildSibs,
+                                              [{1, "1b"}])).
+
+should_produce_empty_tree_on_removing_all_leaves()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({[], [{1, "1b"}, {1, "1a"}]},
+                  couch_key_tree:remove_leafs(TwoChildSibs,
+                                              [{1, "1b"}, {1, "1a"}])).
+
+should_have_no_effect_on_removing_non_existant_node()->
+    Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+    ?_assertEqual({Stemmed, []},
+                  couch_key_tree:remove_leafs(Stemmed,
+                                              [{1, "1a"}])).
+
+should_produce_empty_tree_on_removing_last_leaf()->
+    Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+    ?_assertEqual({[], [{2, "1aa"}]},
+                  couch_key_tree:remove_leafs(Stemmed,
+                                              [{2, "1aa"}])).
+
+should_extract_subtree()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({[{"foo", {0, ["1"]}}],[]},
+                  couch_key_tree:get(TwoChildSibs, [{0, "1"}])).
+
+should_extract_subsubtree()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]},
+                  couch_key_tree:get(TwoChildSibs, [{1, "1a"}])).
+
+should_gather_non_existant_leaf()->
+    TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+    ?_assertEqual({[],[{0, "x"}]},
+                  couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}])).
+ +should_gather_leaf()-> + TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual({[{"bar", {1, ["1a","1"]}}],[]}, + couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}])). + +shoul_gather_multiple_leaves()-> + TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]}, + couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}])). + +should_gather_single_leaf_for_multiple_revs() -> + OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}], + ToFind = [{0, "1"}, {1, "1a"}], + ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]}, + couch_key_tree:get_key_leafs(OneChild, ToFind)). + +should_gather_multiple_for_multiple_revs() -> + TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ToFind = [{0, "1"}, {1, "1a"}], + ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]}, + couch_key_tree:get_key_leafs(TwoChildSibs, ToFind)). + +should_retrieve_full_key_path()-> + TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual({[{0,[{"1", "foo"}]}],[]}, + couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}])). + +should_retrieve_full_key_path_for_node()-> + TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual({[{1,[{"1a", "bar"},{"1", "foo"}]}],[]}, + couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}])). + +should_retrieve_leaves_with_parent_node()-> + Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], + TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + [ + ?_assertEqual([{2, [{"1aa", "bar"},{"1a", "bar"}]}], + couch_key_tree:get_all_leafs_full(Stemmed)), + ?_assertEqual([{1, [{"1a", "bar"},{"1", "foo"}]}, + {1, [{"1b", "bar"},{"1", "foo"}]}], + couch_key_tree:get_all_leafs_full(TwoChildSibs)) + ]. + +should_retrieve_all_leaves()-> + Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], + TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + [ + ?_assertEqual([{"bar", {2, ["1aa","1a"]}}], + couch_key_tree:get_all_leafs(Stemmed)), + ?_assertEqual([{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b","1"]}}], + couch_key_tree:get_all_leafs(TwoChildSibs)) + ]. + +should_have_no_leaves_for_empty_tree()-> + ?_assertEqual(0, couch_key_tree:count_leafs([])). + +should_have_single_leaf_for_tree_with_single_node()-> + ?_assertEqual(1, couch_key_tree:count_leafs([{0, {"1","foo",[]}}])). + +should_have_two_leaves_for_tree_with_chindler_siblings()-> + TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}], + ?_assertEqual(2, couch_key_tree:count_leafs(TwoChildSibs)). + +should_not_affect_on_leaf_counting_for_stemmed_tree()-> + ?_assertEqual(1, couch_key_tree:count_leafs([{2, {"1bb", "boo", []}}])). + +should_have_no_effect_for_stemming_more_levels_than_exists()-> + TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], + ?_assertEqual(TwoChild, couch_key_tree:stem(TwoChild, 3)). + +should_return_one_deepest_node()-> + TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], + Stemmed = [{2, {"1aa", "bar", []}}], + ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 1)). + +should_return_two_deepest_nodes()-> + TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], + Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], + ?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 2)). 
+
+
+merge_and_stem(RevTree, Tree) ->
+    {Merged, Result} = couch_key_tree:merge(RevTree, Tree),
+    {couch_key_tree:stem(Merged, ?DEPTH), Result}.
diff --git a/src/couch/test/eunit/couch_passwords_tests.erl b/src/couch/test/eunit/couch_passwords_tests.erl
new file mode 100644
index 000000000..88de8530f
--- /dev/null
+++ b/src/couch/test/eunit/couch_passwords_tests.erl
@@ -0,0 +1,54 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_passwords_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+
+
+pbkdf2_test_()->
+    {"PBKDF2",
+     [
+         {"Iterations: 1, length: 20",
+          ?_assertEqual(
+              {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>},
+              couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20))},
+
+         {"Iterations: 2, length: 20",
+          ?_assertEqual(
+              {ok, <<"ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957">>},
+              couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 2, 20))},
+
+         {"Iterations: 4096, length: 20",
+          ?_assertEqual(
+              {ok, <<"4b007901b765489abead49d926f721d065a429c1">>},
+              couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 4096, 20))},
+
+         {"Iterations: 4096, length: 25",
+          ?_assertEqual(
+              {ok, <<"3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038">>},
+              couch_passwords:pbkdf2(<<"passwordPASSWORDpassword">>,
+                                     <<"saltSALTsaltSALTsaltSALTsaltSALTsalt">>,
+                                     4096, 25))},
+         {"Null byte",
+          ?_assertEqual(
+              {ok, <<"56fa6aa75548099dcc37d7f03425e0c3">>},
+              couch_passwords:pbkdf2(<<"pass\0word">>,
+                                     <<"sa\0lt">>,
+                                     4096, 16))},
+
+         {timeout, 600, %% this may run too long on slow hosts
+          {"Iterations: 16777216 - this may take some time",
+           ?_assertEqual(
+               {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
+               couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20)
+           )}}]}.
diff --git a/src/couch/test/eunit/couch_query_servers_tests.erl b/src/couch/test/eunit/couch_query_servers_tests.erl
new file mode 100644
index 000000000..f8df896c4
--- /dev/null
+++ b/src/couch/test/eunit/couch_query_servers_tests.erl
@@ -0,0 +1,95 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_query_servers_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+
+
+setup() ->
+    meck:new([config, couch_log]).
+
+
+teardown(_) ->
+    meck:unload().
+
+
+sum_overflow_test_() ->
+    {
+        "Test overflow detection in the _sum reduce function",
+        {
+            setup,
+            fun setup/0,
+            fun teardown/1,
+            [
+                fun should_return_error_on_overflow/0,
+                fun should_return_object_on_log/0,
+                fun should_return_object_on_false/0
+            ]
+        }
+    }.
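+
+
+% A note on the mocking below: meck:expect/4 with a list of arguments
+% installs a stub that matches exactly those arguments ('_' is a wildcard)
+% and returns the final value, so each case only varies the "reduce_limit"
+% string that the stubbed config:get/3 hands back.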
+ + +should_return_error_on_overflow() -> + meck:reset([config, couch_log]), + meck:expect( + config, get, ["query_server_config", "reduce_limit", "true"], + "true" + ), + meck:expect(couch_log, error, ['_', '_'], ok), + KVs = gen_sum_kvs(), + {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs), + ?assertMatch({[{<<"error">>, <<"builtin_reduce_error">>} | _]}, Result), + ?assert(meck:called(config, get, '_')), + ?assert(meck:called(couch_log, error, '_')). + + +should_return_object_on_log() -> + meck:reset([config, couch_log]), + meck:expect( + config, get, ["query_server_config", "reduce_limit", "true"], + "log" + ), + meck:expect(couch_log, error, ['_', '_'], ok), + KVs = gen_sum_kvs(), + {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs), + ?assertMatch({[_ | _]}, Result), + Keys = [K || {K, _} <- element(1, Result)], + ?assert(not lists:member(<<"error">>, Keys)), + ?assert(meck:called(config, get, '_')), + ?assert(meck:called(couch_log, error, '_')). + + +should_return_object_on_false() -> + meck:reset([config, couch_log]), + meck:expect( + config, get, ["query_server_config", "reduce_limit", "true"], + "false" + ), + meck:expect(couch_log, error, ['_', '_'], ok), + KVs = gen_sum_kvs(), + {ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs), + ?assertMatch({[_ | _]}, Result), + Keys = [K || {K, _} <- element(1, Result)], + ?assert(not lists:member(<<"error">>, Keys)), + ?assert(meck:called(config, get, '_')), + ?assertNot(meck:called(couch_log, error, '_')). + + +gen_sum_kvs() -> + lists:map(fun(I) -> + Props = lists:map(fun(_) -> + K = couch_util:encodeBase64Url(crypto:strong_rand_bytes(16)), + {K, 1} + end, lists:seq(1, 20)), + [I, {Props}] + end, lists:seq(1, 10)). diff --git a/src/couch/test/eunit/couch_server_tests.erl b/src/couch/test/eunit/couch_server_tests.erl new file mode 100644 index 000000000..530b7efd0 --- /dev/null +++ b/src/couch/test/eunit/couch_server_tests.erl @@ -0,0 +1,310 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_server_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include("../src/couch_server_int.hrl"). + +start() -> + Ctx = test_util:start_couch(), + config:set("log", "include_sasl", "false", false), + Ctx. + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, []), + Db. + +setup(rename) -> + config:set("couchdb", "enable_database_recovery", "true", false), + setup(); +setup(_) -> + setup(). + +teardown(Db) -> + FilePath = couch_db:get_filepath(Db), + (catch couch_db:close(Db)), + (catch file:delete(FilePath)). + +teardown(rename, Db) -> + config:set("couchdb", "enable_database_recovery", "false", false), + teardown(Db); +teardown(_, Db) -> + teardown(Db). + + +delete_db_test_() -> + { + "Test for proper deletion of db file", + { + setup, + fun start/0, fun test_util:stop/1, + [ + make_test_case(rename, [fun should_rename_on_delete/2]), + make_test_case(delete, [fun should_delete/2]) + ] + } + }. 
+ +make_test_case(Mod, Funs) -> + { + lists:flatten(io_lib:format("~s", [Mod])), + {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]} + }. + +should_rename_on_delete(_, Db) -> + DbName = couch_db:name(Db), + Origin = couch_db:get_filepath(Db), + ?_test(begin + ?assert(filelib:is_regular(Origin)), + ?assertMatch(ok, couch_server:delete(DbName, [])), + ?assertNot(filelib:is_regular(Origin)), + DeletedFiles = deleted_files(Origin), + ?assertMatch([_], DeletedFiles), + [Renamed] = DeletedFiles, + ?assertEqual( + filename:extension(Origin), filename:extension(Renamed)), + ?assert(filelib:is_regular(Renamed)) + end). + +should_delete(_, Db) -> + DbName = couch_db:name(Db), + Origin = couch_db:get_filepath(Db), + ?_test(begin + ?assert(filelib:is_regular(Origin)), + ?assertMatch(ok, couch_server:delete(DbName, [])), + ?assertNot(filelib:is_regular(Origin)), + ?assertMatch([], deleted_files(Origin)) + end). + +deleted_files(ViewFile) -> + filelib:wildcard(filename:rootname(ViewFile) ++ "*.deleted.*"). + + +bad_engine_option_test_() -> + { + setup, + fun start/0, + fun test_util:stop/1, + [ + fun t_bad_engine_option/0 + ] + }. + + +t_bad_engine_option() -> + Resp = couch_server:create(?tempdb(), [{engine, <<"cowabunga!">>}]), + ?assertEqual(Resp, {error, {invalid_engine_extension, <<"cowabunga!">>}}). + + +get_engine_path_test_() -> + { + setup, + fun start/0, fun test_util:stop/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_return_engine_path/1, + fun should_return_invalid_engine_error/1 + ] + } + }. + + +should_return_engine_path(Db) -> + DbName = couch_db:name(Db), + Engine = couch_db_engine:get_engine(Db), + Resp = couch_server:get_engine_path(DbName, Engine), + FilePath = couch_db:get_filepath(Db), + ?_assertMatch({ok, FilePath}, Resp). + + +should_return_invalid_engine_error(Db) -> + DbName = couch_db:name(Db), + Engine = fake_engine, + Resp = couch_server:get_engine_path(DbName, Engine), + ?_assertMatch({error, {invalid_engine, Engine}}, Resp). + + +interleaved_requests_test_() -> + { + setup, + fun start_interleaved/0, + fun stop_interleaved/1, + fun make_interleaved_requests/1 + }. + + +start_interleaved() -> + TestDbName = ?tempdb(), + meck:new(couch_db, [passthrough]), + meck:expect(couch_db, start_link, fun(Engine, DbName, Filename, Options) -> + case DbName of + TestDbName -> + receive + go -> ok + end, + Res = meck:passthrough([Engine, DbName, Filename, Options]), + % We're unlinking and sending a delayed + % EXIT signal so that we can mimic a specific + % message order in couch_server. On a test machine + % this is a big race condition which affects the + % ability to induce the bug. + case Res of + {ok, Db} -> + DbPid = couch_db:get_pid(Db), + unlink(DbPid), + Msg = {'EXIT', DbPid, killed}, + erlang:send_after(2000, whereis(couch_server), Msg); + _ -> + ok + end, + Res; + _ -> + meck:passthrough([Engine, DbName, Filename, Options]) + end + end), + {test_util:start_couch(), TestDbName}. + + +stop_interleaved({Ctx, TestDbName}) -> + couch_server:delete(TestDbName, [?ADMIN_CTX]), + meck:unload(), + test_util:stop_couch(Ctx). + + +make_interleaved_requests({_, TestDbName}) -> + [ + fun() -> t_interleaved_create_delete_open(TestDbName) end + ]. 
+
+
+t_interleaved_create_delete_open(DbName) ->
+    {CrtRef, DelRef, OpenRef} = {make_ref(), make_ref(), make_ref()},
+    CrtMsg = {'$gen_call', {self(), CrtRef}, {create, DbName, [?ADMIN_CTX]}},
+    DelMsg = {'$gen_call', {self(), DelRef}, {delete, DbName, [?ADMIN_CTX]}},
+    OpenMsg = {'$gen_call', {self(), OpenRef}, {open, DbName, [?ADMIN_CTX]}},
+
+    % Get the current couch_server pid so we're sure
+    % not to end up messaging two different pids
+    CouchServer = whereis(couch_server),
+
+    % Start our first instance that will succeed in
+    % an invalid state. Notice that the opener pid
+    % spawned by couch_server:open_async/5 will halt
+    % in our meck expect function waiting for a message.
+    %
+    % We're using raw message passing here so that we don't
+    % have to coordinate multiple processes for this test.
+    CouchServer ! CrtMsg,
+    {ok, Opener} = get_opener_pid(DbName),
+
+    % We have to suspend couch_server so that we can enqueue
+    % our next requests and let the opener finish processing.
+    erlang:suspend_process(CouchServer),
+
+    % Since couch_server is suspended, this delete request won't
+    % be processed until after the opener has sent its
+    % successful open response via gen_server:call/3
+    CouchServer ! DelMsg,
+
+    % This open request will be in the queue after the
+    % delete request but before the gen_server:call/3
+    % message which will establish the mixed up state
+    % in the couch_dbs ets table
+    CouchServer ! OpenMsg,
+
+    % First release the opener pid so it can continue
+    % working while we tweak meck
+    Opener ! go,
+
+    % Replace our expect call to meck so that the OpenMsg
+    % isn't blocked on the receive
+    meck:expect(couch_db, start_link, fun(Engine, DbName1, Filename, Options) ->
+        meck:passthrough([Engine, DbName1, Filename, Options])
+    end),
+
+    % Wait for the '$gen_call' message from OpenerPid to arrive
+    % in couch_server's mailbox
+    ok = wait_for_open_async_result(CouchServer, Opener),
+
+    % Now monitor and resume the couch_server and assert that
+    % couch_server does not crash while processing OpenMsg
+    CSRef = erlang:monitor(process, CouchServer),
+    erlang:resume_process(CouchServer),
+    check_monitor_not_triggered(CSRef),
+
+    % The create response is expected to return not_found
+    % due to the delete request canceling the async opener
+    % pid and sending not_found to all waiters unconditionally
+    ?assertEqual({CrtRef, not_found}, get_next_message()),
+
+    % Our delete request was processed normally
+    ?assertEqual({DelRef, ok}, get_next_message()),
+
+    % The db was deleted, thus it should not be found
+    % when we try to open it.
+    ?assertMatch({OpenRef, {not_found, no_db_file}}, get_next_message()),
+
+    % And finally assert that couch_server is still
+    % alive.
+    ?assert(is_process_alive(CouchServer)),
+    check_monitor_not_triggered(CSRef).
+
+
+get_opener_pid(DbName) ->
+    WaitFun = fun() ->
+        case ets:lookup(couch_dbs, DbName) of
+            [#entry{pid = Pid}] ->
+                {ok, Pid};
+            [] ->
+                wait
+        end
+    end,
+    test_util:wait(WaitFun).
+
+
+wait_for_open_async_result(CouchServer, Opener) ->
+    WaitFun = fun() ->
+        {_, Messages} = erlang:process_info(CouchServer, messages),
+        Found = lists:foldl(fun(Msg, Acc) ->
+            case Msg of
+                {'$gen_call', {Opener, _}, {open_result, _, _, {ok, _}}} ->
+                    true;
+                _ ->
+                    Acc
+            end
+        end, false, Messages),
+        if Found -> ok; true -> wait end
+    end,
+    test_util:wait(WaitFun).
+
+
+check_monitor_not_triggered(Ref) ->
+    receive
+        {'DOWN', Ref, _, _, Reason0} ->
+            erlang:error({monitor_triggered, Reason0})
+    after 100 ->
+        ok
+    end.
+ + +get_next_message() -> + receive + Msg -> + Msg + after 5000 -> + erlang:error(timeout) + end. diff --git a/src/couch/test/eunit/couch_stream_tests.erl b/src/couch/test/eunit/couch_stream_tests.erl new file mode 100644 index 000000000..a7fedf0af --- /dev/null +++ b/src/couch/test/eunit/couch_stream_tests.erl @@ -0,0 +1,124 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_stream_tests). + +-include_lib("couch/include/couch_eunit.hrl"). + +-define(ENGINE(FdVar), {couch_bt_engine_stream, {FdVar, []}}). + +setup() -> + {ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]), + {ok, Stream} = couch_stream:open(?ENGINE(Fd), []), + {Fd, Stream}. + +teardown({Fd, _}) -> + ok = couch_file:close(Fd). + + +stream_test_() -> + { + "CouchDB stream tests", + { + setup, + fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_write/1, + fun should_write_consecutive/1, + fun should_write_empty_binary/1, + fun should_return_file_pointers_on_close/1, + fun should_return_stream_size_on_close/1, + fun should_return_valid_pointers/1, + fun should_recall_last_pointer_position/1, + fun should_stream_more_with_4K_chunk_size/1, + fun should_stop_on_normal_exit_of_stream_opener/1 + ] + } + } + }. + + +should_write({_, Stream}) -> + ?_assertEqual(ok, couch_stream:write(Stream, <<"food">>)). + +should_write_consecutive({_, Stream}) -> + couch_stream:write(Stream, <<"food">>), + ?_assertEqual(ok, couch_stream:write(Stream, <<"foob">>)). + +should_write_empty_binary({_, Stream}) -> + ?_assertEqual(ok, couch_stream:write(Stream, <<>>)). + +should_return_file_pointers_on_close({_, Stream}) -> + couch_stream:write(Stream, <<"foodfoob">>), + {NewEngine, _, _, _, _} = couch_stream:close(Stream), + {ok, Ptrs} = couch_stream:to_disk_term(NewEngine), + ?_assertEqual([{0, 8}], Ptrs). + +should_return_stream_size_on_close({_, Stream}) -> + couch_stream:write(Stream, <<"foodfoob">>), + {_, Length, _, _, _} = couch_stream:close(Stream), + ?_assertEqual(8, Length). + +should_return_valid_pointers({_Fd, Stream}) -> + couch_stream:write(Stream, <<"foodfoob">>), + {NewEngine, _, _, _, _} = couch_stream:close(Stream), + ?_assertEqual(<<"foodfoob">>, read_all(NewEngine)). + +should_recall_last_pointer_position({Fd, Stream}) -> + couch_stream:write(Stream, <<"foodfoob">>), + {_, _, _, _, _} = couch_stream:close(Stream), + {ok, ExpPtr} = couch_file:bytes(Fd), + {ok, Stream2} = couch_stream:open(?ENGINE(Fd)), + ZeroBits = <<0:(8 * 10)>>, + OneBits = <<1:(8 * 10)>>, + ok = couch_stream:write(Stream2, OneBits), + ok = couch_stream:write(Stream2, ZeroBits), + {NewEngine, 20, _, _, _} = couch_stream:close(Stream2), + {ok, Ptrs} = couch_stream:to_disk_term(NewEngine), + [{ExpPtr, 20}] = Ptrs, + AllBits = iolist_to_binary([OneBits, ZeroBits]), + ?_assertEqual(AllBits, read_all(NewEngine)). 
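+
+% The disk terms checked above are {Offset, Length} pairs into the underlying
+% couch_file: writing 8 bytes from the start of an empty file yields [{0, 8}],
+% and the second stream above starts at whatever byte offset the file had
+% already reached, hence the [{ExpPtr, 20}] match.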
+ +should_stream_more_with_4K_chunk_size({Fd, _}) -> + {ok, Stream} = couch_stream:open(?ENGINE(Fd), [{buffer_size, 4096}]), + lists:foldl( + fun(_, Acc) -> + Data = <<"a1b2c">>, + couch_stream:write(Stream, Data), + [Data | Acc] + end, [], lists:seq(1, 1024)), + {NewEngine, Length, _, _, _} = couch_stream:close(Stream), + {ok, Ptrs} = couch_stream:to_disk_term(NewEngine), + ?_assertMatch({[{0, 4100}, {4106, 1020}], 5120}, {Ptrs, Length}). + +should_stop_on_normal_exit_of_stream_opener({Fd, _}) -> + RunnerPid = self(), + OpenerPid = spawn( + fun() -> + {ok, StreamPid} = couch_stream:open(?ENGINE(Fd)), + RunnerPid ! {pid, StreamPid} + end), + StreamPid = receive + {pid, StreamPid0} -> StreamPid0 + end, + % Confirm the validity of the test by verifying the stream opener has died + ?assertNot(is_process_alive(OpenerPid)), + % Verify the stream itself has also died + ?_assertNot(is_process_alive(StreamPid)). + + +read_all(Engine) -> + Data = couch_stream:foldl(Engine, fun(Bin, Acc) -> [Bin, Acc] end, []), + iolist_to_binary(Data). diff --git a/src/couch/test/eunit/couch_task_status_tests.erl b/src/couch/test/eunit/couch_task_status_tests.erl new file mode 100644 index 000000000..0ec03563b --- /dev/null +++ b/src/couch/test/eunit/couch_task_status_tests.erl @@ -0,0 +1,233 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_task_status_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 1000). + + +setup() -> + Ctx = test_util:start(?MODULE, [couch_log], [{dont_mock, [config]}]), + {ok, TaskStatusPid} = couch_task_status:start_link(), + TaskUpdaterPid = spawn(fun() -> loop() end), + {TaskStatusPid, TaskUpdaterPid, Ctx}. + + +teardown({TaskStatusPid, _, Ctx})-> + test_util:stop_sync_throw(TaskStatusPid, fun() -> + couch_task_status:stop() + end, timeout_error, ?TIMEOUT), + test_util:stop(Ctx). + + +couch_task_status_test_() -> + { + "CouchDB task status updates", + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_register_task/1, + fun should_set_task_startup_time/1, + fun should_have_update_time_as_startup_before_any_progress/1, + fun should_set_task_type/1, + fun should_not_register_multiple_tasks_for_same_pid/1, + fun should_set_task_progress/1, + fun should_update_task_progress/1, + fun should_update_time_changes_on_task_progress/1, + %% fun should_control_update_frequency/1, + fun should_reset_control_update_frequency/1, + fun should_track_multiple_tasks/1, + fun should_finish_task/1 + + ] + } + }. + + +should_register_task({_, Pid, _Ctx}) -> + ok = call(Pid, add, [{type, replication}, {progress, 0}]), + ?_assertEqual(1, length(couch_task_status:all())). + +should_set_task_startup_time({_, Pid, _Ctx}) -> + ok = call(Pid, add, [{type, replication}, {progress, 0}]), + ?_assert(is_integer(get_task_prop(Pid, started_on))). 
+ +should_have_update_time_as_startup_before_any_progress({_, Pid, _Ctx}) -> + ok = call(Pid, add, [{type, replication}, {progress, 0}]), + StartTime = get_task_prop(Pid, started_on), + ?_assertEqual(StartTime, get_task_prop(Pid, updated_on)). + +should_set_task_type({_, Pid, _Ctx}) -> + ok = call(Pid, add, [{type, replication}, {progress, 0}]), + ?_assertEqual(replication, get_task_prop(Pid, type)). + +should_not_register_multiple_tasks_for_same_pid({_, Pid, _Ctx}) -> + ok = call(Pid, add, [{type, replication}, {progress, 0}]), + ?_assertEqual({add_task_error, already_registered}, + call(Pid, add, [{type, compaction}, {progress, 0}])). + +should_set_task_progress({_, Pid, _Ctx}) -> + ok = call(Pid, add, [{type, replication}, {progress, 0}]), + ?_assertEqual(0, get_task_prop(Pid, progress)). + +should_update_task_progress({_, Pid, _Ctx}) -> + ok = call(Pid, add, [{type, replication}, {progress, 0}]), + call(Pid, update, [{progress, 25}]), + ?_assertEqual(25, get_task_prop(Pid, progress)). + +should_update_time_changes_on_task_progress({_, Pid, _Ctx}) -> + ?_assert( + begin + ok = call(Pid, add, [{type, replication}, {progress, 0}]), + ok = timer:sleep(1000), % sleep awhile to customize update time + call(Pid, update, [{progress, 25}]), + get_task_prop(Pid, updated_on) > get_task_prop(Pid, started_on) + end). + +%%should_control_update_frequency({_, Pid, _Ctx}) -> +%% ?_assertEqual(66, +%% begin +%% ok = call(Pid, add, [{type, replication}, {progress, 0}]), +%% call(Pid, update, [{progress, 50}]), +%% call(Pid, update_frequency, 500), +%% call(Pid, update, [{progress, 66}]), +%% call(Pid, update, [{progress, 77}]), +%% get_task_prop(Pid, progress) +%% end). + +should_reset_control_update_frequency({_, Pid, _Ctx}) -> + ?_assertEqual(87, + begin + ok = call(Pid, add, [{type, replication}, {progress, 0}]), + call(Pid, update, [{progress, 50}]), + call(Pid, update_frequency, 500), + call(Pid, update, [{progress, 66}]), + call(Pid, update, [{progress, 77}]), + call(Pid, update_frequency, 0), + call(Pid, update, [{progress, 87}]), + get_task_prop(Pid, progress) + end). + +should_track_multiple_tasks(_) -> + ?_assert(run_multiple_tasks()). + +should_finish_task({_, Pid, _Ctx}) -> + ok = call(Pid, add, [{type, replication}, {progress, 0}]), + ?assertEqual(1, length(couch_task_status:all())), + ok = call(Pid, done), + ?_assertEqual(0, length(couch_task_status:all())). + + +run_multiple_tasks() -> + Pid1 = spawn(fun() -> loop() end), + Pid2 = spawn(fun() -> loop() end), + Pid3 = spawn(fun() -> loop() end), + call(Pid1, add, [{type, replication}, {progress, 0}]), + call(Pid2, add, [{type, compaction}, {progress, 0}]), + call(Pid3, add, [{type, indexer}, {progress, 0}]), + + ?assertEqual(3, length(couch_task_status:all())), + ?assertEqual(replication, get_task_prop(Pid1, type)), + ?assertEqual(compaction, get_task_prop(Pid2, type)), + ?assertEqual(indexer, get_task_prop(Pid3, type)), + + call(Pid2, update, [{progress, 33}]), + call(Pid3, update, [{progress, 42}]), + call(Pid1, update, [{progress, 11}]), + ?assertEqual(42, get_task_prop(Pid3, progress)), + call(Pid1, update, [{progress, 72}]), + ?assertEqual(72, get_task_prop(Pid1, progress)), + ?assertEqual(33, get_task_prop(Pid2, progress)), + + call(Pid1, done), + ?assertEqual(2, length(couch_task_status:all())), + call(Pid3, done), + ?assertEqual(1, length(couch_task_status:all())), + call(Pid2, done), + ?assertEqual(0, length(couch_task_status:all())), + + true. 
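+
+
+% couch_task_status tracks at most one task per calling process (see
+% should_not_register_multiple_tasks_for_same_pid above), which is why these
+% tests drive dedicated updater processes running loop/0 below instead of
+% calling couch_task_status:add_task/1 from the test process itself.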
+ + +loop() -> + receive + {add, Props, From} -> + Resp = couch_task_status:add_task(Props), + From ! {ok, self(), Resp}, + loop(); + {update, Props, From} -> + Resp = couch_task_status:update(Props), + From ! {ok, self(), Resp}, + loop(); + {update_frequency, Msecs, From} -> + Resp = couch_task_status:set_update_frequency(Msecs), + From ! {ok, self(), Resp}, + loop(); + {done, From} -> + From ! {ok, self(), ok} + end. + +call(Pid, done) -> + Ref = erlang:monitor(process, Pid), + Pid ! {done, self()}, + Res = wait(Pid), + receive + {'DOWN', Ref, _Type, Pid, _Info} -> + Res + after ?TIMEOUT -> + throw(timeout_error) + end; +call(Pid, Command) -> + Pid ! {Command, self()}, + wait(Pid). + +call(Pid, Command, Arg) -> + Pid ! {Command, Arg, self()}, + wait(Pid). + +wait(Pid) -> + receive + {ok, Pid, Msg} -> + Msg + after ?TIMEOUT -> + throw(timeout_error) + end. + +get_task_prop(Pid, Prop) -> + From = list_to_binary(pid_to_list(Pid)), + Element = lists:foldl( + fun(PropList, Acc) -> + case couch_util:get_value(pid, PropList) of + From -> + [PropList | Acc]; + _ -> + Acc + end + end, + [], couch_task_status:all() + ), + case couch_util:get_value(Prop, hd(Element), nil) of + nil -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, "Could not get property '" + ++ couch_util:to_list(Prop) + ++ "' for task " + ++ pid_to_list(Pid)}]}); + Value -> + Value + end. diff --git a/src/couch/test/eunit/couch_totp_tests.erl b/src/couch/test/eunit/couch_totp_tests.erl new file mode 100644 index 000000000..6817a092a --- /dev/null +++ b/src/couch/test/eunit/couch_totp_tests.erl @@ -0,0 +1,55 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_totp_tests). + +-include_lib("eunit/include/eunit.hrl"). + +totp_sha_test() -> + Key = <<"12345678901234567890">>, + ?assertEqual(94287082, couch_totp:generate(sha, Key, 59, 30, 8)), + ?assertEqual(07081804, couch_totp:generate(sha, Key, 1111111109, 30, 8)), + ?assertEqual(14050471, couch_totp:generate(sha, Key, 1111111111, 30, 8)), + ?assertEqual(89005924, couch_totp:generate(sha, Key, 1234567890, 30, 8)), + ?assertEqual(69279037, couch_totp:generate(sha, Key, 2000000000, 30, 8)), + ?assertEqual(65353130, couch_totp:generate(sha, Key, 20000000000, 30, 8)). + +totp_sha256_test() -> + Key = <<"12345678901234567890123456789012">>, + case sha_256_512_supported() of + true -> + ?assertEqual(46119246, couch_totp:generate(sha256, Key, 59, 30, 8)), + ?assertEqual(68084774, couch_totp:generate(sha256, Key, 1111111109, 30, 8)), + ?assertEqual(67062674, couch_totp:generate(sha256, Key, 1111111111, 30, 8)), + ?assertEqual(91819424, couch_totp:generate(sha256, Key, 1234567890, 30, 8)), + ?assertEqual(90698825, couch_totp:generate(sha256, Key, 2000000000, 30, 8)), + ?assertEqual(77737706, couch_totp:generate(sha256, Key, 20000000000, 30, 8)); + false -> + ?debugMsg("sha256 not supported, tests skipped") + end. 
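+
+% The expected values in these TOTP tests are the RFC 6238 Appendix B
+% reference vectors (8-digit codes, 30-second time step) for SHA-1, SHA-256
+% and SHA-512 respectively.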
+
+totp_sha512_test() ->
+    Key = <<"1234567890123456789012345678901234567890123456789012345678901234">>,
+    case sha_256_512_supported() of
+        true ->
+            ?assertEqual(90693936, couch_totp:generate(sha512, Key, 59, 30, 8)),
+            ?assertEqual(25091201, couch_totp:generate(sha512, Key, 1111111109, 30, 8)),
+            ?assertEqual(99943326, couch_totp:generate(sha512, Key, 1111111111, 30, 8)),
+            ?assertEqual(93441116, couch_totp:generate(sha512, Key, 1234567890, 30, 8)),
+            ?assertEqual(38618901, couch_totp:generate(sha512, Key, 2000000000, 30, 8)),
+            ?assertEqual(47863826, couch_totp:generate(sha512, Key, 20000000000, 30, 8));
+        false ->
+            ?debugMsg("sha512 not supported, tests skipped")
+    end.
+
+sha_256_512_supported() ->
+    erlang:function_exported(crypto, hmac, 3).
diff --git a/src/couch/test/eunit/couch_util_tests.erl b/src/couch/test/eunit/couch_util_tests.erl
new file mode 100644
index 000000000..3e145c4f6
--- /dev/null
+++ b/src/couch/test/eunit/couch_util_tests.erl
@@ -0,0 +1,170 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_util_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+
+
+setup() ->
+    %% We cannot start the driver from here since it becomes bound to the
+    %% eunit master process, and the next couch_server_sup:start_link call
+    %% will fail because the server cannot load the driver while it is
+    %% already loaded.
+    %%
+    %% On the other hand, we cannot unload the driver here either, as that
+    %% fails with {error, not_loaded_by_this_process} while it is loaded.
+    %% Any ideas are welcome.
+    %%
+    Ctx = test_util:start_couch(),
+    %% config:start_link(?CONFIG_CHAIN),
+    %% {ok, _} = couch_drv:start_link(),
+    Ctx.
+
+teardown(Ctx) ->
+    ok = test_util:stop_couch(Ctx),
+    %% config:stop(),
+    %% erl_ddll:unload_driver(couch_icu_driver),
+    ok.
+
+
+collation_test_() ->
+    {
+        "Collation tests",
+        [
+            {
+                setup,
+                fun setup/0, fun teardown/1,
+                [
+                    should_collate_ascii(),
+                    should_collate_non_ascii()
+                ]
+            }
+        ]
+    }.
+
+validate_callback_exists_test_() ->
+    {
+        "validate_callback_exists tests",
+        [
+            fun should_succeed_for_existent_cb/0,
+            should_fail_for_missing_cb()
+        ]
+    }.
+
+should_collate_ascii() ->
+    ?_assertEqual(1, couch_util:collate(<<"foo">>, <<"bar">>)).
+
+should_collate_non_ascii() ->
+    ?_assertEqual(-1, couch_util:collate(<<"A">>, <<"aa">>)).
+
+to_existed_atom_test() ->
+    ?assert(couch_util:to_existing_atom(true)),
+    ?assertMatch(foo, couch_util:to_existing_atom(<<"foo">>)),
+    ?assertMatch(foobarbaz, couch_util:to_existing_atom("foobarbaz")).
+
+implode_test() ->
+    ?assertEqual([1, 38, 2, 38, 3], couch_util:implode([1, 2, 3], "&")).
+
+trim_test() ->
+    lists:map(fun(S) -> ?assertEqual("foo", couch_util:trim(S)) end,
+              [" foo", "foo ", "\tfoo", " foo ", "foo\t", "foo\n", "\nfoo"]).
+
+abs_pathname_test() ->
+    {ok, Cwd} = file:get_cwd(),
+    ?assertEqual(Cwd ++ "/foo", couch_util:abs_pathname("./foo")).
+
+flush_test() ->
+    ?assertNot(couch_util:should_flush()),
+    AcquireMem = fun() ->
+        _IntsToAGazillion = lists:seq(1, 200000),
+        _LotsOfData = lists:map(fun(_) -> <<"foobar">> end,
+                                lists:seq(1, 500000)),
+        _ = list_to_binary(_LotsOfData),
+
+        %% Allocating 200K tuples puts us above the memory threshold.
+        %% Originally this asserted that should_flush() returns true;
+        %% however, unlike in the etap test, GC collects all the allocated
+        %% bits, making that condition fail. So we invert the condition:
+        %% since GC works and cleans the memory, everything is fine.
+        ?assertNot(couch_util:should_flush())
+    end,
+    AcquireMem(),
+
+    %% Checking to flush invokes GC
+    ?assertNot(couch_util:should_flush()).
+
+verify_test() ->
+    ?assert(couch_util:verify("It4Vooya", "It4Vooya")),
+    ?assertNot(couch_util:verify("It4VooyaX", "It4Vooya")),
+    ?assert(couch_util:verify(<<"ahBase3r">>, <<"ahBase3r">>)),
+    ?assertNot(couch_util:verify(<<"ahBase3rX">>, <<"ahBase3r">>)),
+    ?assertNot(couch_util:verify(nil, <<"ahBase3r">>)).
+
+find_in_binary_test_() ->
+    Cases = [
+        {<<"foo">>, <<"foobar">>, {exact, 0}},
+        {<<"foo">>, <<"foofoo">>, {exact, 0}},
+        {<<"foo">>, <<"barfoo">>, {exact, 3}},
+        {<<"foo">>, <<"barfo">>, {partial, 3}},
+        {<<"f">>, <<"fobarfff">>, {exact, 0}},
+        {<<"f">>, <<"obarfff">>, {exact, 4}},
+        {<<"f">>, <<"obarggf">>, {exact, 6}},
+        {<<"f">>, <<"f">>, {exact, 0}},
+        {<<"f">>, <<"g">>, not_found},
+        {<<"foo">>, <<"f">>, {partial, 0}},
+        {<<"foo">>, <<"g">>, not_found},
+        {<<"foo">>, <<"">>, not_found},
+        {<<"fofo">>, <<"foofo">>, {partial, 3}},
+        {<<"foo">>, <<"gfobarfo">>, {partial, 6}},
+        {<<"foo">>, <<"gfobarf">>, {partial, 6}},
+        {<<"foo">>, <<"gfobar">>, not_found},
+        {<<"fog">>, <<"gbarfogquiz">>, {exact, 4}},
+        {<<"ggg">>, <<"ggg">>, {exact, 0}},
+        {<<"ggg">>, <<"ggggg">>, {exact, 0}},
+        {<<"ggg">>, <<"bggg">>, {exact, 1}},
+        {<<"ggg">>, <<"bbgg">>, {partial, 2}},
+        {<<"ggg">>, <<"bbbg">>, {partial, 3}},
+        {<<"ggg">>, <<"bgbggbggg">>, {exact, 6}},
+        {<<"ggg">>, <<"bgbggb">>, not_found}
+    ],
+    lists:map(
+        fun({Needle, Haystack, Result}) ->
+            Msg = lists:flatten(io_lib:format("Looking for ~s in ~s",
+                                              [Needle, Haystack])),
+            {Msg, ?_assertMatch(Result,
+                                couch_util:find_in_binary(Needle, Haystack))}
+        end, Cases).
+
+should_succeed_for_existent_cb() ->
+    ?_assert(couch_util:validate_callback_exists(lists, any, 2)).
+
+should_fail_for_missing_cb() ->
+    Cases = [
+        {unknown_module, any, 1},
+        {erlang, unknown_function, 1},
+        {erlang, whereis, 100}
+    ],
+    lists:map(
+        fun({M, F, A} = MFA) ->
+            Name = lists:flatten(io_lib:format("~w:~w/~w", [M, F, A])),
+            {Name, ?_assertThrow(
+                {error, {undefined_callback, Name, MFA}},
+                couch_util:validate_callback_exists(M, F, A))}
+        end, Cases).
+
+to_hex_test_() ->
+    [
+        ?_assertEqual("", couch_util:to_hex([])),
+        ?_assertEqual("010203faff", couch_util:to_hex([1, 2, 3, 250, 255])),
+        ?_assertEqual("", couch_util:to_hex(<<>>)),
+        ?_assertEqual("010203faff", couch_util:to_hex(<<1, 2, 3, 250, 255>>))
+    ].
diff --git a/src/couch/test/eunit/couch_uuids_tests.erl b/src/couch/test/eunit/couch_uuids_tests.erl
new file mode 100644
index 000000000..a836eccc6
--- /dev/null
+++ b/src/couch/test/eunit/couch_uuids_tests.erl
@@ -0,0 +1,155 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_uuids_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+
+-define(TIMEOUT_S, 20).
+
+
+setup() ->
+    Ctx = test_util:start(?MODULE, [], [{dont_mock, [config]}]),
+    couch_uuids:start(),
+    Ctx.
+
+setup(Opts) ->
+    Ctx = setup(),
+    lists:foreach(
+        fun({Option, Value}) ->
+            config:set("uuids", Option, Value, false)
+        end, Opts),
+    Ctx.
+
+teardown(Ctx) ->
+    couch_uuids:stop(),
+    test_util:stop(Ctx).
+
+teardown(_, Ctx) ->
+    teardown(Ctx).
+
+
+default_test_() ->
+    {
+        "Default UUID algorithm",
+        {
+            setup,
+            fun setup/0, fun teardown/1,
+            fun should_be_unique/1
+        }
+    }.
+
+sequential_test_() ->
+    Opts = [{"algorithm", "sequential"}],
+    Cases = [
+        fun should_be_unique/2,
+        fun should_increment_monotonically/2,
+        fun should_rollover/2
+    ],
+    {
+        "UUID algorithm: sequential",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [{Opts, Fun} || Fun <- Cases]
+        }
+    }.
+
+utc_test_() ->
+    Opts = [{"algorithm", "utc_random"}],
+    Cases = [
+        fun should_be_unique/2,
+        fun should_increment_monotonically/2
+    ],
+    {
+        "UUID algorithm: utc_random",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [{Opts, Fun} || Fun <- Cases]
+        }
+    }.
+
+utc_id_suffix_test_() ->
+    Opts = [{"algorithm", "utc_id"}, {"utc_id_suffix", "bozo"}],
+    Cases = [
+        fun should_be_unique/2,
+        fun should_increment_monotonically/2,
+        fun should_preserve_suffix/2
+    ],
+    {
+        "UUID algorithm: utc_id",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [{Opts, Fun} || Fun <- Cases]
+        }
+    }.
+
+
+should_be_unique() ->
+    %% this one may run for too long on slow hosts
+    {timeout, ?TIMEOUT_S, ?_assert(test_unique(10000, [couch_uuids:new()]))}.
+should_be_unique(_) ->
+    should_be_unique().
+should_be_unique(_, _) ->
+    should_be_unique().
+
+should_increment_monotonically(_, _) ->
+    ?_assert(couch_uuids:new() < couch_uuids:new()).
+
+should_rollover(_, _) ->
+    ?_test(begin
+        UUID = binary_to_list(couch_uuids:new()),
+        Prefix = element(1, lists:split(26, UUID)),
+        N = gen_until_pref_change(Prefix, 0),
+        ?assert(N >= 5000 andalso N =< 11000)
+    end).
+
+should_preserve_suffix(_, _) ->
+    ?_test(begin
+        UUID = binary_to_list(couch_uuids:new()),
+        Suffix = get_suffix(UUID),
+        ?assert(test_same_suffix(10000, Suffix))
+    end).
+
+
+test_unique(0, _) ->
+    true;
+test_unique(N, UUIDs) ->
+    UUID = couch_uuids:new(),
+    ?assertNot(lists:member(UUID, UUIDs)),
+    test_unique(N - 1, [UUID | UUIDs]).
+
+get_prefix(UUID) ->
+    element(1, lists:split(26, binary_to_list(UUID))).
+
+gen_until_pref_change(_, Count) when Count > 8251 ->
+    Count;
+gen_until_pref_change(Prefix, N) ->
+    case get_prefix(couch_uuids:new()) of
+        Prefix -> gen_until_pref_change(Prefix, N + 1);
+        _ -> N
+    end.
+
+get_suffix(UUID) when is_binary(UUID) ->
+    get_suffix(binary_to_list(UUID));
+get_suffix(UUID) ->
+    element(2, lists:split(14, UUID)).
+
+test_same_suffix(0, _) ->
+    true;
+test_same_suffix(N, Suffix) ->
+    case get_suffix(couch_uuids:new()) of
+        Suffix -> test_same_suffix(N - 1, Suffix);
+        _ -> false
+    end.
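+
+% An alternative sketch (illustrative only, not wired into any fixture): the
+% test_unique/2 check above is O(N^2) because of lists:member/2; the same
+% check using a map for O(1) membership would look like this.
+test_unique_map(0, _Seen) ->
+    true;
+test_unique_map(N, Seen) ->
+    UUID = couch_uuids:new(),
+    ?assertNot(maps:is_key(UUID, Seen)),
+    test_unique_map(N - 1, Seen#{UUID => true}).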
diff --git a/src/couch/test/eunit/couch_work_queue_tests.erl b/src/couch/test/eunit/couch_work_queue_tests.erl new file mode 100644 index 000000000..a192230ef --- /dev/null +++ b/src/couch/test/eunit/couch_work_queue_tests.erl @@ -0,0 +1,402 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_work_queue_tests). + +-include_lib("couch/include/couch_eunit.hrl"). + +-define(TIMEOUT, 100). + + +setup(Opts) -> + {ok, Q} = couch_work_queue:new(Opts), + Producer = spawn_producer(Q), + Consumer = spawn_consumer(Q), + {Q, Producer, Consumer}. + +setup_max_items() -> + setup([{max_items, 3}]). + +setup_max_size() -> + setup([{max_size, 160}]). + +setup_max_items_and_size() -> + setup([{max_size, 160}, {max_items, 3}]). + +setup_multi_workers() -> + {Q, Producer, Consumer1} = setup([{max_size, 160}, + {max_items, 3}, + {multi_workers, true}]), + Consumer2 = spawn_consumer(Q), + Consumer3 = spawn_consumer(Q), + {Q, Producer, [Consumer1, Consumer2, Consumer3]}. + +teardown({Q, Producer, Consumers}) when is_list(Consumers) -> + % consume all to unblock and let producer/consumer stop without timeout + [consume(Consumer, all) || Consumer <- Consumers], + + ok = close_queue(Q), + ok = stop(Producer, "producer"), + R = [stop(Consumer, "consumer") || Consumer <- Consumers], + R = [ok || _ <- Consumers], + ok; +teardown({Q, Producer, Consumer}) -> + teardown({Q, Producer, [Consumer]}). + + +single_consumer_test_() -> + { + "Single producer and consumer", + [ + { + "Queue with 3 max items", + { + foreach, + fun setup_max_items/0, fun teardown/1, + single_consumer_max_item_count() ++ common_cases() + } + }, + { + "Queue with max size of 160 bytes", + { + foreach, + fun setup_max_size/0, fun teardown/1, + single_consumer_max_size() ++ common_cases() + } + }, + { + "Queue with max size of 160 bytes and 3 max items", + { + foreach, + fun setup_max_items_and_size/0, fun teardown/1, + single_consumer_max_items_and_size() ++ common_cases() + } + } + ] + }. + +multiple_consumers_test_() -> + { + "Single producer and multiple consumers", + [ + { + "Queue with max size of 160 bytes and 3 max items", + { + foreach, + fun setup_multi_workers/0, fun teardown/1, + common_cases() ++ multiple_consumers() + } + + } + ] + }. + +common_cases()-> + [ + fun should_block_consumer_on_dequeue_from_empty_queue/1, + fun should_consume_right_item/1, + fun should_timeout_on_close_non_empty_queue/1, + fun should_not_block_producer_for_non_empty_queue_after_close/1, + fun should_be_closed/1 + ]. + +single_consumer_max_item_count()-> + [ + fun should_have_no_items_for_new_queue/1, + fun should_block_producer_on_full_queue_count/1, + fun should_receive_first_queued_item/1, + fun should_consume_multiple_items/1, + fun should_consume_all/1 + ]. + +single_consumer_max_size()-> + [ + fun should_have_zero_size_for_new_queue/1, + fun should_block_producer_on_full_queue_size/1, + fun should_increase_queue_size_on_produce/1, + fun should_receive_first_queued_item/1, + fun should_consume_multiple_items/1, + fun should_consume_all/1 + ]. 
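+
+% For orientation (an inference from the assertions further below): with
+% max_size 160, a queued item may overshoot the limit -- e.g. a 100-byte and
+% then a 110-byte item are both accepted, reaching size 210, and only then
+% does the producer block. should_block_producer_on_full_queue_size asserts
+% exactly this sequence.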
+ +single_consumer_max_items_and_size() -> + single_consumer_max_item_count() ++ single_consumer_max_size(). + +multiple_consumers() -> + [ + fun should_have_zero_size_for_new_queue/1, + fun should_have_no_items_for_new_queue/1, + fun should_increase_queue_size_on_produce/1 + ]. + + +should_have_no_items_for_new_queue({Q, _, _}) -> + ?_assertEqual(0, couch_work_queue:item_count(Q)). + +should_have_zero_size_for_new_queue({Q, _, _}) -> + ?_assertEqual(0, couch_work_queue:size(Q)). + +should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumers}) when is_list(Consumers) -> + [consume(C, 2) || C <- Consumers], + Pongs = [ping(C) || C <- Consumers], + ?_assertEqual([timeout, timeout, timeout], Pongs); +should_block_consumer_on_dequeue_from_empty_queue({_, _, Consumer}) -> + consume(Consumer, 1), + Pong = ping(Consumer), + ?_assertEqual(timeout, Pong). + +should_consume_right_item({Q, Producer, Consumers}) when is_list(Consumers) -> + [consume(C, 3) || C <- Consumers], + + Item1 = produce(Q, Producer, 10, false), + ok = ping(Producer), + ?assertEqual(0, couch_work_queue:item_count(Q)), + ?assertEqual(0, couch_work_queue:size(Q)), + + Item2 = produce(Q, Producer, 10, false), + ok = ping(Producer), + ?assertEqual(0, couch_work_queue:item_count(Q)), + ?assertEqual(0, couch_work_queue:size(Q)), + + Item3 = produce(Q, Producer, 10, false), + ok = ping(Producer), + ?assertEqual(0, couch_work_queue:item_count(Q)), + ?assertEqual(0, couch_work_queue:size(Q)), + + R = [{ping(C), Item} + || {C, Item} <- lists:zip(Consumers, [Item1, Item2, Item3])], + + ?_assertEqual([{ok, Item1}, {ok, Item2}, {ok, Item3}], R); +should_consume_right_item({Q, Producer, Consumer}) -> + consume(Consumer, 1), + Item = produce(Q, Producer, 10, false), + produce(Q, Producer, 20, true), + ok = ping(Producer), + ok = ping(Consumer), + {ok, Items} = last_consumer_items(Consumer), + ?_assertEqual([Item], Items). + +should_increase_queue_size_on_produce({Q, Producer, _}) -> + produce(Q, Producer, 50, true), + ok = ping(Producer), + Count1 = couch_work_queue:item_count(Q), + Size1 = couch_work_queue:size(Q), + + produce(Q, Producer, 10, true), + Count2 = couch_work_queue:item_count(Q), + Size2 = couch_work_queue:size(Q), + + ?_assertEqual([{Count1, Size1}, {Count2, Size2}], [{1, 50}, {2, 60}]). + +should_block_producer_on_full_queue_count({Q, Producer, _}) -> + produce(Q, Producer, 10, true), + ?assertEqual(1, couch_work_queue:item_count(Q)), + ok = ping(Producer), + + produce(Q, Producer, 15, true), + ?assertEqual(2, couch_work_queue:item_count(Q)), + ok = ping(Producer), + + produce(Q, Producer, 20, true), + ?assertEqual(3, couch_work_queue:item_count(Q)), + Pong = ping(Producer), + + ?_assertEqual(timeout, Pong). + +should_block_producer_on_full_queue_size({Q, Producer, _}) -> + produce(Q, Producer, 100, true), + ok = ping(Producer), + ?assertEqual(1, couch_work_queue:item_count(Q)), + ?assertEqual(100, couch_work_queue:size(Q)), + + produce(Q, Producer, 110, false), + Pong = ping(Producer), + ?assertEqual(2, couch_work_queue:item_count(Q)), + ?assertEqual(210, couch_work_queue:size(Q)), + + ?_assertEqual(timeout, Pong). + +should_consume_multiple_items({Q, Producer, Consumer}) -> + Item1 = produce(Q, Producer, 10, true), + ok = ping(Producer), + + Item2 = produce(Q, Producer, 15, true), + ok = ping(Producer), + + consume(Consumer, 2), + + {ok, Items} = last_consumer_items(Consumer), + ?_assertEqual([Item1, Item2], Items). 
+ +should_receive_first_queued_item({Q, Producer, Consumer}) -> + consume(Consumer, 100), + timeout = ping(Consumer), + + Item = produce(Q, Producer, 11, false), + ok = ping(Producer), + + ok = ping(Consumer), + ?assertEqual(0, couch_work_queue:item_count(Q)), + + {ok, Items} = last_consumer_items(Consumer), + ?_assertEqual([Item], Items). + +should_consume_all({Q, Producer, Consumer}) -> + Item1 = produce(Q, Producer, 10, true), + Item2 = produce(Q, Producer, 15, true), + Item3 = produce(Q, Producer, 20, true), + + consume(Consumer, all), + + {ok, Items} = last_consumer_items(Consumer), + ?_assertEqual([Item1, Item2, Item3], Items). + +should_timeout_on_close_non_empty_queue({Q, Producer, _}) -> + produce(Q, Producer, 1, true), + Status = close_queue(Q), + + ?_assertEqual(timeout, Status). + +should_not_block_producer_for_non_empty_queue_after_close({Q, Producer, _}) -> + produce(Q, Producer, 1, true), + close_queue(Q), + Pong = ping(Producer), + Size = couch_work_queue:size(Q), + Count = couch_work_queue:item_count(Q), + + ?_assertEqual({ok, 1, 1}, {Pong, Size, Count}). + +should_be_closed({Q, _, Consumers}) when is_list(Consumers) -> + ok = close_queue(Q), + + [consume(C, 1) || C <- Consumers], + + LastConsumerItems = [last_consumer_items(C) || C <- Consumers], + ItemsCount = couch_work_queue:item_count(Q), + Size = couch_work_queue:size(Q), + + ?_assertEqual({[closed, closed, closed], closed, closed}, + {LastConsumerItems, ItemsCount, Size}); +should_be_closed({Q, _, Consumer}) -> + ok = close_queue(Q), + + consume(Consumer, 1), + + LastConsumerItems = last_consumer_items(Consumer), + ItemsCount = couch_work_queue:item_count(Q), + Size = couch_work_queue:size(Q), + + ?_assertEqual({closed, closed, closed}, + {LastConsumerItems, ItemsCount, Size}). + + +close_queue(Q) -> + test_util:stop_sync(Q, fun() -> + ok = couch_work_queue:close(Q) + end, ?TIMEOUT). + +spawn_consumer(Q) -> + Parent = self(), + spawn(fun() -> consumer_loop(Parent, Q, nil) end). + +consumer_loop(Parent, Q, PrevItem) -> + receive + {stop, Ref} -> + Parent ! {ok, Ref}; + {ping, Ref} -> + Parent ! {pong, Ref}, + consumer_loop(Parent, Q, PrevItem); + {last_item, Ref} -> + Parent ! {item, Ref, PrevItem}, + consumer_loop(Parent, Q, PrevItem); + {consume, N} -> + Result = couch_work_queue:dequeue(Q, N), + consumer_loop(Parent, Q, Result) + end. + +spawn_producer(Q) -> + Parent = self(), + spawn(fun() -> producer_loop(Parent, Q) end). + +producer_loop(Parent, Q) -> + receive + {stop, Ref} -> + Parent ! {ok, Ref}; + {ping, Ref} -> + Parent ! {pong, Ref}, + producer_loop(Parent, Q); + {produce, Ref, Size} -> + Item = crypto:strong_rand_bytes(Size), + Parent ! {item, Ref, Item}, + ok = couch_work_queue:queue(Q, Item), + producer_loop(Parent, Q) + end. + +consume(Consumer, N) -> + Consumer ! {consume, N}. + +last_consumer_items(Consumer) -> + Ref = make_ref(), + Consumer ! {last_item, Ref}, + receive + {item, Ref, Items} -> + Items + after ?TIMEOUT -> + timeout + end. + +produce(Q, Producer, Size, Wait) -> + Ref = make_ref(), + ItemsCount = couch_work_queue:item_count(Q), + Producer ! {produce, Ref, Size}, + receive + {item, Ref, Item} when Wait -> + ok = wait_increment(Q, ItemsCount), + Item; + {item, Ref, Item} -> + Item + after ?TIMEOUT -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, "Timeout asking producer to produce an item"}]}) + end. + +ping(Pid) -> + Ref = make_ref(), + Pid ! {ping, Ref}, + receive + {pong, Ref} -> + ok + after ?TIMEOUT -> + timeout + end. 
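+
+% How the helpers above fit together (illustrative walkthrough using the
+% same messages the producer/consumer loops handle):
+%   {ok, Q} = couch_work_queue:new([{max_items, 1}]),
+%   P = spawn_producer(Q),
+%   C = spawn_consumer(Q),
+%   Item = produce(Q, P, 8, true),   % producer enqueues an 8-byte random item
+%   consume(C, 1),                   % consumer asks to dequeue one item
+%   ok = ping(C),                    % ok once the consumer is unblocked
+%   {ok, [Item]} = last_consumer_items(C).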
+
+stop(Pid, Name) ->
+    Ref = make_ref(),
+    Pid ! {stop, Ref},
+    receive
+        {ok, Ref} -> ok
+    after ?TIMEOUT ->
+        ?debugMsg("Timeout stopping " ++ Name),
+        timeout
+    end.
+
+wait_increment(Q, ItemsCount) ->
+    test_util:wait(fun() ->
+        case couch_work_queue:item_count(Q) > ItemsCount of
+            true ->
+                ok;
+            false ->
+                wait
+        end
+    end).
diff --git a/src/couch/test/eunit/couchdb_attachments_tests.erl b/src/couch/test/eunit/couchdb_attachments_tests.erl
new file mode 100644
index 000000000..04859dbc9
--- /dev/null
+++ b/src/couch/test/eunit/couchdb_attachments_tests.erl
@@ -0,0 +1,765 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_attachments_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("mem3/include/mem3.hrl").
+
+-define(COMPRESSION_LEVEL, 8).
+-define(ATT_BIN_NAME, <<"logo.png">>).
+-define(ATT_TXT_NAME, <<"file.erl">>).
+-define(FIXTURE_PNG, filename:join([?FIXTURESDIR, "logo.png"])).
+-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
+-define(TIMEOUT, 5000).
+-define(TIMEOUT_EUNIT, 100).
+-define(TIMEWAIT, 1000).
+-define(i2l(I), integer_to_list(I)).
+
+
+start() ->
+    Ctx = test_util:start_couch(),
+    % ensure default compression settings for attachments_compression_tests
+    config:set("attachments", "compression_level",
+               ?i2l(?COMPRESSION_LEVEL), false),
+    config:set("attachments", "compressible_types", "text/*", false),
+    Ctx.
+
+setup() ->
+    DbName = ?tempdb(),
+    {ok, Db} = couch_db:create(DbName, []),
+    ok = couch_db:close(Db),
+    Addr = config:get("httpd", "bind_address", "127.0.0.1"),
+    Port = mochiweb_socket_server:get(couch_httpd, port),
+    Host = Addr ++ ":" ++ ?i2l(Port),
+    {Host, ?b2l(DbName)}.
+
+setup({binary, standalone}) ->
+    {Host, DbName} = setup(),
+    setup_att(fun create_standalone_png_att/2, Host, DbName, ?FIXTURE_PNG);
+setup({text, standalone}) ->
+    {Host, DbName} = setup(),
+    setup_att(fun create_standalone_text_att/2, Host, DbName, ?FIXTURE_TXT);
+setup({binary, inline}) ->
+    {Host, DbName} = setup(),
+    setup_att(fun create_inline_png_att/2, Host, DbName, ?FIXTURE_PNG);
+setup({text, inline}) ->
+    {Host, DbName} = setup(),
+    setup_att(fun create_inline_text_att/2, Host, DbName, ?FIXTURE_TXT);
+setup(compressed) ->
+    {Host, DbName} = setup(),
+    setup_att(fun create_already_compressed_att/2, Host, DbName, ?FIXTURE_TXT).
+
+setup_att(Fun, Host, DbName, File) ->
+    HttpHost = "http://" ++ Host,
+    AttUrl = Fun(HttpHost, DbName),
+    {ok, Data} = file:read_file(File),
+    DocUrl = string:join([HttpHost, DbName, "doc"], "/"),
+    Helpers = {DbName, DocUrl, AttUrl},
+    {Data, Helpers}.
+
+teardown(_, {_, {DbName, _, _}}) ->
+    teardown(DbName).
+
+teardown({_, DbName}) ->
+    teardown(DbName);
+teardown(DbName) ->
+    ok = couch_server:delete(?l2b(DbName), []),
+    ok.
+
+
+attachments_test_() ->
+    {
+        "Attachments tests",
+        {
+            setup,
+            fun start/0, fun test_util:stop_couch/1,
+            [
+                attachments_md5_tests(),
+                attachments_compression_tests()
+            ]
+        }
+    }.
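+
+% The MD5 cases below exercise both placements HTTP allows for the checksum:
+% Content-MD5 as a request header, and Content-MD5 as a trailer after the
+% final chunk of a chunked transfer. (Summary comment; the requests are
+% built inside each test.)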
+
+attachments_md5_tests() ->
+    {
+        "Attachments MD5 tests",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                fun should_upload_attachment_without_md5/1,
+                fun should_upload_attachment_by_chunks_without_md5/1,
+                fun should_upload_attachment_with_valid_md5_header/1,
+                fun should_upload_attachment_by_chunks_with_valid_md5_header/1,
+                fun should_upload_attachment_by_chunks_with_valid_md5_trailer/1,
+                fun should_reject_attachment_with_invalid_md5/1,
+                fun should_reject_chunked_attachment_with_invalid_md5/1,
+                fun should_reject_chunked_attachment_with_invalid_md5_trailer/1
+            ]
+        }
+    }.
+
+attachments_compression_tests() ->
+    Funs = [
+        fun should_get_att_without_accept_gzip_encoding/2,
+        fun should_get_att_with_accept_gzip_encoding/2,
+        fun should_get_att_with_accept_deflate_encoding/2,
+        fun should_return_406_response_on_unsupported_encoding/2,
+        fun should_get_doc_with_att_data/2,
+        fun should_get_doc_with_att_data_stub/2
+    ],
+    {
+        "Attachments compression tests",
+        [
+            {
+                "Created via Attachments API",
+                created_attachments_compression_tests(standalone, Funs)
+            },
+            {
+                "Created inline via Document API",
+                created_attachments_compression_tests(inline, Funs)
+            },
+            {
+                "Created already compressed via Attachments API",
+                {
+                    foreachx,
+                    fun setup/1, fun teardown/2,
+                    [{compressed, Fun} || Fun <- Funs]
+                }
+            },
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun should_not_create_compressed_att_with_deflate_encoding/1,
+                    fun should_not_create_compressed_att_with_compress_encoding/1,
+                    fun should_create_compressible_att_with_ctype_params/1
+                ]
+            }
+        ]
+    }.
+
+created_attachments_compression_tests(Mod, Funs) ->
+    [
+        {
+            "Compressible attachments",
+            {
+                foreachx,
+                fun setup/1, fun teardown/2,
+                [{{text, Mod}, Fun} || Fun <- Funs]
+            }
+        },
+        {
+            "Uncompressible attachments",
+            {
+                foreachx,
+                fun setup/1, fun teardown/2,
+                [{{binary, Mod}, Fun} || Fun <- Funs]
+            }
+        }
+    ].
+
+
+should_upload_attachment_without_md5({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        Body = "We all live in a yellow submarine!",
+        Headers = [
+            {"Content-Length", "34"},
+            {"Content-Type", "text/plain"},
+            {"Host", Host}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(201, Code),
+        ?assertEqual(true, get_json(Json, [<<"ok">>]))
+    end).
+
+should_upload_attachment_by_chunks_without_md5({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        AttData = <<"We all live in a yellow submarine!">>,
+        <<Part1:21/binary, Part2:13/binary>> = AttData,
+        Body = [chunked_body([Part1, Part2]), "\r\n"],
+        Headers = [
+            {"Content-Type", "text/plain"},
+            {"Transfer-Encoding", "chunked"},
+            {"Host", Host}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(201, Code),
+        ?assertEqual(true, get_json(Json, [<<"ok">>]))
+    end).
+
+should_upload_attachment_with_valid_md5_header({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        Body = "We all live in a yellow submarine!",
+        Headers = [
+            {"Content-Length", "34"},
+            {"Content-Type", "text/plain"},
+            {"Content-MD5", ?b2l(base64:encode(couch_hash:md5_hash(Body)))},
+            {"Host", Host}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(201, Code),
+        ?assertEqual(true, get_json(Json, [<<"ok">>]))
+    end).
+
+should_upload_attachment_by_chunks_with_valid_md5_header({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        AttData = <<"We all live in a yellow submarine!">>,
+        <<Part1:21/binary, Part2:13/binary>> = AttData,
+        Body = [chunked_body([Part1, Part2]), "\r\n"],
+        Headers = [
+            {"Content-Type", "text/plain"},
+            {"Content-MD5", ?b2l(base64:encode(couch_hash:md5_hash(AttData)))},
+            {"Host", Host},
+            {"Transfer-Encoding", "chunked"}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(201, Code),
+        ?assertEqual(true, get_json(Json, [<<"ok">>]))
+    end).
+
+should_upload_attachment_by_chunks_with_valid_md5_trailer({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        AttData = <<"We all live in a yellow submarine!">>,
+        <<Part1:21/binary, Part2:13/binary>> = AttData,
+        Body = [chunked_body([Part1, Part2]),
+                "Content-MD5: ", base64:encode(couch_hash:md5_hash(AttData)),
+                "\r\n\r\n"],
+        Headers = [
+            {"Content-Type", "text/plain"},
+            {"Host", Host},
+            {"Trailer", "Content-MD5"},
+            {"Transfer-Encoding", "chunked"}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(201, Code),
+        ?assertEqual(true, get_json(Json, [<<"ok">>]))
+    end).
+
+should_reject_attachment_with_invalid_md5({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        Body = "We all live in a yellow submarine!",
+        Headers = [
+            {"Content-Length", "34"},
+            {"Content-Type", "text/plain"},
+            {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))},
+            {"Host", Host}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(400, Code),
+        ?assertEqual(<<"content_md5_mismatch">>,
+                     get_json(Json, [<<"error">>]))
+    end).
+
+should_reject_chunked_attachment_with_invalid_md5({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        AttData = <<"We all live in a yellow submarine!">>,
+        <<Part1:21/binary, Part2:13/binary>> = AttData,
+        Body = [chunked_body([Part1, Part2]), "\r\n"],
+        Headers = [
+            {"Content-Type", "text/plain"},
+            {"Content-MD5", ?b2l(base64:encode(<<"foobar!">>))},
+            {"Host", Host},
+            {"Transfer-Encoding", "chunked"}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(400, Code),
+        ?assertEqual(<<"content_md5_mismatch">>,
+                     get_json(Json, [<<"error">>]))
+    end).
+
+should_reject_chunked_attachment_with_invalid_md5_trailer({Host, DbName}) ->
+    ?_test(begin
+        AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
+        AttData = <<"We all live in a yellow submarine!">>,
+        <<Part1:21/binary, Part2:13/binary>> = AttData,
+        Body = [chunked_body([Part1, Part2]),
+                "Content-MD5: ", base64:encode(<<"foobar!">>),
+                "\r\n\r\n"],
+        Headers = [
+            {"Content-Type", "text/plain"},
+            {"Host", Host},
+            {"Trailer", "Content-MD5"},
+            {"Transfer-Encoding", "chunked"}
+        ],
+        {ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
+        ?assertEqual(400, Code),
+        ?assertEqual(<<"content_md5_mismatch">>, get_json(Json, [<<"error">>]))
+    end).
+
+should_get_att_without_accept_gzip_encoding(_, {Data, {_, _, AttUrl}}) ->
+    ?_test(begin
+        {ok, Code, Headers, Body} = test_request:get(AttUrl),
+        ?assertEqual(200, Code),
+        ?assertNot(lists:member({"Content-Encoding", "gzip"}, Headers)),
+        ?assertEqual(Data, iolist_to_binary(Body))
+    end).
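+
+% The clauses that follow encode the compression matrix: already-compressed
+% and compressible (text) attachments are served gzip-encoded when the
+% client sends Accept-Encoding: gzip, while binary (PNG) attachments are
+% returned without a Content-Encoding. (Summary comment for orientation.)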
+ +should_get_att_with_accept_gzip_encoding(compressed, {Data, {_, _, AttUrl}}) -> + ?_test(begin + {ok, Code, Headers, Body} = test_request:get( + AttUrl, [{"Accept-Encoding", "gzip"}]), + ?assertEqual(200, Code), + ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)), + ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body))) + end); +should_get_att_with_accept_gzip_encoding({text, _}, {Data, {_, _, AttUrl}}) -> + ?_test(begin + {ok, Code, Headers, Body} = test_request:get( + AttUrl, [{"Accept-Encoding", "gzip"}]), + ?assertEqual(200, Code), + ?assert(lists:member({"Content-Encoding", "gzip"}, Headers)), + ?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body))) + end); +should_get_att_with_accept_gzip_encoding({binary, _}, {Data, {_, _, AttUrl}}) -> + ?_test(begin + {ok, Code, Headers, Body} = test_request:get( + AttUrl, [{"Accept-Encoding", "gzip"}]), + ?assertEqual(200, Code), + ?assertEqual(undefined, + couch_util:get_value("Content-Encoding", Headers)), + ?assertEqual(Data, iolist_to_binary(Body)) + end). + +should_get_att_with_accept_deflate_encoding(_, {Data, {_, _, AttUrl}}) -> + ?_test(begin + {ok, Code, Headers, Body} = test_request:get( + AttUrl, [{"Accept-Encoding", "deflate"}]), + ?assertEqual(200, Code), + ?assertEqual(undefined, + couch_util:get_value("Content-Encoding", Headers)), + ?assertEqual(Data, iolist_to_binary(Body)) + end). + +should_return_406_response_on_unsupported_encoding(_, {_, {_, _, AttUrl}}) -> + ?_assertEqual(406, + begin + {ok, Code, _, _} = test_request:get( + AttUrl, [{"Accept-Encoding", "deflate, *;q=0"}]), + Code + end). + +should_get_doc_with_att_data(compressed, {Data, {_, DocUrl, _}}) -> + ?_test(begin + Url = DocUrl ++ "?attachments=true", + {ok, Code, _, Body} = test_request:get( + Url, [{"Accept", "application/json"}]), + ?assertEqual(200, Code), + Json = jiffy:decode(Body), + AttJson = couch_util:get_nested_json_value( + Json, [<<"_attachments">>, ?ATT_TXT_NAME]), + AttData = couch_util:get_nested_json_value( + AttJson, [<<"data">>]), + ?assertEqual( + <<"text/plain">>, + couch_util:get_nested_json_value(AttJson,[<<"content_type">>])), + ?assertEqual(Data, base64:decode(AttData)) + end); +should_get_doc_with_att_data({text, _}, {Data, {_, DocUrl, _}}) -> + ?_test(begin + Url = DocUrl ++ "?attachments=true", + {ok, Code, _, Body} = test_request:get( + Url, [{"Accept", "application/json"}]), + ?assertEqual(200, Code), + Json = jiffy:decode(Body), + AttJson = couch_util:get_nested_json_value( + Json, [<<"_attachments">>, ?ATT_TXT_NAME]), + AttData = couch_util:get_nested_json_value( + AttJson, [<<"data">>]), + ?assertEqual( + <<"text/plain">>, + couch_util:get_nested_json_value(AttJson,[<<"content_type">>])), + ?assertEqual(Data, base64:decode(AttData)) + end); +should_get_doc_with_att_data({binary, _}, {Data, {_, DocUrl, _}}) -> + ?_test(begin + Url = DocUrl ++ "?attachments=true", + {ok, Code, _, Body} = test_request:get( + Url, [{"Accept", "application/json"}]), + ?assertEqual(200, Code), + Json = jiffy:decode(Body), + AttJson = couch_util:get_nested_json_value( + Json, [<<"_attachments">>, ?ATT_BIN_NAME]), + AttData = couch_util:get_nested_json_value( + AttJson, [<<"data">>]), + ?assertEqual( + <<"image/png">>, + couch_util:get_nested_json_value(AttJson,[<<"content_type">>])), + ?assertEqual(Data, base64:decode(AttData)) + end). 
+ +should_get_doc_with_att_data_stub(compressed, {Data, {_, DocUrl, _}}) -> + ?_test(begin + Url = DocUrl ++ "?att_encoding_info=true", + {ok, Code, _, Body} = test_request:get( + Url, [{"Accept", "application/json"}]), + ?assertEqual(200, Code), + Json = jiffy:decode(Body), + {AttJson} = couch_util:get_nested_json_value( + Json, [<<"_attachments">>, ?ATT_TXT_NAME]), + ?assertEqual(<<"gzip">>, + couch_util:get_value(<<"encoding">>, AttJson)), + AttLength = couch_util:get_value(<<"length">>, AttJson), + EncLength = couch_util:get_value(<<"encoded_length">>, AttJson), + ?assertEqual(AttLength, EncLength), + ?assertEqual(iolist_size(zlib:gzip(Data)), AttLength) + end); +should_get_doc_with_att_data_stub({text, _}, {Data, {_, DocUrl, _}}) -> + ?_test(begin + Url = DocUrl ++ "?att_encoding_info=true", + {ok, Code, _, Body} = test_request:get( + Url, [{"Accept", "application/json"}]), + ?assertEqual(200, Code), + Json = jiffy:decode(Body), + {AttJson} = couch_util:get_nested_json_value( + Json, [<<"_attachments">>, ?ATT_TXT_NAME]), + ?assertEqual(<<"gzip">>, + couch_util:get_value(<<"encoding">>, AttJson)), + AttEncLength = iolist_size(gzip(Data)), + ?assertEqual(AttEncLength, + couch_util:get_value(<<"encoded_length">>, AttJson)), + ?assertEqual(byte_size(Data), + couch_util:get_value(<<"length">>, AttJson)) + end); +should_get_doc_with_att_data_stub({binary, _}, {Data, {_, DocUrl, _}}) -> + ?_test(begin + Url = DocUrl ++ "?att_encoding_info=true", + {ok, Code, _, Body} = test_request:get( + Url, [{"Accept", "application/json"}]), + ?assertEqual(200, Code), + Json = jiffy:decode(Body), + {AttJson} = couch_util:get_nested_json_value( + Json, [<<"_attachments">>, ?ATT_BIN_NAME]), + ?assertEqual(undefined, + couch_util:get_value(<<"encoding">>, AttJson)), + ?assertEqual(undefined, + couch_util:get_value(<<"encoded_length">>, AttJson)), + ?assertEqual(byte_size(Data), + couch_util:get_value(<<"length">>, AttJson)) + end). + +should_not_create_compressed_att_with_deflate_encoding({Host, DbName}) -> + ?_assertEqual(415, + begin + HttpHost = "http://" ++ Host, + AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"), + {ok, Data} = file:read_file(?FIXTURE_TXT), + Body = zlib:compress(Data), + Headers = [ + {"Content-Encoding", "deflate"}, + {"Content-Type", "text/plain"} + ], + {ok, Code, _, _} = test_request:put(AttUrl, Headers, Body), + Code + end). + +should_not_create_compressed_att_with_compress_encoding({Host, DbName}) -> + % Note: As of OTP R13B04, it seems there's no LZW compression + % (i.e. UNIX compress utility implementation) lib in OTP. + % However there's a simple working Erlang implementation at: + % http://scienceblogs.com/goodmath/2008/01/simple_lempelziv_compression_i.php + ?_assertEqual(415, + begin + HttpHost = "http://" ++ Host, + AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"), + {ok, Data} = file:read_file(?FIXTURE_TXT), + Headers = [ + {"Content-Encoding", "compress"}, + {"Content-Type", "text/plain"} + ], + {ok, Code, _, _} = test_request:put(AttUrl, Headers, Data), + Code + end). 
+ +should_create_compressible_att_with_ctype_params({Host, DbName}) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(begin + HttpHost = "http://" ++ Host, + DocUrl = string:join([HttpHost, DbName, ?docid()], "/"), + AttUrl = string:join([DocUrl, ?b2l(?ATT_TXT_NAME)], "/"), + {ok, Data} = file:read_file(?FIXTURE_TXT), + Headers = [{"Content-Type", "text/plain; charset=UTF-8"}], + {ok, Code0, _, _} = test_request:put(AttUrl, Headers, Data), + ?assertEqual(201, Code0), + + {ok, Code1, _, Body} = test_request:get( + DocUrl ++ "?att_encoding_info=true"), + ?assertEqual(200, Code1), + Json = jiffy:decode(Body), + {AttJson} = couch_util:get_nested_json_value( + Json, [<<"_attachments">>, ?ATT_TXT_NAME]), + ?assertEqual(<<"gzip">>, + couch_util:get_value(<<"encoding">>, AttJson)), + AttEncLength = iolist_size(gzip(Data)), + ?assertEqual(AttEncLength, + couch_util:get_value(<<"encoded_length">>, AttJson)), + ?assertEqual(byte_size(Data), + couch_util:get_value(<<"length">>, AttJson)) + end)}. + + +compact_after_lowering_attachment_size_limit_test_() -> + { + "Compact after lowering attachment size limit", + { + foreach, + fun() -> + Ctx = test_util:start_couch(), + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + {Ctx, DbName} + end, + fun({Ctx, DbName}) -> + config:delete("couchdb", "max_attachment_size"), + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + test_util:stop_couch(Ctx) + end, + [ + fun should_compact_after_lowering_attachment_size_limit/1 + ] + } + }. + + +should_compact_after_lowering_attachment_size_limit({_Ctx, DbName}) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(begin + {ok, Db1} = couch_db:open(DbName, [?ADMIN_CTX]), + Doc1 = #doc{id = <<"doc1">>, atts = att(1000)}, + {ok, _} = couch_db:update_doc(Db1, Doc1, []), + couch_db:close(Db1), + config:set("couchdb", "max_attachment_size", "1", _Persist = false), + compact_db(DbName), + {ok, Db2} = couch_db:open_int(DbName, []), + {ok, Doc2} = couch_db:open_doc(Db2, <<"doc1">>), + couch_db:close(Db2), + [Att] = Doc2#doc.atts, + ?assertEqual(1000, couch_att:fetch(att_len, Att)) + end)}. + + +att(Size) when is_integer(Size), Size >= 1 -> + [couch_att:new([ + {name, <<"att">>}, + {type, <<"app/binary">>}, + {att_len, Size}, + {data, fun(_Bytes) -> + << <<"x">> || _ <- lists:seq(1, Size) >> + end} + ])]. + + +compact_db(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + {ok, _CompactPid} = couch_db:start_compact(Db), + wait_compaction(DbName, "database", ?LINE), + ok = couch_db:close(Db). + + +wait_compaction(DbName, Kind, Line) -> + WaitFun = fun() -> + case is_compaction_running(DbName) of + true -> wait; + false -> ok + end + end, + case test_util:wait(WaitFun, ?TIMEOUT) of + timeout -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, Line}, + {reason, "Timeout waiting for " + ++ Kind + ++ " database compaction"}]}); + _ -> + ok + end. + + +is_compaction_running(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + {ok, DbInfo} = couch_db:get_db_info(Db), + couch_db:close(Db), + couch_util:get_value(compact_running, DbInfo) =:= true. 
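+
+% att/1 above builds a single attachment whose data fun yields Size bytes of
+% $x; e.g. with [Att] = att(4), couch_att:fetch(att_len, Att) returns 4 and
+% the stored body is <<"xxxx">>. (Illustrative comment only.)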
+ + +internal_replication_after_lowering_attachment_size_limit_test_() -> + { + "Internal replication after lowering max attachment size", + { + foreach, + fun() -> + Ctx = test_util:start_couch([mem3]), + SrcName = ?tempdb(), + {ok, SrcDb} = couch_db:create(SrcName, [?ADMIN_CTX]), + ok = couch_db:close(SrcDb), + TgtName = ?tempdb(), + {ok, TgtDb} = couch_db:create(TgtName, [?ADMIN_CTX]), + ok = couch_db:close(TgtDb), + {Ctx, SrcName, TgtName} + end, + fun({Ctx, SrcName, TgtName}) -> + config:delete("couchdb", "max_attachment_size"), + ok = couch_server:delete(SrcName, [?ADMIN_CTX]), + ok = couch_server:delete(TgtName, [?ADMIN_CTX]), + test_util:stop_couch(Ctx) + end, + [ + fun should_replicate_after_lowering_attachment_size/1 + ] + } + }. + +should_replicate_after_lowering_attachment_size({_Ctx, SrcName, TgtName}) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(begin + {ok, SrcDb} = couch_db:open(SrcName, [?ADMIN_CTX]), + SrcDoc = #doc{id = <<"doc">>, atts = att(1000)}, + {ok, _} = couch_db:update_doc(SrcDb, SrcDoc, []), + couch_db:close(SrcDb), + config:set("couchdb", "max_attachment_size", "1", _Persist = false), + % Create a pair of "fake" shards + SrcShard = #shard{name = SrcName, node = node()}, + TgtShard = #shard{name = TgtName, node = node()}, + mem3_rep:go(SrcShard, TgtShard, []), + {ok, TgtDb} = couch_db:open_int(TgtName, []), + {ok, TgtDoc} = couch_db:open_doc(TgtDb, <<"doc">>), + couch_db:close(TgtDb), + [Att] = TgtDoc#doc.atts, + ?assertEqual(1000, couch_att:fetch(att_len, Att)) + end)}. + + +get_json(Json, Path) -> + couch_util:get_nested_json_value(Json, Path). + +to_hex(Val) -> + to_hex(Val, []). + +to_hex(0, Acc) -> + Acc; +to_hex(Val, Acc) -> + to_hex(Val div 16, [hex_char(Val rem 16) | Acc]). + +hex_char(V) when V < 10 -> $0 + V; +hex_char(V) -> $A + V - 10. + +chunked_body(Chunks) -> + chunked_body(Chunks, []). + +chunked_body([], Acc) -> + iolist_to_binary(lists:reverse(Acc, "0\r\n")); +chunked_body([Chunk | Rest], Acc) -> + Size = to_hex(size(Chunk)), + chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]). + +get_socket() -> + Options = [binary, {packet, 0}, {active, false}], + Port = mochiweb_socket_server:get(couch_httpd, port), + {ok, Sock} = gen_tcp:connect(bind_address(), Port, Options), + Sock. + +bind_address() -> + case config:get("httpd", "bind_address") of + undefined -> any; + Address -> Address + end. + +request(Method, Url, Headers, Body) -> + RequestHead = [Method, " ", Url, " HTTP/1.1"], + RequestHeaders = [[string:join([Key, Value], ": "), "\r\n"] + || {Key, Value} <- Headers], + Request = [RequestHead, "\r\n", RequestHeaders, "\r\n", Body], + Sock = get_socket(), + gen_tcp:send(Sock, list_to_binary(lists:flatten(Request))), + timer:sleep(?TIMEWAIT), % must wait to receive complete response + {ok, R} = gen_tcp:recv(Sock, 0), + gen_tcp:close(Sock), + [Header, Body1] = re:split(R, "\r\n\r\n", [{return, binary}]), + {ok, {http_response, _, Code, _}, _} = + erlang:decode_packet(http, Header, []), + Json = jiffy:decode(Body1), + {ok, Code, Json}. + +create_standalone_text_att(Host, DbName) -> + {ok, Data} = file:read_file(?FIXTURE_TXT), + Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"), + {ok, Code, _Headers, _Body} = test_request:put( + Url, [{"Content-Type", "text/plain"}], Data), + ?assertEqual(201, Code), + Url. 
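+
+% Worked example of the chunked_body/1 helper defined above (illustrative):
+%   chunked_body([<<"abc">>, <<"de">>]) yields
+%   <<"3\r\nabc\r\n2\r\nde\r\n0\r\n">>
+% i.e. each chunk is "<hex size>\r\n<data>\r\n", terminated by a zero-size
+% chunk; callers append the final CRLF (or a Content-MD5 trailer) themselves.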
+ +create_standalone_png_att(Host, DbName) -> + {ok, Data} = file:read_file(?FIXTURE_PNG), + Url = string:join([Host, DbName, "doc", ?b2l(?ATT_BIN_NAME)], "/"), + {ok, Code, _Headers, _Body} = test_request:put( + Url, [{"Content-Type", "image/png"}], Data), + ?assertEqual(201, Code), + Url. + +create_inline_text_att(Host, DbName) -> + {ok, Data} = file:read_file(?FIXTURE_TXT), + Url = string:join([Host, DbName, "doc"], "/"), + Doc = {[ + {<<"_attachments">>, {[ + {?ATT_TXT_NAME, {[ + {<<"content_type">>, <<"text/plain">>}, + {<<"data">>, base64:encode(Data)} + ]} + }]}} + ]}, + {ok, Code, _Headers, _Body} = test_request:put( + Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)), + ?assertEqual(201, Code), + string:join([Url, ?b2l(?ATT_TXT_NAME)], "/"). + +create_inline_png_att(Host, DbName) -> + {ok, Data} = file:read_file(?FIXTURE_PNG), + Url = string:join([Host, DbName, "doc"], "/"), + Doc = {[ + {<<"_attachments">>, {[ + {?ATT_BIN_NAME, {[ + {<<"content_type">>, <<"image/png">>}, + {<<"data">>, base64:encode(Data)} + ]} + }]}} + ]}, + {ok, Code, _Headers, _Body} = test_request:put( + Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)), + ?assertEqual(201, Code), + string:join([Url, ?b2l(?ATT_BIN_NAME)], "/"). + +create_already_compressed_att(Host, DbName) -> + {ok, Data} = file:read_file(?FIXTURE_TXT), + Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"), + {ok, Code, _Headers, _Body} = test_request:put( + Url, [{"Content-Type", "text/plain"}, {"Content-Encoding", "gzip"}], + zlib:gzip(Data)), + ?assertEqual(201, Code), + Url. + +gzip(Data) -> + Z = zlib:open(), + ok = zlib:deflateInit(Z, ?COMPRESSION_LEVEL, deflated, 16 + 15, 8, default), + Chunk = zlib:deflate(Z, Data), + Last = zlib:deflate(Z, [], finish), + ok = zlib:deflateEnd(Z), + ok = zlib:close(Z), + [Chunk, Last]. diff --git a/src/couch/test/eunit/couchdb_auth_tests.erl b/src/couch/test/eunit/couchdb_auth_tests.erl new file mode 100644 index 000000000..ed2c064de --- /dev/null +++ b/src/couch/test/eunit/couchdb_auth_tests.erl @@ -0,0 +1,96 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couchdb_auth_tests). + +-include_lib("couch/include/couch_eunit.hrl"). + + +setup(PortType) -> + Hashed = couch_passwords:hash_admin_password("artischocko"), + ok = config:set("admins", "rocko", binary_to_list(Hashed), _Persist=false), + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + lists:concat(["http://", Addr, ":", port(PortType), "/_session"]). + +teardown(_, _) -> + ok. + + +auth_test_() -> + Tests = [ + fun should_return_username_on_post_to_session/2, + fun should_not_return_authenticated_field/2, + fun should_return_list_of_handlers/2 + ], + { + "Auth tests", + { + setup, + fun() -> test_util:start_couch([chttpd]) end, fun test_util:stop_couch/1, + [ + make_test_cases(clustered, Tests), + make_test_cases(backdoor, Tests) + ] + } + }. 
+ +make_test_cases(Mod, Funs) -> + { + lists:flatten(io_lib:format("~s", [Mod])), + {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]} + }. + +should_return_username_on_post_to_session(_PortType, Url) -> + ?_assertEqual(<<"rocko">>, + begin + Hashed = couch_passwords:hash_admin_password(<<"artischocko">>), + ok = config:set("admins", "rocko", binary_to_list(Hashed), false), + {ok, _, _, Body} = test_request:post(Url, [{"Content-Type", "application/json"}], + "{\"name\":\"rocko\", \"password\":\"artischocko\"}"), + {Json} = jiffy:decode(Body), + proplists:get_value(<<"name">>, Json) + end). + +should_not_return_authenticated_field(_PortType, Url) -> + ?_assertThrow({not_found, _}, + begin + couch_util:get_nested_json_value(session(Url), [ + <<"info">>, <<"authenticated">>]) + end). + +should_return_list_of_handlers(backdoor, Url) -> + ?_assertEqual([<<"cookie">>,<<"default">>], + begin + couch_util:get_nested_json_value(session(Url), [ + <<"info">>, <<"authentication_handlers">>]) + end); +should_return_list_of_handlers(clustered, Url) -> + ?_assertEqual([<<"cookie">>,<<"default">>], + begin + couch_util:get_nested_json_value(session(Url), [ + <<"info">>, <<"authentication_handlers">>]) + end). + + +%% ------------------------------------------------------------------ +%% Internal Function Definitions +%% ------------------------------------------------------------------ + +session(Url) -> + {ok, _, _, Body} = test_request:get(Url, [{"Content-Type", "application/json"}], + "{\"name\":\"rocko\", \"password\":\"artischocko\"}"), + jiffy:decode(Body). + +port(clustered) -> + integer_to_list(mochiweb_socket_server:get(chttpd, port)); +port(backdoor) -> + integer_to_list(mochiweb_socket_server:get(couch_httpd, port)). diff --git a/src/couch/test/eunit/couchdb_cookie_domain_tests.erl b/src/couch/test/eunit/couchdb_cookie_domain_tests.erl new file mode 100755 index 000000000..e66ab31e6 --- /dev/null +++ b/src/couch/test/eunit/couchdb_cookie_domain_tests.erl @@ -0,0 +1,69 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couchdb_cookie_domain_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(USER, "cookie_domain_test_admin"). +-define(PASS, "pass"). + +setup() -> + Ctx = test_util:start_couch([chttpd]), + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + Url = ?l2b(io_lib:format("http://~s:~b/_session", [Addr, Port])), + ContentType = [{"Content-Type", "application/json"}], + Payload = jiffy:encode({[{name, ?l2b(?USER)}, {password, ?l2b(?PASS)}]}), + {ok, ?b2l(Url), ContentType, ?b2l(Payload), Ctx}. + +teardown({ok, _, _, _, Ctx}) -> + ok = config:delete("admins", ?USER, _Persist=false), + test_util:stop_couch(Ctx). 
+ +cookie_test_() -> + { + "Cookie domain tests", + { + setup, + fun setup/0, + fun teardown/1, + fun({ok, Url, ContentType, Payload, _}) -> + [ + should_set_cookie_domain(Url, ContentType, Payload), + should_not_set_cookie_domain(Url, ContentType, Payload) + ] + end + } + }. + +should_set_cookie_domain(Url, ContentType, Payload) -> + ?_test(begin + ok = config:set("couch_httpd_auth", "cookie_domain", + "example.com", false), + {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload), + ?assertEqual(200, Code), + Cookie = proplists:get_value("Set-Cookie", Headers), + ?assert(string:str(Cookie, "; Domain=example.com") > 0) + end). + +should_not_set_cookie_domain(Url, ContentType, Payload) -> + ?_test(begin + ok = config:set("couch_httpd_auth", "cookie_domain", "", false), + {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload), + ?assertEqual(200, Code), + Cookie = proplists:get_value("Set-Cookie", Headers), + ?assertEqual(0, string:str(Cookie, "; Domain=")) + end). diff --git a/src/couch/test/eunit/couchdb_cors_tests.erl b/src/couch/test/eunit/couchdb_cors_tests.erl new file mode 100644 index 000000000..82630bba7 --- /dev/null +++ b/src/couch/test/eunit/couchdb_cors_tests.erl @@ -0,0 +1,344 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couchdb_cors_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-include_lib("chttpd/include/chttpd_cors.hrl"). + +-define(TIMEOUT, 1000). + +-define(_assertEqualLists(A, B), + ?_assertEqual(lists:usort(A), lists:usort(B))). + +-define(assertEqualLists(A, B), + ?assertEqual(lists:usort(A), lists:usort(B))). + +start() -> + Ctx = test_util:start_couch([ioq]), + ok = config:set("httpd", "enable_cors", "true", false), + ok = config:set("vhosts", "example.com", "/", false), + Ctx. + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + couch_db:close(Db), + + config:set("cors", "credentials", "false", false), + config:set("cors", "origins", "http://example.com", false), + + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)), + Host = "http://" ++ Addr ++ ":" ++ Port, + {Host, ?b2l(DbName)}. + +setup({Mod, VHost}) -> + {Host, DbName} = setup(), + Url = case Mod of + server -> + Host; + db -> + Host ++ "/" ++ DbName + end, + DefaultHeaders = [{"Origin", "http://example.com"}] + ++ maybe_append_vhost(VHost), + {Host, DbName, Url, DefaultHeaders}. + +teardown(DbName) when is_list(DbName) -> + ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]), + ok; +teardown({_, DbName}) -> + teardown(DbName). + +teardown(_, {_, DbName, _, _}) -> + teardown(DbName). 
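+
+% Shape of the preflight exchange the cases below exercise (illustrative):
+%   OPTIONS <url>
+%       Origin: http://example.com
+%       Access-Control-Request-Method: GET
+% and, when allowed, the response carries Access-Control-Allow-Origin plus
+% Access-Control-Allow-Methods listing ?SUPPORTED_METHODS.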
+
+
+cors_test_() ->
+    Funs = [
+        fun should_not_allow_origin/2,
+        fun should_not_allow_origin_with_port_mismatch/2,
+        fun should_not_allow_origin_with_scheme_mismatch/2,
+        fun should_not_allow_origin_due_to_case_mismatch/2,
+        fun should_make_simple_request/2,
+        fun should_make_preflight_request/2,
+        fun should_make_preflight_request_with_port/2,
+        fun should_make_preflight_request_with_scheme/2,
+        fun should_make_preflight_request_with_wildcard_origin/2,
+        fun should_make_request_with_credentials/2,
+        fun should_make_origin_request_with_auth/2,
+        fun should_make_preflight_request_with_auth/2
+    ],
+    {
+        "CORS (COUCHDB-431)",
+        {
+            setup,
+            fun start/0, fun test_util:stop_couch/1,
+            [
+                cors_tests(Funs),
+                vhost_cors_tests(Funs),
+                headers_tests()
+            ]
+        }
+    }.
+
+headers_tests() ->
+    {
+        "Various headers tests",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                fun should_not_return_cors_headers_for_invalid_origin/1,
+                fun should_not_return_cors_headers_for_invalid_origin_preflight/1,
+                fun should_make_request_against_attachment/1,
+                fun should_make_range_request_against_attachment/1,
+                fun should_make_request_with_if_none_match_header/1
+            ]
+        }
+    }.
+
+cors_tests(Funs) ->
+    {
+        "CORS tests",
+        [
+            make_test_case(server, false, Funs),
+            make_test_case(db, false, Funs)
+        ]
+    }.
+
+vhost_cors_tests(Funs) ->
+    {
+        "Virtual Host CORS",
+        [
+            make_test_case(server, true, Funs),
+            make_test_case(db, true, Funs)
+        ]
+    }.
+
+make_test_case(Mod, UseVhost, Funs) ->
+    {
+        case Mod of server -> "Server"; db -> "Database" end,
+        {foreachx, fun setup/1, fun teardown/2, [{{Mod, UseVhost}, Fun}
+                                                 || Fun <- Funs]}
+    }.
+
+
+should_not_allow_origin(_, {_, _, Url, Headers0}) ->
+    ?_assertEqual(undefined,
+        begin
+            config:delete("cors", "origins", false),
+            Headers1 = proplists:delete("Origin", Headers0),
+            Headers = [{"Origin", "http://127.0.0.1"}]
+                      ++ Headers1,
+            {ok, _, Resp, _} = test_request:get(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+should_not_allow_origin_with_port_mismatch({_, VHost}, {_, _, Url, _}) ->
+    ?_assertEqual(undefined,
+        begin
+            Headers = [{"Origin", "http://example.com:5984"},
+                       {"Access-Control-Request-Method", "GET"}]
+                      ++ maybe_append_vhost(VHost),
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+should_not_allow_origin_with_scheme_mismatch({_, VHost}, {_, _, Url, _}) ->
+    ?_assertEqual(undefined,
+        begin
+            % https origin vs the configured http://example.com origin
+            Headers = [{"Origin", "https://example.com"},
+                       {"Access-Control-Request-Method", "GET"}]
+                      ++ maybe_append_vhost(VHost),
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+should_not_allow_origin_due_to_case_mismatch({_, VHost}, {_, _, Url, _}) ->
+    ?_assertEqual(undefined,
+        begin
+            Headers = [{"Origin", "http://ExAmPlE.CoM"},
+                       {"Access-Control-Request-Method", "GET"}]
+                      ++ maybe_append_vhost(VHost),
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+should_make_simple_request(_, {_, _, Url, DefaultHeaders}) ->
+    ?_test(begin
+        {ok, _, Resp, _} = test_request:get(Url, DefaultHeaders),
+        ?assertEqual(
+            undefined,
+            proplists:get_value("Access-Control-Allow-Credentials", Resp)),
+        ?assertEqual(
+            "http://example.com",
+            proplists:get_value("Access-Control-Allow-Origin", Resp)),
+        ?assertEqualLists(
+            ?COUCH_HEADERS ++ list_simple_headers(Resp),
+            split_list(proplists:get_value("Access-Control-Expose-Headers", Resp)))
+    end).
+
+should_make_preflight_request(_, {_, _, Url, DefaultHeaders}) ->
+    ?_assertEqualLists(?SUPPORTED_METHODS,
+        begin
+            Headers = DefaultHeaders
+                      ++ [{"Access-Control-Request-Method", "GET"}],
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            split_list(proplists:get_value("Access-Control-Allow-Methods", Resp))
+        end).
+
+should_make_preflight_request_with_port({_, VHost}, {_, _, Url, _}) ->
+    ?_assertEqual("http://example.com:5984",
+        begin
+            config:set("cors", "origins", "http://example.com:5984",
+                       false),
+            Headers = [{"Origin", "http://example.com:5984"},
+                       {"Access-Control-Request-Method", "GET"}]
+                      ++ maybe_append_vhost(VHost),
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+should_make_preflight_request_with_scheme({_, VHost}, {_, _, Url, _}) ->
+    ?_assertEqual("https://example.com:5984",
+        begin
+            config:set("cors", "origins", "https://example.com:5984",
+                       false),
+            Headers = [{"Origin", "https://example.com:5984"},
+                       {"Access-Control-Request-Method", "GET"}]
+                      ++ maybe_append_vhost(VHost),
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+should_make_preflight_request_with_wildcard_origin({_, VHost}, {_, _, Url, _}) ->
+    ?_assertEqual("https://example.com:5984",
+        begin
+            config:set("cors", "origins", "*", false),
+            Headers = [{"Origin", "https://example.com:5984"},
+                       {"Access-Control-Request-Method", "GET"}]
+                      ++ maybe_append_vhost(VHost),
+            {ok, _, Resp, _} = test_request:options(Url, Headers),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+should_make_request_with_credentials(_, {_, _, Url, DefaultHeaders}) ->
+    ?_assertEqual("true",
+        begin
+            ok = config:set("cors", "credentials", "true", false),
+            {ok, _, Resp, _} = test_request:options(Url, DefaultHeaders),
+            proplists:get_value("Access-Control-Allow-Credentials", Resp)
+        end).
+
+should_make_origin_request_with_auth(_, {_, _, Url, DefaultHeaders}) ->
+    ?_assertEqual("http://example.com",
+        begin
+            Hashed = couch_passwords:hash_admin_password(<<"test">>),
+            config:set("admins", "test", ?b2l(Hashed), false),
+            {ok, _, Resp, _} = test_request:get(
+                Url, DefaultHeaders, [{basic_auth, {"test", "test"}}]),
+            config:delete("admins", "test", false),
+            proplists:get_value("Access-Control-Allow-Origin", Resp)
+        end).
+
+should_make_preflight_request_with_auth(_, {_, _, Url, DefaultHeaders}) ->
+    ?_assertEqualLists(?SUPPORTED_METHODS,
+        begin
+            Hashed = couch_passwords:hash_admin_password(<<"test">>),
+            config:set("admins", "test", ?b2l(Hashed), false),
+            Headers = DefaultHeaders
+                      ++ [{"Access-Control-Request-Method", "GET"}],
+            {ok, _, Resp, _} = test_request:options(
+                Url, Headers, [{basic_auth, {"test", "test"}}]),
+            config:delete("admins", "test", false),
+            split_list(proplists:get_value("Access-Control-Allow-Methods", Resp))
+        end).
+ +should_not_return_cors_headers_for_invalid_origin({Host, _}) -> + ?_assertEqual(undefined, + begin + Headers = [{"Origin", "http://127.0.0.1"}], + {ok, _, Resp, _} = test_request:get(Host, Headers), + proplists:get_value("Access-Control-Allow-Origin", Resp) + end). + +should_not_return_cors_headers_for_invalid_origin_preflight({Host, _}) -> + ?_assertEqual(undefined, + begin + Headers = [{"Origin", "http://127.0.0.1"}, + {"Access-Control-Request-Method", "GET"}], + {ok, _, Resp, _} = test_request:options(Host, Headers), + proplists:get_value("Access-Control-Allow-Origin", Resp) + end). + +should_make_request_against_attachment({Host, DbName}) -> + {"COUCHDB-1689", + ?_assertEqual(200, + begin + Url = Host ++ "/" ++ DbName, + {ok, Code0, _, _} = test_request:put( + Url ++ "/doc/file.txt", [{"Content-Type", "text/plain"}], + "hello, couch!"), + ?assert(Code0 =:= 201), + {ok, Code, _, _} = test_request:get( + Url ++ "/doc?attachments=true", + [{"Origin", "http://example.com"}]), + Code + end)}. + +should_make_range_request_against_attachment({Host, DbName}) -> + {"COUCHDB-1689", + ?_assertEqual(206, + begin + Url = Host ++ "/" ++ DbName, + {ok, Code0, _, _} = test_request:put( + Url ++ "/doc/file.txt", + [{"Content-Type", "application/octet-stream"}], + "hello, couch!"), + ?assert(Code0 =:= 201), + {ok, Code, _, _} = test_request:get( + Url ++ "/doc/file.txt", [{"Origin", "http://example.com"}, + {"Range", "bytes=0-6"}]), + Code + end)}. + +should_make_request_with_if_none_match_header({Host, DbName}) -> + {"COUCHDB-1697", + ?_assertEqual(304, + begin + Url = Host ++ "/" ++ DbName, + {ok, Code0, Headers0, _} = test_request:put( + Url ++ "/doc", [{"Content-Type", "application/json"}], "{}"), + ?assert(Code0 =:= 201), + ETag = proplists:get_value("ETag", Headers0), + {ok, Code, _, _} = test_request:get( + Url ++ "/doc", [{"Origin", "http://example.com"}, + {"If-None-Match", ETag}]), + Code + end)}. + + +maybe_append_vhost(true) -> + [{"Host", "http://example.com"}]; +maybe_append_vhost(false) -> + []. + +split_list(S) -> + re:split(S, "\\s*,\\s*", [trim, {return, list}]). + +list_simple_headers(Headers) -> + LCHeaders = [string:to_lower(K) || {K, _V} <- Headers], + lists:filter(fun(H) -> lists:member(H, ?SIMPLE_HEADERS) end, LCHeaders). diff --git a/src/couch/test/eunit/couchdb_db_tests.erl b/src/couch/test/eunit/couchdb_db_tests.erl new file mode 100644 index 000000000..734bafb9f --- /dev/null +++ b/src/couch/test/eunit/couchdb_db_tests.erl @@ -0,0 +1,91 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couchdb_db_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("mem3/include/mem3.hrl"). + +setup() -> + DbName = ?b2l(?tempdb()), + fabric:create_db(DbName), + DbName. + + +teardown(DbName) -> + (catch fabric:delete_db(DbName)), + ok. 
+ + +clustered_db_test_() -> + { + "Checking clustered db API", + { + setup, + fun() -> test_util:start_couch([ddoc_cache, mem3]) end, + fun test_util:stop/1, + [ + { + "DB deletion", + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_close_deleted_db/1, + fun should_kill_caller_from_load_validation_funs_for_deleted_db/1 + ] + } + } + ] + } + }. + + +should_close_deleted_db(DbName) -> + ?_test(begin + [#shard{name = ShardName} | _] = mem3:shards(DbName), + {ok, Db} = couch_db:open(ShardName, []), + + MonitorRef = couch_db:monitor(Db), + fabric:delete_db(DbName), + receive + {'DOWN', MonitorRef, _Type, _Pid, _Info} -> + ok + after 2000 -> + throw(timeout_error) + end, + test_util:wait(fun() -> + case ets:lookup(couch_dbs, DbName) of + [] -> ok; + _ -> wait + end + end), + ?assertEqual([], ets:lookup(couch_dbs, DbName)) + end). + + +should_kill_caller_from_load_validation_funs_for_deleted_db(DbName) -> + ?_test(begin + [#shard{name = ShardName} | _] = mem3:shards(DbName), + {ok, Db} = couch_db:open(ShardName, []), + + MonitorRef = couch_db:monitor(Db), + fabric:delete_db(DbName), + receive + {'DOWN', MonitorRef, _Type, _Pid, _Info} -> + ok + after 2000 -> + throw(timeout_error) + end, + ?assertError(database_does_not_exist, couch_db:load_validation_funs(Db)) + end). diff --git a/src/couch/test/eunit/couchdb_design_doc_tests.erl b/src/couch/test/eunit/couchdb_design_doc_tests.erl new file mode 100644 index 000000000..eef12e039 --- /dev/null +++ b/src/couch/test/eunit/couchdb_design_doc_tests.erl @@ -0,0 +1,88 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couchdb_design_doc_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + create_design_doc(DbName, <<"_design/foo">>), + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)), + BaseUrl = "http://" ++ Addr ++ ":" ++ Port, + {?b2l(DbName), BaseUrl}. + + +teardown({DbName, _}) -> + couch_server:delete(?l2b(DbName), [?ADMIN_CTX]), + ok. + + +design_list_test_() -> + { + "Check _list functionality", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_return_empty_when_plain_return/1, + fun should_return_empty_when_no_docs/1 + ] + } + } + }. + +should_return_empty_when_plain_return({DbName, BaseUrl}) -> + ?_test(begin + ?assertEqual(<<>>, + query_text(BaseUrl, DbName, "foo", "_list/plain_return/simple_view")) + end). + +should_return_empty_when_no_docs({DbName, BaseUrl}) -> + ?_test(begin + ?assertEqual(<<>>, + query_text(BaseUrl, DbName, "foo", "_list/simple_render/simple_view")) + end). 
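+% Helper for the two cases above: a design doc with one map/reduce view
+% and the two _list functions under test (plain_return emits nothing,
+% simple_render echoes every row back as JSON).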
+ +create_design_doc(DbName, DDName) -> + {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DDName}, + {<<"language">>, <<"javascript">>}, + {<<"views">>, {[ + {<<"simple_view">>, {[ + {<<"map">>, <<"function(doc) {emit(doc._id, doc)}">> }, + {<<"reduce">>, <<"function (key, values, rereduce) {return sum(values);}">> } + ]}} + ]}}, + {<<"lists">>, {[ + {<<"plain_return">>, <<"function(head, req) {return;}">>}, + {<<"simple_render">>, <<"function(head, req) {var row; while(row=getRow()) {send(JSON.stringify(row)); }}">>} + ]}} + ]}), + {ok, Rev} = couch_db:update_doc(Db, DDoc, []), + couch_db:ensure_full_commit(Db), + couch_db:close(Db), + Rev. + +query_text(BaseUrl, DbName, DDoc, Path) -> + {ok, Code, _Headers, Body} = test_request:get( + BaseUrl ++ "/" ++ DbName ++ "/_design/" ++ DDoc ++ "/" ++ Path), + ?assertEqual(200, Code), + Body. diff --git a/src/couch/test/eunit/couchdb_file_compression_tests.erl b/src/couch/test/eunit/couchdb_file_compression_tests.erl new file mode 100644 index 000000000..8f0fe5bf1 --- /dev/null +++ b/src/couch/test/eunit/couchdb_file_compression_tests.erl @@ -0,0 +1,260 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couchdb_file_compression_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(DDOC_ID, <<"_design/test">>). +-define(DOCS_COUNT, 5000). +-define(TIMEOUT, 60000). + +setup() -> + config:set("couchdb", "file_compression", "none", false), + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = populate_db(Db, ?DOCS_COUNT), + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, ?DDOC_ID}, + {<<"language">>, <<"javascript">>}, + {<<"views">>, {[ + {<<"by_id">>, {[ + {<<"map">>, <<"function(doc){emit(doc._id, doc.string);}">>} + ]}} + ]} + } + ]}), + {ok, _} = couch_db:update_doc(Db, DDoc, []), + refresh_index(DbName), + ok = couch_db:close(Db), + DbName. + +teardown(DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + ok. + + +couch_file_compression_test_() -> + { + "CouchDB file compression tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_use_none/1, + fun should_use_deflate_1/1, + fun should_use_deflate_9/1, + fun should_use_snappy/1, + fun should_compare_compression_methods/1 + ] + } + } + }. + + +should_use_none(DbName) -> + config:set("couchdb", "file_compression", "none", false), + { + "Use no compression", + [ + {"compact database", + {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_db(DbName))}}, + {"compact view", + {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_view(DbName))}} + ] + }. + +should_use_deflate_1(DbName) -> + config:set("couchdb", "file_compression", "deflate_1", false), + { + "Use deflate compression at level 1", + [ + {"compact database", + {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_db(DbName))}}, + {"compact view", + {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_view(DbName))}} + ] + }. 
+ +should_use_deflate_9(DbName) -> + config:set("couchdb", "file_compression", "deflate_9", false), + { + "Use deflate compression at level 9", + [ + {"compact database", + {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_db(DbName))}}, + {"compact view", + {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_view(DbName))}} + ] + }. + +should_use_snappy(DbName) -> + config:set("couchdb", "file_compression", "snappy", false), + { + "Use snappy compression", + [ + {"compact database", + {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_db(DbName))}}, + {"compact view", + {timeout, 5 + ?TIMEOUT div 1000, ?_test(compact_view(DbName))}} + ] + }. + +should_compare_compression_methods(DbName) -> + {"none > snappy > deflate_1 > deflate_9", + {timeout, ?TIMEOUT div 1000, ?_test(compare_compression_methods(DbName))}}. + +compare_compression_methods(DbName) -> + config:set("couchdb", "file_compression", "none", false), + ExternalSizePreCompact = db_external_size(DbName), + compact_db(DbName), + compact_view(DbName), + DbSizeNone = db_disk_size(DbName), + ViewSizeNone = view_disk_size(DbName), + ExternalSizeNone = db_external_size(DbName), + ViewExternalSizeNone = view_external_size(DbName), + + config:set("couchdb", "file_compression", "snappy", false), + compact_db(DbName), + compact_view(DbName), + DbSizeSnappy = db_disk_size(DbName), + ViewSizeSnappy = view_disk_size(DbName), + ExternalSizeSnappy = db_external_size(DbName), + ViewExternalSizeSnappy = view_external_size(DbName), + + ?assert(DbSizeNone > DbSizeSnappy), + ?assert(ViewSizeNone > ViewSizeSnappy), + + config:set("couchdb", "file_compression", "deflate_1", false), + compact_db(DbName), + compact_view(DbName), + DbSizeDeflate1 = db_disk_size(DbName), + ViewSizeDeflate1 = view_disk_size(DbName), + + ?assert(DbSizeSnappy > DbSizeDeflate1), + ?assert(ViewSizeSnappy > ViewSizeDeflate1), + + config:set("couchdb", "file_compression", "deflate_9", false), + compact_db(DbName), + compact_view(DbName), + DbSizeDeflate9 = db_disk_size(DbName), + ViewSizeDeflate9 = view_disk_size(DbName), + ExternalSizeDeflate9 = db_external_size(DbName), + ViewExternalSizeDeflate9 = view_external_size(DbName), + + ?assert(DbSizeDeflate1 > DbSizeDeflate9), + ?assert(ViewSizeDeflate1 > ViewSizeDeflate9), + ?assert(ExternalSizePreCompact >= ExternalSizeNone), + ?assert(ExternalSizeNone =:= ExternalSizeSnappy), + ?assert(ExternalSizeNone =:= ExternalSizeDeflate9), + ?assert(ViewExternalSizeNone =:= ViewExternalSizeSnappy), + ?assert(ViewExternalSizeNone =:= ViewExternalSizeDeflate9). + + +populate_db(_Db, NumDocs) when NumDocs =< 0 -> + ok; +populate_db(Db, NumDocs) -> + Docs = lists:map( + fun(_) -> + couch_doc:from_json_obj({[ + {<<"_id">>, couch_uuids:random()}, + {<<"string">>, ?l2b(lists:duplicate(1000, $X))} + ]}) + end, + lists:seq(1, 500)), + {ok, _} = couch_db:update_docs(Db, Docs, []), + populate_db(Db, NumDocs - 500). + +refresh_index(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]), + couch_mrview:query_view(Db, DDoc, <<"by_id">>, [{update, true}]), + ok = couch_db:close(Db). + +compact_db(DbName) -> + DiskSizeBefore = db_disk_size(DbName), + {ok, Db} = couch_db:open_int(DbName, []), + {ok, _CompactPid} = couch_db:start_compact(Db), + wait_compaction(DbName, "database", ?LINE), + ok = couch_db:close(Db), + DiskSizeAfter = db_disk_size(DbName), + ?assert(DiskSizeBefore > DiskSizeAfter). 
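+% compact_db/1 above and compact_view/1 below assert that a compaction
+% pass strictly shrinks the active size reported by couch_db:get_db_info/1
+% and couch_mrview:get_info/2 respectively.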
+
+compact_view(DbName) ->
+    DiskSizeBefore = view_disk_size(DbName),
+    {ok, _MonRef} = couch_mrview:compact(DbName, ?DDOC_ID, [monitor]),
+    wait_compaction(DbName, "view group", ?LINE),
+    DiskSizeAfter = view_disk_size(DbName),
+    ?assert(DiskSizeBefore > DiskSizeAfter).
+
+db_disk_size(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, Info} = couch_db:get_db_info(Db),
+    ok = couch_db:close(Db),
+    active_size(Info).
+
+db_external_size(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, Info} = couch_db:get_db_info(Db),
+    ok = couch_db:close(Db),
+    external_size(Info).
+
+view_disk_size(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
+    {ok, Info} = couch_mrview:get_info(Db, DDoc),
+    ok = couch_db:close(Db),
+    active_size(Info).
+
+view_external_size(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
+    {ok, Info} = couch_mrview:get_info(Db, DDoc),
+    ok = couch_db:close(Db),
+    external_size(Info).
+
+active_size(Info) ->
+    couch_util:get_nested_json_value({Info}, [sizes, active]).
+
+external_size(Info) ->
+    couch_util:get_nested_json_value({Info}, [sizes, external]).
+
+% Poll until neither the db nor the view group reports compact_running.
+wait_compaction(DbName, Kind, Line) ->
+    WaitFun = fun() ->
+        case is_compaction_running(DbName) of
+            true -> wait;
+            false -> ok
+        end
+    end,
+    case test_util:wait(WaitFun, ?TIMEOUT) of
+        timeout ->
+            erlang:error({assertion_failed,
+                [{module, ?MODULE},
+                 {line, Line},
+                 {reason, "Timeout waiting for "
+                          ++ Kind
+                          ++ " compaction"}]});
+        _ ->
+            ok
+    end.
+
+is_compaction_running(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, DbInfo} = couch_db:get_db_info(Db),
+    {ok, ViewInfo} = couch_mrview:get_info(Db, ?DDOC_ID),
+    couch_db:close(Db),
+    (couch_util:get_value(compact_running, ViewInfo) =:= true)
+        orelse (couch_util:get_value(compact_running, DbInfo) =:= true).
diff --git a/src/couch/test/eunit/couchdb_location_header_tests.erl b/src/couch/test/eunit/couchdb_location_header_tests.erl
new file mode 100644
index 000000000..c6c039eb0
--- /dev/null
+++ b/src/couch/test/eunit/couchdb_location_header_tests.erl
@@ -0,0 +1,78 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_location_header_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(TIMEOUT, 1000).
+
+
+setup() ->
+    DbName = ?tempdb(),
+    {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
+    couch_db:close(Db),
+
+    Addr = config:get("httpd", "bind_address", "127.0.0.1"),
+    Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+    Host = "http://" ++ Addr ++ ":" ++ Port,
+    {Host, ?b2l(DbName)}.
+
+teardown({_, DbName}) ->
+    ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
+    ok.
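+% COUCHDB-708: the Location header returned for newly created docs and
+% attachments must preserve percent-encoded newlines (%0A) in the docid.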
+ + +header_test_() -> + { + "CouchDB Location Header Tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_work_with_newlines_in_docs/1, + fun should_work_with_newlines_in_attachments/1 + ] + } + } + }. + +should_work_with_newlines_in_docs({Host, DbName}) -> + Url = Host ++ "/" ++ DbName ++ "/docid%0A", + {"COUCHDB-708", + ?_assertEqual( + Url, + begin + {ok, _, Headers, _} = test_request:put(Url, + [{"Content-Type", "application/json"}], "{}"), + proplists:get_value("Location", Headers) + end)}. + +should_work_with_newlines_in_attachments({Host, DbName}) -> + Url = Host ++ "/" ++ DbName, + AttUrl = Url ++ "/docid%0A/readme.txt", + {"COUCHDB-708", + ?_assertEqual( + AttUrl, + begin + Body = "We all live in a yellow submarine!", + Headers0 = [ + {"Content-Length", "34"}, + {"Content-Type", "text/plain"} + ], + {ok, _, Headers, _} = test_request:put(AttUrl, Headers0, Body), + proplists:get_value("Location", Headers) + end)}. diff --git a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl new file mode 100644 index 000000000..0f69048a0 --- /dev/null +++ b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl @@ -0,0 +1,140 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couchdb_mrview_cors_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + + + +-define(DDOC, {[ + {<<"_id">>, <<"_design/foo">>}, + {<<"shows">>, {[ + {<<"bar">>, <<"function(doc, req) {return '

<h1>wosh</h1>
';}">>} + ]}} +]}). + +-define(USER, "mrview_cors_test_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). + + +start() -> + Ctx = test_util:start_couch([chttpd]), + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + ok = config:set("httpd", "enable_cors", "true", false), + ok = config:set("vhosts", "example.com", "/", false), + Ctx. + +setup(PortType) -> + DbName = ?tempdb(), + ok = create_db(PortType, DbName), + + config:set("cors", "credentials", "false", false), + config:set("cors", "origins", "http://example.com", false), + + Host = host_url(PortType), + upload_ddoc(Host, ?b2l(DbName)), + {Host, ?b2l(DbName)}. + +teardown(Ctx) -> + ok = config:delete("admins", ?USER, _Persist=false), + test_util:stop_couch(Ctx). + +teardown(PortType, {_Host, DbName}) -> + delete_db(PortType, ?l2b(DbName)), + ok. + +cors_test_() -> + { + "CORS for mrview", + { + setup, + fun start/0, fun teardown/1, + [show_tests()] + } + }. + +show_tests() -> + { + "Check CORS for show", + [ + make_test_case(clustered, [fun should_make_shows_request/2]), + make_test_case(backdoor, [fun should_make_shows_request/2]) + ] + }. + +make_test_case(Mod, Funs) -> + { + lists:flatten(io_lib:format("~s", [Mod])), + {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]} + }. + +should_make_shows_request(_, {Host, DbName}) -> + ?_test(begin + ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_show/bar", + Headers = [{"Origin", "http://example.com"}, + {"Access-Control-Request-Method", "GET"}, ?AUTH], + {ok, _, Resp, Body} = test_request:get(ReqUrl, Headers), + Origin = proplists:get_value("Access-Control-Allow-Origin", Resp), + ?assertEqual("http://example.com", Origin), + ?assertEqual(<<"

<h1>wosh</h1>
">>, Body) + end). + +create_db(backdoor, DbName) -> + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + couch_db:close(Db); +create_db(clustered, DbName) -> + {ok, Status, _, _} = test_request:put(db_url(DbName), [?AUTH], ""), + assert_success(create_db, Status), + ok. + +delete_db(backdoor, DbName) -> + couch_server:delete(DbName, [?ADMIN_CTX]); +delete_db(clustered, DbName) -> + {ok, Status, _, _} = test_request:delete(db_url(DbName), [?AUTH]), + assert_success(delete_db, Status), + ok. + +assert_success(create_db, Status) -> + true = lists:member(Status, [201, 202]); +assert_success(delete_db, Status) -> + true = lists:member(Status, [200, 202]). + + +host_url(PortType) -> + "http://" ++ bind_address(PortType) ++ ":" ++ port(PortType). + +bind_address(PortType) -> + config:get(section(PortType), "bind_address", "127.0.0.1"). + +section(backdoor) -> "http"; +section(clustered) -> "chttpd". + +db_url(DbName) when is_binary(DbName) -> + db_url(binary_to_list(DbName)); +db_url(DbName) when is_list(DbName) -> + host_url(clustered) ++ "/" ++ DbName. + +port(clustered) -> + integer_to_list(mochiweb_socket_server:get(chttpd, port)); +port(backdoor) -> + integer_to_list(mochiweb_socket_server:get(couch_httpd, port)). + + +upload_ddoc(Host, DbName) -> + Url = Host ++ "/" ++ DbName ++ "/_design/foo", + Body = couch_util:json_encode(?DDOC), + {ok, 201, _Resp, _Body} = test_request:put(Url, [?AUTH], Body), + ok. diff --git a/src/couch/test/eunit/couchdb_mrview_tests.erl b/src/couch/test/eunit/couchdb_mrview_tests.erl new file mode 100644 index 000000000..1c96a0ae0 --- /dev/null +++ b/src/couch/test/eunit/couchdb_mrview_tests.erl @@ -0,0 +1,253 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couchdb_mrview_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + + + +-define(DDOC, {[ + {<<"_id">>, <<"_design/foo">>}, + {<<"shows">>, {[ + {<<"bar">>, <<"function(doc, req) {return '

<h1>wosh</h1>
';}">>} + ]}}, + {<<"updates">>, {[ + {<<"report">>, <<"function(doc, req) {" + "var data = JSON.parse(req.body); " + "return ['test', data];" + "}">>} + ]}}, + {<<"views">>, {[ + {<<"view1">>, {[ + {<<"map">>, <<"function(doc){emit(doc._id, doc._rev)}">>} + ]}} + ]}} +]}). + +-define(USER, "admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). + + +start() -> + Ctx = test_util:start_couch([chttpd]), + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + Ctx. + +setup(PortType) -> + ok = meck:new(mochiweb_socket, [passthrough]), + ok = meck:expect(mochiweb_socket, recv, fun mochiweb_socket_recv/3), + + DbName = ?tempdb(), + ok = create_db(PortType, DbName), + + Host = host_url(PortType), + upload_ddoc(Host, ?b2l(DbName)), + {Host, ?b2l(DbName)}. + +teardown(Ctx) -> + ok = config:delete("admins", ?USER, _Persist=false), + test_util:stop_couch(Ctx). + +teardown(PortType, {_Host, DbName}) -> + (catch meck:unload(mochiweb_socket)), + delete_db(PortType, ?l2b(DbName)), + ok. + +mrview_show_test_() -> + { + "Check show functionality", + { + setup, + fun start/0, fun teardown/1, + [ + make_test_case(clustered, [fun should_return_invalid_request_body/2]), + make_test_case(backdoor, [fun should_return_invalid_request_body/2]) + ] + } + }. + +mrview_query_test_() -> + { + "Check view query functionality", + { + setup, + fun start/0, fun teardown/1, + [ + make_test_case(clustered, [fun should_return_400_for_wrong_order_of_keys/2]), + make_test_case(backdoor, [fun should_return_400_for_wrong_order_of_keys/2]) + ] + } + }. + +mrview_cleanup_index_files_test_() -> + { + "Check index files cleanup", + { + setup, + fun start/0, fun teardown/1, + [ + make_test_case(clustered, [fun should_cleanup_index_files/2]) + ] + } + }. + + +make_test_case(Mod, Funs) -> + { + lists:flatten(io_lib:format("~s", [Mod])), + {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]} + }. + +should_return_invalid_request_body(PortType, {Host, DbName}) -> + ?_test(begin + ok = create_doc(PortType, ?l2b(DbName), <<"doc_id">>, {[]}), + ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_update/report/doc_id", + {ok, Status, _Headers, Body} = + test_request:post(ReqUrl, [?AUTH], <<"{truncated}">>), + {Props} = jiffy:decode(Body), + ?assertEqual( + <<"bad_request">>, couch_util:get_value(<<"error">>, Props)), + ?assertEqual( + <<"Invalid request body">>, couch_util:get_value(<<"reason">>, Props)), + ?assertEqual(400, Status), + ok + end). + +should_return_400_for_wrong_order_of_keys(_PortType, {Host, DbName}) -> + Args = [{start_key, "\"bbb\""}, {end_key, "\"aaa\""}], + ?_test(begin + ReqUrl = Host ++ "/" ++ DbName + ++ "/_design/foo/_view/view1?" ++ mochiweb_util:urlencode(Args), + {ok, Status, _Headers, Body} = test_request:get(ReqUrl, [?AUTH]), + {Props} = jiffy:decode(Body), + ?assertEqual( + <<"query_parse_error">>, couch_util:get_value(<<"error">>, Props)), + ?assertEqual( + <<"No rows can match your key range, reverse your start_key and end_key or set descending=true">>, + couch_util:get_value(<<"reason">>, Props)), + ?assertEqual(400, Status), + ok + end). 
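+% _view_cleanup should remove stale index files while leaving the files
+% of the active view signature alone; since a genuinely inactive view is
+% hard to arrange, the stale files are planted by hand below.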
+ +should_cleanup_index_files(_PortType, {Host, DbName}) -> + ?_test(begin + IndexWildCard = [ + config:get("couchdb", "view_index_dir"), + "/.shards/*/", + DbName, + ".[0-9]*_design/mrview/*" + ], + ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_view/view1", + {ok, _Status0, _Headers0, _Body0} = test_request:get(ReqUrl, [?AUTH]), + FileList0 = filelib:wildcard(IndexWildCard), + ?assertNotEqual([], FileList0), + + % It is hard to simulate inactive view. + % Since couch_mrview:cleanup is called on view definition change. + % That's why we just create extra files in place + ToDelete = lists:map(fun(FilePath) -> + ViewFile = filename:join([ + filename:dirname(FilePath), + "11111111111111111111111111111111.view"]), + file:write_file(ViewFile, <<>>), + ViewFile + end, FileList0), + FileList1 = filelib:wildcard(IndexWildCard), + ?assertEqual([], lists:usort(FileList1 -- (FileList0 ++ ToDelete))), + + CleanupUrl = Host ++ "/" ++ DbName ++ "/_view_cleanup", + {ok, _Status1, _Headers1, _Body1} = test_request:post( + CleanupUrl, [], <<>>, [?AUTH]), + test_util:wait(fun() -> + IndexFiles = filelib:wildcard(IndexWildCard), + case lists:usort(FileList0) == lists:usort(IndexFiles) of + false -> wait; + true -> ok + end + end), + ok + end). + + +create_doc(backdoor, DbName, Id, Body) -> + JsonDoc = couch_util:json_apply_field({<<"_id">>, Id}, Body), + Doc = couch_doc:from_json_obj(JsonDoc), + {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), + {ok, _} = couch_db:update_docs(Db, [Doc]), + couch_db:ensure_full_commit(Db), + couch_db:close(Db); +create_doc(clustered, DbName, Id, Body) -> + JsonDoc = couch_util:json_apply_field({<<"_id">>, Id}, Body), + Doc = couch_doc:from_json_obj(JsonDoc), + {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]), + ok. + +create_db(backdoor, DbName) -> + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + couch_db:close(Db); +create_db(clustered, DbName) -> + {ok, Status, _, _} = test_request:put(db_url(DbName), [?AUTH], ""), + assert_success(create_db, Status), + ok. + +delete_db(backdoor, DbName) -> + couch_server:delete(DbName, [?ADMIN_CTX]); +delete_db(clustered, DbName) -> + {ok, Status, _, _} = test_request:delete(db_url(DbName), [?AUTH]), + assert_success(delete_db, Status), + ok. + +assert_success(create_db, Status) -> + ?assert(lists:member(Status, [201, 202])); +assert_success(delete_db, Status) -> + ?assert(lists:member(Status, [200, 202])). + + +host_url(PortType) -> + "http://" ++ bind_address(PortType) ++ ":" ++ port(PortType). + +bind_address(PortType) -> + config:get(section(PortType), "bind_address", "127.0.0.1"). + +section(backdoor) -> "http"; +section(clustered) -> "chttpd". + +db_url(DbName) when is_binary(DbName) -> + db_url(binary_to_list(DbName)); +db_url(DbName) when is_list(DbName) -> + host_url(clustered) ++ "/" ++ DbName. + +port(clustered) -> + integer_to_list(mochiweb_socket_server:get(chttpd, port)); +port(backdoor) -> + integer_to_list(mochiweb_socket_server:get(couch_httpd, port)). + + +upload_ddoc(Host, DbName) -> + Url = Host ++ "/" ++ DbName ++ "/_design/foo", + Body = couch_util:json_encode(?DDOC), + {ok, 201, _Resp, _Body} = test_request:put(Url, [?AUTH], Body), + ok. + +mochiweb_socket_recv(Sock, Len, Timeout) -> + case meck:passthrough([Sock, Len, Timeout]) of + {ok, <<"{truncated}">>} -> + {error, closed}; + {ok, Data} -> + {ok, Data}; + Else -> + Else + end. 
diff --git a/src/couch/test/eunit/couchdb_os_proc_pool.erl b/src/couch/test/eunit/couchdb_os_proc_pool.erl new file mode 100644 index 000000000..69f8051ad --- /dev/null +++ b/src/couch/test/eunit/couchdb_os_proc_pool.erl @@ -0,0 +1,306 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couchdb_os_proc_pool). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 1000). + + +setup() -> + ok = couch_proc_manager:reload(), + ok = setup_config(). + +teardown(_) -> + ok. + +os_proc_pool_test_() -> + { + "OS processes pool tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + should_block_new_proc_on_full_pool(), + should_free_slot_on_proc_unexpected_exit(), + should_reuse_known_proc(), +% should_process_waiting_queue_as_fifo(), + should_reduce_pool_on_idle_os_procs() + ] + } + } + }. + + +should_block_new_proc_on_full_pool() -> + ?_test(begin + Client1 = spawn_client(), + Client2 = spawn_client(), + Client3 = spawn_client(), + + ?assertEqual(ok, ping_client(Client1)), + ?assertEqual(ok, ping_client(Client2)), + ?assertEqual(ok, ping_client(Client3)), + + Proc1 = get_client_proc(Client1, "1"), + Proc2 = get_client_proc(Client2, "2"), + Proc3 = get_client_proc(Client3, "3"), + + ?assertNotEqual(Proc1, Proc2), + ?assertNotEqual(Proc2, Proc3), + ?assertNotEqual(Proc3, Proc1), + + Client4 = spawn_client(), + ?assertEqual(timeout, ping_client(Client4)), + + ?assertEqual(ok, stop_client(Client1)), + ?assertEqual(ok, ping_client(Client4)), + + Proc4 = get_client_proc(Client4, "4"), + + ?assertEqual(Proc1#proc.pid, Proc4#proc.pid), + ?assertNotEqual(Proc1#proc.client, Proc4#proc.client), + + lists:map(fun(C) -> + ?assertEqual(ok, stop_client(C)) + end, [Client2, Client3, Client4]) + end). 
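+% A client that crashes without checking its process back in must still
+% free its pool slot, and the orphaned OS process should be handed to
+% the next client that asks for one.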
+ + +should_free_slot_on_proc_unexpected_exit() -> + ?_test(begin + Client1 = spawn_client(), + Client2 = spawn_client(), + Client3 = spawn_client(), + + ?assertEqual(ok, ping_client(Client1)), + ?assertEqual(ok, ping_client(Client2)), + ?assertEqual(ok, ping_client(Client3)), + + Proc1 = get_client_proc(Client1, "1"), + Proc2 = get_client_proc(Client2, "2"), + Proc3 = get_client_proc(Client3, "3"), + + ?assertNotEqual(Proc1#proc.pid, Proc2#proc.pid), + ?assertNotEqual(Proc1#proc.client, Proc2#proc.client), + ?assertNotEqual(Proc2#proc.pid, Proc3#proc.pid), + ?assertNotEqual(Proc2#proc.client, Proc3#proc.client), + ?assertNotEqual(Proc3#proc.pid, Proc1#proc.pid), + ?assertNotEqual(Proc3#proc.client, Proc1#proc.client), + + ?assertEqual(ok, kill_client(Client1)), + + Client4 = spawn_client(), + ?assertEqual(ok, ping_client(Client4)), + + Proc4 = get_client_proc(Client4, "4"), + + ?assertEqual(Proc4#proc.pid, Proc1#proc.pid), + ?assertNotEqual(Proc4#proc.client, Proc1#proc.client), + ?assertNotEqual(Proc2#proc.pid, Proc4#proc.pid), + ?assertNotEqual(Proc2#proc.client, Proc4#proc.client), + ?assertNotEqual(Proc3#proc.pid, Proc4#proc.pid), + ?assertNotEqual(Proc3#proc.client, Proc4#proc.client), + + lists:map(fun(C) -> + ?assertEqual(ok, stop_client(C)) + end, [Client2, Client3, Client4]) + end). + + +should_reuse_known_proc() -> + ?_test(begin + Client1 = spawn_client(<<"ddoc1">>), + Client2 = spawn_client(<<"ddoc2">>), + + ?assertEqual(ok, ping_client(Client1)), + ?assertEqual(ok, ping_client(Client2)), + + Proc1 = get_client_proc(Client1, "1"), + Proc2 = get_client_proc(Client2, "2"), + ?assertNotEqual(Proc1#proc.pid, Proc2#proc.pid), + + ?assertEqual(ok, stop_client(Client1)), + ?assertEqual(ok, stop_client(Client2)), + ?assert(is_process_alive(Proc1#proc.pid)), + ?assert(is_process_alive(Proc2#proc.pid)), + + Client1Again = spawn_client(<<"ddoc1">>), + ?assertEqual(ok, ping_client(Client1Again)), + Proc1Again = get_client_proc(Client1Again, "1-again"), + ?assertEqual(Proc1#proc.pid, Proc1Again#proc.pid), + ?assertNotEqual(Proc1#proc.client, Proc1Again#proc.client), + ?assertEqual(ok, stop_client(Client1Again)) + end). + + +%should_process_waiting_queue_as_fifo() -> +% ?_test(begin +% Client1 = spawn_client(<<"ddoc1">>), +% Client2 = spawn_client(<<"ddoc2">>), +% Client3 = spawn_client(<<"ddoc3">>), +% Client4 = spawn_client(<<"ddoc4">>), +% timer:sleep(100), +% Client5 = spawn_client(<<"ddoc5">>), +% +% ?assertEqual(ok, ping_client(Client1)), +% ?assertEqual(ok, ping_client(Client2)), +% ?assertEqual(ok, ping_client(Client3)), +% ?assertEqual(timeout, ping_client(Client4)), +% ?assertEqual(timeout, ping_client(Client5)), +% +% Proc1 = get_client_proc(Client1, "1"), +% ?assertEqual(ok, stop_client(Client1)), +% ?assertEqual(ok, ping_client(Client4)), +% Proc4 = get_client_proc(Client4, "4"), +% +% ?assertNotEqual(Proc4#proc.client, Proc1#proc.client), +% ?assertEqual(Proc1#proc.pid, Proc4#proc.pid), +% ?assertEqual(timeout, ping_client(Client5)), +% +% ?assertEqual(ok, stop_client(Client2)), +% ?assertEqual(ok, stop_client(Client3)), +% ?assertEqual(ok, stop_client(Client4)), +% ?assertEqual(ok, stop_client(Client5)) +% end). 
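+% Pool sizing for this suite comes from setup_config/0, i.e.:
+%   [query_server_config]
+%   os_process_limit = 3
+%   os_process_soft_limit = 2
+% The case below additionally sets os_process_idle_limit = 1 (seconds)
+% and expects the pool to shrink from three processes to one once they
+% all sit idle.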
+ + +should_reduce_pool_on_idle_os_procs() -> + ?_test(begin + %% os_process_idle_limit is in sec + config:set("query_server_config", + "os_process_idle_limit", "1", false), + ok = confirm_config("os_process_idle_limit", "1"), + + Client1 = spawn_client(<<"ddoc1">>), + Client2 = spawn_client(<<"ddoc2">>), + Client3 = spawn_client(<<"ddoc3">>), + + ?assertEqual(ok, ping_client(Client1)), + ?assertEqual(ok, ping_client(Client2)), + ?assertEqual(ok, ping_client(Client3)), + + ?assertEqual(3, couch_proc_manager:get_proc_count()), + + ?assertEqual(ok, stop_client(Client1)), + ?assertEqual(ok, stop_client(Client2)), + ?assertEqual(ok, stop_client(Client3)), + + timer:sleep(1200), + ?assertEqual(1, couch_proc_manager:get_proc_count()) + end). + + +setup_config() -> + config:set("native_query_servers", "enable_erlang_query_server", "true", false), + config:set("query_server_config", "os_process_limit", "3", false), + config:set("query_server_config", "os_process_soft_limit", "2", false), + ok = confirm_config("os_process_soft_limit", "2"). + +confirm_config(Key, Value) -> + confirm_config(Key, Value, 0). + +confirm_config(Key, Value, Count) -> + case config:get("query_server_config", Key) of + Value -> + ok; + _ when Count > 10 -> + erlang:error({config_setup, [ + {module, ?MODULE}, + {line, ?LINE}, + {value, timeout} + ]}); + _ -> + %% we need to wait to let gen_server:cast finish + timer:sleep(10), + confirm_config(Key, Value, Count + 1) + end. + +spawn_client() -> + Parent = self(), + Ref = make_ref(), + Pid = spawn(fun() -> + Proc = couch_query_servers:get_os_process(<<"erlang">>), + loop(Parent, Ref, Proc) + end), + {Pid, Ref}. + +spawn_client(DDocId) -> + Parent = self(), + Ref = make_ref(), + Pid = spawn(fun() -> + DDocKey = {DDocId, <<"1-abcdefgh">>}, + DDoc = #doc{body={[{<<"language">>, <<"erlang">>}]}}, + Proc = couch_query_servers:get_ddoc_process(DDoc, DDocKey), + loop(Parent, Ref, Proc) + end), + {Pid, Ref}. + +ping_client({Pid, Ref}) -> + Pid ! ping, + receive + {pong, Ref} -> + ok + after ?TIMEOUT -> + timeout + end. + +get_client_proc({Pid, Ref}, ClientName) -> + Pid ! get_proc, + receive + {proc, Ref, Proc} -> Proc + after ?TIMEOUT -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, "Timeout getting client " + ++ ClientName ++ " proc"}]}) + end. + +stop_client({Pid, Ref}) -> + Pid ! stop, + receive + {stop, Ref} -> + ok + after ?TIMEOUT -> + timeout + end. + +kill_client({Pid, Ref}) -> + Pid ! die, + receive + {die, Ref} -> + ok + after ?TIMEOUT -> + timeout + end. + +loop(Parent, Ref, Proc) -> + receive + ping -> + Parent ! {pong, Ref}, + loop(Parent, Ref, Proc); + get_proc -> + Parent ! {proc, Ref, Proc}, + loop(Parent, Ref, Proc); + stop -> + couch_query_servers:ret_os_process(Proc), + Parent ! {stop, Ref}; + die -> + Parent ! {die, Ref}, + exit(some_error) + end. diff --git a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl new file mode 100644 index 000000000..e92c73856 --- /dev/null +++ b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl @@ -0,0 +1,281 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_update_conflicts_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(i2l(I), integer_to_list(I)).
+-define(DOC_ID, <<"foobar">>).
+-define(LOCAL_DOC_ID, <<"_local/foobar">>).
+-define(NUM_CLIENTS, [100, 500, 1000, 2000, 5000, 10000]).
+-define(TIMEOUT, 20000).
+
+start() ->
+    Ctx = test_util:start_couch(),
+    config:set("couchdb", "delayed_commits", "true", false),
+    Ctx.
+
+setup() ->
+    DbName = ?tempdb(),
+    {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX, overwrite]),
+    Doc = couch_doc:from_json_obj({[{<<"_id">>, ?DOC_ID},
+                                    {<<"value">>, 0}]}),
+    {ok, Rev} = couch_db:update_doc(Db, Doc, []),
+    ok = couch_db:close(Db),
+    RevStr = couch_doc:rev_to_str(Rev),
+    {DbName, RevStr}.
+
+setup(_) ->
+    setup().
+
+teardown({DbName, _}) ->
+    ok = couch_server:delete(DbName, []),
+    ok.
+
+teardown(_, {DbName, _RevStr}) ->
+    teardown({DbName, _RevStr}).
+
+
+update_conflicts_test_() ->
+    {
+        "Update conflicts",
+        {
+            setup,
+            fun start/0, fun test_util:stop_couch/1,
+            [
+                concurrent_updates(),
+                bulk_docs_updates()
+            ]
+        }
+    }.
+
+concurrent_updates() ->
+    {
+        "Concurrent updates",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [{NumClients, fun should_concurrently_update_doc/2}
+             || NumClients <- ?NUM_CLIENTS]
+        }
+    }.
+
+bulk_docs_updates() ->
+    {
+        "Bulk docs updates",
+        {
+            foreach,
+            fun setup/0, fun teardown/1,
+            [
+                fun should_bulk_create_delete_doc/1,
+                fun should_bulk_create_local_doc/1,
+                fun should_ignore_invalid_local_doc/1
+            ]
+        }
+    }.
+
+
+should_concurrently_update_doc(NumClients, {DbName, InitRev}) ->
+    {?i2l(NumClients) ++ " clients",
+     {inorder,
+      [{"update doc",
+        {timeout, ?TIMEOUT div 1000,
+         ?_test(concurrent_doc_update(NumClients, DbName, InitRev))}},
+       {"ensure in single leaf",
+        ?_test(ensure_in_single_revision_leaf(DbName))}]}}.
+
+should_bulk_create_delete_doc({DbName, InitRev}) ->
+    ?_test(bulk_delete_create(DbName, InitRev)).
+
+should_bulk_create_local_doc({DbName, _}) ->
+    ?_test(bulk_create_local_doc(DbName)).
+
+should_ignore_invalid_local_doc({DbName, _}) ->
+    ?_test(ignore_invalid_local_doc(DbName)).
+
+
+concurrent_doc_update(NumClients, DbName, InitRev) ->
+    Clients = lists:map(
+        fun(Value) ->
+            ClientDoc = couch_doc:from_json_obj({[
+                {<<"_id">>, ?DOC_ID},
+                {<<"_rev">>, InitRev},
+                {<<"value">>, Value}
+            ]}),
+            Pid = spawn_client(DbName, ClientDoc),
+            {Value, Pid, erlang:monitor(process, Pid)}
+        end,
+        lists:seq(1, NumClients)),
+
+    lists:foreach(fun({_, Pid, _}) -> Pid ! go end, Clients),
+
+    {NumConflicts, SavedValue} = lists:foldl(
+        fun({Value, Pid, MonRef}, {AccConflicts, AccValue}) ->
+            receive
+                {'DOWN', MonRef, process, Pid, {ok, _NewRev}} ->
+                    {AccConflicts, Value};
+                {'DOWN', MonRef, process, Pid, conflict} ->
+                    {AccConflicts + 1, AccValue};
+                {'DOWN', MonRef, process, Pid, Error} ->
+                    erlang:error({assertion_failed,
+                        [{module, ?MODULE},
+                         {line, ?LINE},
+                         {reason, "Client " ++ ?i2l(Value)
+                                  ++ " got update error: "
+                                  ++ couch_util:to_list(Error)}]})
+            after ?TIMEOUT div 2 ->
+                erlang:error({assertion_failed,
+                    [{module, ?MODULE},
+                     {line, ?LINE},
+                     {reason, "Timeout waiting for client "
+                              ++ ?i2l(Value) ++ " to die"}]})
+            end
+        end, {0, nil}, Clients),
+    ?assertEqual(NumClients - 1, NumConflicts),
+
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []),
+    ok = couch_db:close(Db),
+    ?assertEqual(1, length(Leaves)),
+
+    [{ok, Doc2}] = Leaves,
+    {JsonDoc} = couch_doc:to_json_obj(Doc2, []),
+    ?assertEqual(SavedValue, couch_util:get_value(<<"value">>, JsonDoc)).
+
+ensure_in_single_revision_leaf(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, Leaves} = couch_db:open_doc_revs(Db, ?DOC_ID, all, []),
+    ok = couch_db:close(Db),
+    [{ok, Doc}] = Leaves,
+
+    %% FIXME: server restart won't work from test side
+    %% stop(ok),
+    %% start(),
+
+    {ok, Db2} = couch_db:open_int(DbName, []),
+    {ok, Leaves2} = couch_db:open_doc_revs(Db2, ?DOC_ID, all, []),
+    ok = couch_db:close(Db2),
+    ?assertEqual(1, length(Leaves2)),
+
+    [{ok, Doc2}] = Leaves2,
+    ?assertEqual(Doc, Doc2).
+
+bulk_delete_create(DbName, InitRev) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+
+    DeletedDoc = couch_doc:from_json_obj({[
+        {<<"_id">>, ?DOC_ID},
+        {<<"_rev">>, InitRev},
+        {<<"_deleted">>, true}
+    ]}),
+    NewDoc = couch_doc:from_json_obj({[
+        {<<"_id">>, ?DOC_ID},
+        {<<"value">>, 666}
+    ]}),
+
+    {ok, Results} = couch_db:update_docs(Db, [DeletedDoc, NewDoc], []),
+    ok = couch_db:close(Db),
+
+    ?assertEqual(2, length([ok || {ok, _} <- Results])),
+    [{ok, Rev1}, {ok, Rev2}] = Results,
+
+    {ok, Db2} = couch_db:open_int(DbName, []),
+    {ok, [{ok, Doc1}]} = couch_db:open_doc_revs(
+        Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]),
+    {ok, [{ok, Doc2}]} = couch_db:open_doc_revs(
+        Db2, ?DOC_ID, [Rev2], [conflicts, deleted_conflicts]),
+    ok = couch_db:close(Db2),
+
+    {Doc1Props} = couch_doc:to_json_obj(Doc1, []),
+    {Doc2Props} = couch_doc:to_json_obj(Doc2, []),
+
+    %% Document was deleted
+    ?assert(couch_util:get_value(<<"_deleted">>, Doc1Props)),
+    %% New document not flagged as deleted
+    ?assertEqual(undefined, couch_util:get_value(<<"_deleted">>,
+        Doc2Props)),
+    %% New leaf revision has the right value
+    ?assertEqual(666, couch_util:get_value(<<"value">>,
+        Doc2Props)),
+    %% Deleted document has no conflicts
+    ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
+        Doc1Props)),
+    %% Deleted document has no deleted conflicts
+    ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
+        Doc1Props)),
+    %% New leaf revision doesn't have conflicts
+    ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
+        Doc2Props)),
+    %% New leaf revision doesn't have deleted conflicts
+    ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
+        Doc2Props)),
+
+    %% Deleted revision has position 2
+    ?assertEqual(2, element(1, Rev1)),
+    %% New leaf revision has position 3
+    ?assertEqual(3, element(1, Rev2)).
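+% The two _local doc cases below go through the replicated_changes path:
+% a well-formed rev ("0-1") is accepted and bumped to "0-2", while a
+% malformed rev ("0-abcdef") is ignored and the doc is never created.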
+ + +bulk_create_local_doc(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + + LocalDoc = couch_doc:from_json_obj({[ + {<<"_id">>, ?LOCAL_DOC_ID}, + {<<"_rev">>, <<"0-1">>} + ]}), + + {ok, Results} = couch_db:update_docs(Db, [LocalDoc], + [], replicated_changes), + ok = couch_db:close(Db), + ?assertEqual([], Results), + + {ok, Db2} = couch_db:open_int(DbName, []), + {ok, LocalDoc1} = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []), + ok = couch_db:close(Db2), + ?assertEqual(?LOCAL_DOC_ID, LocalDoc1#doc.id), + ?assertEqual({0, [<<"2">>]}, LocalDoc1#doc.revs). + + +ignore_invalid_local_doc(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + + LocalDoc = couch_doc:from_json_obj({[ + {<<"_id">>, ?LOCAL_DOC_ID}, + {<<"_rev">>, <<"0-abcdef">>} + ]}), + + {ok, Results} = couch_db:update_docs(Db, [LocalDoc], + [], replicated_changes), + ok = couch_db:close(Db), + ?assertEqual([], Results), + + {ok, Db2} = couch_db:open_int(DbName, []), + Result2 = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []), + ok = couch_db:close(Db2), + ?assertEqual({not_found, missing}, Result2). + + +spawn_client(DbName, Doc) -> + spawn(fun() -> + {ok, Db} = couch_db:open_int(DbName, []), + receive + go -> ok + end, + erlang:yield(), + Result = try + couch_db:update_doc(Db, Doc, []) + catch _:Error -> + Error + end, + ok = couch_db:close(Db), + exit(Result) + end). diff --git a/src/couch/test/eunit/couchdb_vhosts_tests.erl b/src/couch/test/eunit/couchdb_vhosts_tests.erl new file mode 100644 index 000000000..1c4117215 --- /dev/null +++ b/src/couch/test/eunit/couchdb_vhosts_tests.erl @@ -0,0 +1,272 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couchdb_vhosts_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 1000). +-define(iofmt(S, A), lists:flatten(io_lib:format(S, A))). + + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"doc1">>}, + {<<"value">>, 666} + ]}), + + Doc1 = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/doc1">>}, + {<<"shows">>, {[ + {<<"test">>, <<"function(doc, req) { + return { json: { + requested_path: '/' + req.requested_path.join('/'), + path: '/' + req.path.join('/')}};}">>} + ]}}, + {<<"rewrites">>, [ + {[ + {<<"from">>, <<"/">>}, + {<<"to">>, <<"_show/test">>} + ]} + ]} + ]}), + {ok, _} = couch_db:update_docs(Db, [Doc, Doc1]), + couch_db:ensure_full_commit(Db), + couch_db:close(Db), + + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)), + Url = "http://" ++ Addr ++ ":" ++ Port, + {Url, ?b2l(DbName)}. + +teardown({_, DbName}) -> + ok = couch_server:delete(?l2b(DbName), []), + ok. 
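+% Each case below writes a [vhosts] entry and then issues a request with
+% a matching Host header, e.g. (illustrative, mirroring the config:set
+% calls in the tests; <db_name> is a placeholder for the temp db):
+%   [vhosts]
+%   example.com = /<db_name>
+%   *.example.com = /<db_name>/_design/doc1/_rewrite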
+ + +vhosts_test_() -> + { + "Virtual Hosts rewrite tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_return_database_info/1, + fun should_return_revs_info/1, + fun should_return_virtual_request_path_field_in_request/1, + fun should_return_real_request_path_field_in_request/1, + fun should_match_wildcard_vhost/1, + fun should_return_db_info_for_wildcard_vhost_for_custom_db/1, + fun should_replace_rewrite_variables_for_db_and_doc/1, + fun should_return_db_info_for_vhost_with_resource/1, + fun should_return_revs_info_for_vhost_with_resource/1, + fun should_return_db_info_for_vhost_with_wildcard_resource/1, + fun should_return_path_for_vhost_with_wildcard_host/1 + ] + } + } + }. + +should_return_database_info({Url, DbName}) -> + ?_test(begin + ok = config:set("vhosts", "example.com", "/" ++ DbName, false), + case test_request:get(Url, [], [{host_header, "example.com"}]) of + {ok, _, _, Body} -> + {JsonBody} = jiffy:decode(Body), + ?assert(proplists:is_defined(<<"db_name">>, JsonBody)); + Else -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, ?iofmt("Request failed: ~p", [Else])}]}) + end + end). + +should_return_revs_info({Url, DbName}) -> + ?_test(begin + ok = config:set("vhosts", "example.com", "/" ++ DbName, false), + case test_request:get(Url ++ "/doc1?revs_info=true", [], + [{host_header, "example.com"}]) of + {ok, _, _, Body} -> + {JsonBody} = jiffy:decode(Body), + ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody)); + Else -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, ?iofmt("Request failed: ~p", [Else])}]}) + end + end). + +should_return_virtual_request_path_field_in_request({Url, DbName}) -> + ?_test(begin + ok = config:set("vhosts", "example1.com", + "/" ++ DbName ++ "/_design/doc1/_rewrite/", + false), + case test_request:get(Url, [], [{host_header, "example1.com"}]) of + {ok, _, _, Body} -> + {Json} = jiffy:decode(Body), + ?assertEqual(<<"/">>, + proplists:get_value(<<"requested_path">>, Json)); + Else -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, ?iofmt("Request failed: ~p", [Else])}]}) + end + end). + +should_return_real_request_path_field_in_request({Url, DbName}) -> + ?_test(begin + ok = config:set("vhosts", "example1.com", + "/" ++ DbName ++ "/_design/doc1/_rewrite/", + false), + case test_request:get(Url, [], [{host_header, "example1.com"}]) of + {ok, _, _, Body} -> + {Json} = jiffy:decode(Body), + Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"), + ?assertEqual(Path, proplists:get_value(<<"path">>, Json)); + Else -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, ?iofmt("Request failed: ~p", [Else])}]}) + end + end). + +should_match_wildcard_vhost({Url, DbName}) -> + ?_test(begin + ok = config:set("vhosts", "*.example.com", + "/" ++ DbName ++ "/_design/doc1/_rewrite", false), + case test_request:get(Url, [], [{host_header, "test.example.com"}]) of + {ok, _, _, Body} -> + {Json} = jiffy:decode(Body), + Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"), + ?assertEqual(Path, proplists:get_value(<<"path">>, Json)); + Else -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, ?iofmt("Request failed: ~p", [Else])}]}) + end + end). 
+ +should_return_db_info_for_wildcard_vhost_for_custom_db({Url, DbName}) -> + ?_test(begin + ok = config:set("vhosts", ":dbname.example1.com", + "/:dbname", false), + Host = DbName ++ ".example1.com", + case test_request:get(Url, [], [{host_header, Host}]) of + {ok, _, _, Body} -> + {JsonBody} = jiffy:decode(Body), + ?assert(proplists:is_defined(<<"db_name">>, JsonBody)); + Else -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, ?iofmt("Request failed: ~p", [Else])}]}) + end + end). + +should_replace_rewrite_variables_for_db_and_doc({Url, DbName}) -> + ?_test(begin + ok = config:set("vhosts",":appname.:dbname.example1.com", + "/:dbname/_design/:appname/_rewrite/", false), + Host = "doc1." ++ DbName ++ ".example1.com", + case test_request:get(Url, [], [{host_header, Host}]) of + {ok, _, _, Body} -> + {Json} = jiffy:decode(Body), + Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"), + ?assertEqual(Path, proplists:get_value(<<"path">>, Json)); + Else -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, ?iofmt("Request failed: ~p", [Else])}]}) + end + end). + +should_return_db_info_for_vhost_with_resource({Url, DbName}) -> + ?_test(begin + ok = config:set("vhosts", + "example.com/test", "/" ++ DbName, false), + ReqUrl = Url ++ "/test", + case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of + {ok, _, _, Body} -> + {JsonBody} = jiffy:decode(Body), + ?assert(proplists:is_defined(<<"db_name">>, JsonBody)); + Else -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, ?iofmt("Request failed: ~p", [Else])}]}) + end + end). + + +should_return_revs_info_for_vhost_with_resource({Url, DbName}) -> + ?_test(begin + ok = config:set("vhosts", + "example.com/test", "/" ++ DbName, false), + ReqUrl = Url ++ "/test/doc1?revs_info=true", + case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of + {ok, _, _, Body} -> + {JsonBody} = jiffy:decode(Body), + ?assert(proplists:is_defined(<<"_revs_info">>, JsonBody)); + Else -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, ?iofmt("Request failed: ~p", [Else])}]}) + end + end). + +should_return_db_info_for_vhost_with_wildcard_resource({Url, DbName}) -> + ?_test(begin + ok = config:set("vhosts", "*.example2.com/test", "/*", false), + ReqUrl = Url ++ "/test", + Host = DbName ++ ".example2.com", + case test_request:get(ReqUrl, [], [{host_header, Host}]) of + {ok, _, _, Body} -> + {JsonBody} = jiffy:decode(Body), + ?assert(proplists:is_defined(<<"db_name">>, JsonBody)); + Else -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, ?iofmt("Request failed: ~p", [Else])}]}) + end + end). + +should_return_path_for_vhost_with_wildcard_host({Url, DbName}) -> + ?_test(begin + ok = config:set("vhosts", "*/test1", + "/" ++ DbName ++ "/_design/doc1/_show/test", + false), + case test_request:get(Url ++ "/test1") of + {ok, _, _, Body} -> + {Json} = jiffy:decode(Body), + Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"), + ?assertEqual(Path, proplists:get_value(<<"path">>, Json)); + Else -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, ?iofmt("Request failed: ~p", [Else])}]}) + end + end). 
diff --git a/src/couch/test/eunit/couchdb_views_tests.erl b/src/couch/test/eunit/couchdb_views_tests.erl new file mode 100644 index 000000000..60bb5c975 --- /dev/null +++ b/src/couch/test/eunit/couchdb_views_tests.erl @@ -0,0 +1,669 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couchdb_views_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch_mrview/include/couch_mrview.hrl"). + +-define(DELAY, 100). +-define(TIMEOUT, 1000). +-define(WAIT_DELAY_COUNT, 40). + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + FooRev = create_design_doc(DbName, <<"_design/foo">>, <<"bar">>), + query_view(DbName, "foo", "bar"), + BooRev = create_design_doc(DbName, <<"_design/boo">>, <<"baz">>), + query_view(DbName, "boo", "baz"), + {DbName, {FooRev, BooRev}}. + +setup_with_docs() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + create_docs(DbName), + create_design_doc(DbName, <<"_design/foo">>, <<"bar">>), + DbName. + +setup_legacy() -> + DbName = <<"test">>, + DbFileName = "test.couch", + OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]), + OldViewName = "3b835456c235b1827e012e25666152f3.view", + FixtureViewFilePath = filename:join([?FIXTURESDIR, OldViewName]), + NewViewName = "6cf2c2f766f87b618edf6630b00f8736.view", + + DbDir = config:get("couchdb", "database_dir"), + ViewDir = config:get("couchdb", "view_index_dir"), + OldViewFilePath = filename:join([ViewDir, ".test_design", OldViewName]), + NewViewFilePath = filename:join([ViewDir, ".test_design", "mrview", + NewViewName]), + + NewDbFilePath = filename:join([DbDir, DbFileName]), + + Files = [NewDbFilePath, OldViewFilePath, NewViewFilePath], + + %% make sure there is no left over + lists:foreach(fun(File) -> file:delete(File) end, Files), + + % copy old db file into db dir + {ok, _} = file:copy(OldDbFilePath, NewDbFilePath), + + % copy old view file into view dir + ok = filelib:ensure_dir(OldViewFilePath), + + {ok, _} = file:copy(FixtureViewFilePath, OldViewFilePath), + + {DbName, Files}. + +teardown({DbName, _}) -> + teardown(DbName); +teardown(DbName) when is_binary(DbName) -> + couch_server:delete(DbName, [?ADMIN_CTX]), + ok. + +teardown_legacy({_DbName, Files}) -> + lists:foreach(fun(File) -> file:delete(File) end, Files). + +view_indexes_cleanup_test_() -> + { + "View indexes cleanup", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_have_two_indexes_alive_before_deletion/1, + fun should_cleanup_index_file_after_ddoc_deletion/1, + fun should_cleanup_all_index_files/1 + ] + } + } + }. + +view_group_db_leaks_test_() -> + { + "View group db leaks", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup_with_docs/0, fun teardown/1, + [ + fun couchdb_1138/1, + fun couchdb_1309/1 + ] + } + } + }. 
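+% couchdb_1283/0 below needs to freeze the view compactor mid-run, which
+% is why couch_mrview_index is mecked in this setup and unloaded again in
+% the teardown.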
+ +view_group_shutdown_test_() -> + { + "View group shutdown", + { + setup, + fun() -> + meck:new(couch_mrview_index, [passthrough]), + test_util:start_couch() + end, + fun(Ctx) -> + test_util:stop_couch(Ctx), + meck:unload() + end, + [couchdb_1283()] + } + }. + +backup_restore_test_() -> + { + "Upgrade and bugs related tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup_with_docs/0, fun teardown/1, + [ + fun should_not_remember_docs_in_index_after_backup_restore/1 + ] + } + } + }. + + +upgrade_test_() -> + { + "Upgrade tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup_legacy/0, fun teardown_legacy/1, + [ + fun should_upgrade_legacy_view_files/1 + ] + } + } + }. + +should_not_remember_docs_in_index_after_backup_restore(DbName) -> + ?_test(begin + %% COUCHDB-640 + + ok = backup_db_file(DbName), + create_doc(DbName, "doc666"), + + Rows0 = query_view(DbName, "foo", "bar"), + ?assert(has_doc("doc1", Rows0)), + ?assert(has_doc("doc2", Rows0)), + ?assert(has_doc("doc3", Rows0)), + ?assert(has_doc("doc666", Rows0)), + + ?assertEqual(ok, restore_backup_db_file(DbName)), + + Rows1 = query_view(DbName, "foo", "bar"), + ?assert(has_doc("doc1", Rows1)), + ?assert(has_doc("doc2", Rows1)), + ?assert(has_doc("doc3", Rows1)), + ?assertNot(has_doc("doc666", Rows1)) + end). + +should_upgrade_legacy_view_files({DbName, Files}) -> + ?_test(begin + [_NewDbFilePath, OldViewFilePath, NewViewFilePath] = Files, + ok = config:set("query_server_config", "commit_freq", "0", false), + + % ensure old header + OldHeader = read_header(OldViewFilePath), + ?assertMatch(#index_header{}, OldHeader), + + % query view for expected results + Rows0 = query_view(DbName, "test", "test"), + ?assertEqual(2, length(Rows0)), + + % ensure old file gone + ?assertNot(filelib:is_regular(OldViewFilePath)), + + % add doc to trigger update + DocUrl = db_url(DbName) ++ "/boo", + {ok, _, _, _} = test_request:put( + DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":3}">>), + + % query view for expected results + Rows1 = query_view(DbName, "test", "test"), + ?assertEqual(3, length(Rows1)), + + % ensure new header + timer:sleep(2000), % have to wait for awhile to upgrade the index + NewHeader = read_header(NewViewFilePath), + ?assertMatch(#mrheader{}, NewHeader) + end). + + +should_have_two_indexes_alive_before_deletion({DbName, _}) -> + view_cleanup(DbName), + ?_assertEqual(2, count_index_files(DbName)). + +should_cleanup_index_file_after_ddoc_deletion({DbName, {FooRev, _}}) -> + delete_design_doc(DbName, <<"_design/foo">>, FooRev), + view_cleanup(DbName), + ?_assertEqual(1, count_index_files(DbName)). + +should_cleanup_all_index_files({DbName, {FooRev, BooRev}})-> + delete_design_doc(DbName, <<"_design/foo">>, FooRev), + delete_design_doc(DbName, <<"_design/boo">>, BooRev), + view_cleanup(DbName), + ?_assertEqual(0, count_index_files(DbName)). 
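+% COUCHDB-1138: the indexer for _design/foo must stay alive and keep
+% serving queries across doc updates as well as db and view compaction,
+% with the number of db users staying constant at 2.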
+ +couchdb_1138(DbName) -> + ?_test(begin + {ok, IndexerPid} = couch_index_server:get_index( + couch_mrview_index, DbName, <<"_design/foo">>), + ?assert(is_pid(IndexerPid)), + ?assert(is_process_alive(IndexerPid)), + ?assertEqual(2, count_users(DbName)), + + wait_indexer(IndexerPid), + + Rows0 = query_view(DbName, "foo", "bar"), + ?assertEqual(3, length(Rows0)), + ?assertEqual(2, count_users(DbName)), + ?assert(is_process_alive(IndexerPid)), + + create_doc(DbName, "doc1000"), + Rows1 = query_view(DbName, "foo", "bar"), + ?assertEqual(4, length(Rows1)), + ?assertEqual(2, count_users(DbName)), + + ?assert(is_process_alive(IndexerPid)), + + compact_db(DbName), + ?assert(is_process_alive(IndexerPid)), + + compact_view_group(DbName, "foo"), + ?assertEqual(2, count_users(DbName)), + + ?assert(is_process_alive(IndexerPid)), + + create_doc(DbName, "doc1001"), + Rows2 = query_view(DbName, "foo", "bar"), + ?assertEqual(5, length(Rows2)), + ?assertEqual(2, count_users(DbName)), + + ?assert(is_process_alive(IndexerPid)) + end). + +couchdb_1309(DbName) -> + ?_test(begin + {ok, IndexerPid} = couch_index_server:get_index( + couch_mrview_index, DbName, <<"_design/foo">>), + ?assert(is_pid(IndexerPid)), + ?assert(is_process_alive(IndexerPid)), + ?assertEqual(2, count_users(DbName)), + + wait_indexer(IndexerPid), + + create_doc(DbName, "doc1001"), + Rows0 = query_view(DbName, "foo", "bar"), + check_rows_value(Rows0, null), + ?assertEqual(4, length(Rows0)), + ?assertEqual(2, count_users(DbName)), + + ?assert(is_process_alive(IndexerPid)), + + update_design_doc(DbName, <<"_design/foo">>, <<"bar">>), + {ok, NewIndexerPid} = couch_index_server:get_index( + couch_mrview_index, DbName, <<"_design/foo">>), + ?assert(is_pid(NewIndexerPid)), + ?assert(is_process_alive(NewIndexerPid)), + ?assertNotEqual(IndexerPid, NewIndexerPid), + UserCnt = case count_users(DbName) of + N when N > 2 -> + timer:sleep(1000), + count_users(DbName); + N -> N + end, + ?assertEqual(2, UserCnt), + + Rows1 = query_view(DbName, "foo", "bar", ok), + ?assertEqual(0, length(Rows1)), + Rows2 = query_view(DbName, "foo", "bar"), + check_rows_value(Rows2, 1), + ?assertEqual(4, length(Rows2)), + + ok = stop_indexer( %% FIXME we need to grab monitor earlier + fun() -> ok end, + IndexerPid, ?LINE, + "old view group is not dead after ddoc update"), + + ok = stop_indexer( + fun() -> couch_server:delete(DbName, [?ADMIN_USER]) end, + NewIndexerPid, ?LINE, + "new view group did not die after DB deletion") + end). 
+
+couchdb_1283() ->
+    ?_test(begin
+        ok = config:set("couchdb", "max_dbs_open", "3", false),
+        ok = config:set("couchdb", "delayed_commits", "false", false),
+
+        {ok, MDb1} = couch_db:create(?tempdb(), [?ADMIN_CTX]),
+        DDoc = couch_doc:from_json_obj({[
+            {<<"_id">>, <<"_design/foo">>},
+            {<<"language">>, <<"javascript">>},
+            {<<"views">>, {[
+                {<<"foo">>, {[
+                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                ]}},
+                {<<"foo2">>, {[
+                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                ]}},
+                {<<"foo3">>, {[
+                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                ]}},
+                {<<"foo4">>, {[
+                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                ]}},
+                {<<"foo5">>, {[
+                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                ]}}
+            ]}}
+        ]}),
+        {ok, _} = couch_db:update_doc(MDb1, DDoc, []),
+        ok = populate_db(MDb1, 100, 100),
+        query_view(couch_db:name(MDb1), "foo", "foo"),
+        ok = couch_db:close(MDb1),
+
+        {ok, Pid} = couch_index_server:get_index(
+            couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>),
+
+        % Start and pause compaction
+        WaitRef = erlang:make_ref(),
+        meck:expect(couch_mrview_index, compact, fun(Db, State, Opts) ->
+            receive {WaitRef, From, init} -> ok end,
+            From ! {WaitRef, inited},
+            receive {WaitRef, go} -> ok end,
+            meck:passthrough([Db, State, Opts])
+        end),
+
+        {ok, CPid} = gen_server:call(Pid, compact),
+        CRef = erlang:monitor(process, CPid),
+        ?assert(is_process_alive(CPid)),
+
+        % Make sure that our compactor is waiting for us
+        % before we continue our assertions
+        CPid ! {WaitRef, self(), init},
+        receive {WaitRef, inited} -> ok end,
+
+        % Make sure that a compaction process takes a monitor
+        % on the database's main_pid
+        ?assertEqual(true, lists:member(CPid, couch_db:monitored_by(MDb1))),
+
+        % Finish compaction and make sure the monitor
+        % disappears
+        CPid ! {WaitRef, go},
+        wait_for_process_shutdown(CRef, normal,
+            {reason, "Failure compacting view group"}),
+
+        % Make sure that the monitor was removed
+        ?assertEqual(false, lists:member(CPid, couch_db:monitored_by(MDb1)))
+    end).
+
+wait_for_process_shutdown(Ref, ExpectedReason, Error) ->
+    receive
+        {'DOWN', Ref, process, _, Reason} ->
+            ?assertEqual(ExpectedReason, Reason)
+    after ?TIMEOUT ->
+        erlang:error(
+            {assertion_failed,
+                [{module, ?MODULE}, {line, ?LINE}, Error]})
+    end.
+
+
+create_doc(DbName, DocId) when is_list(DocId) ->
+    create_doc(DbName, ?l2b(DocId));
+create_doc(DbName, DocId) when is_binary(DocId) ->
+    {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
+    Doc666 = couch_doc:from_json_obj({[
+        {<<"_id">>, DocId},
+        {<<"value">>, 999}
+    ]}),
+    {ok, _} = couch_db:update_docs(Db, [Doc666]),
+    couch_db:ensure_full_commit(Db),
+    couch_db:close(Db).
+
+create_docs(DbName) ->
+    {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
+    Doc1 = couch_doc:from_json_obj({[
+        {<<"_id">>, <<"doc1">>},
+        {<<"value">>, 1}
+    ]}),
+    Doc2 = couch_doc:from_json_obj({[
+        {<<"_id">>, <<"doc2">>},
+        {<<"value">>, 2}
+    ]}),
+    Doc3 = couch_doc:from_json_obj({[
+        {<<"_id">>, <<"doc3">>},
+        {<<"value">>, 3}
+    ]}),
+    {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
+    couch_db:ensure_full_commit(Db),
+    couch_db:close(Db).
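+
+%% couchdb_1283/0 and wait_for_process_shutdown/3 lean on the standard
+%% monitor/'DOWN' idiom: take a monitor before triggering shutdown, then
+%% match the returned reference in the 'DOWN' message. A minimal,
+%% self-contained sketch of that idiom (not used by the tests above):
+demo_monitor_shutdown() ->
+    % spawn_monitor/1 avoids the race of monitoring after spawn
+    {Pid, Ref} = spawn_monitor(fun() -> exit(normal) end),
+    receive
+        {'DOWN', Ref, process, Pid, Reason} ->
+            Reason % 'normal' here; couchdb_1283 asserts on this value
+    after 1000 ->
+        timeout
+    end.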
+ +populate_db(Db, BatchSize, N) when N > 0 -> + Docs = lists:map( + fun(_) -> + couch_doc:from_json_obj({[ + {<<"_id">>, couch_uuids:new()}, + {<<"value">>, base64:encode(crypto:strong_rand_bytes(1000))} + ]}) + end, + lists:seq(1, BatchSize)), + {ok, _} = couch_db:update_docs(Db, Docs, []), + populate_db(Db, BatchSize, N - length(Docs)); +populate_db(_Db, _, _) -> + ok. + +create_design_doc(DbName, DDName, ViewName) -> + {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DDName}, + {<<"language">>, <<"javascript">>}, + {<<"views">>, {[ + {ViewName, {[ + {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>} + ]}} + ]}} + ]}), + {ok, Rev} = couch_db:update_doc(Db, DDoc, []), + couch_db:ensure_full_commit(Db), + couch_db:close(Db), + Rev. + +update_design_doc(DbName, DDName, ViewName) -> + {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), + {ok, Doc} = couch_db:open_doc(Db, DDName, [?ADMIN_CTX]), + {Props} = couch_doc:to_json_obj(Doc, []), + Rev = couch_util:get_value(<<"_rev">>, Props), + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DDName}, + {<<"_rev">>, Rev}, + {<<"language">>, <<"javascript">>}, + {<<"views">>, {[ + {ViewName, {[ + {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>} + ]}} + ]}} + ]}), + {ok, NewRev} = couch_db:update_doc(Db, DDoc, [?ADMIN_CTX]), + couch_db:ensure_full_commit(Db), + couch_db:close(Db), + NewRev. + +delete_design_doc(DbName, DDName, Rev) -> + {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DDName}, + {<<"_rev">>, couch_doc:rev_to_str(Rev)}, + {<<"_deleted">>, true} + ]}), + {ok, _} = couch_db:update_doc(Db, DDoc, [Rev]), + couch_db:close(Db). + +db_url(DbName) -> + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)), + "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(DbName). + +query_view(DbName, DDoc, View) -> + query_view(DbName, DDoc, View, false). + +query_view(DbName, DDoc, View, Stale) -> + {ok, Code, _Headers, Body} = test_request:get( + db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View + ++ case Stale of + false -> []; + _ -> "?stale=" ++ atom_to_list(Stale) + end), + ?assertEqual(200, Code), + {Props} = jiffy:decode(Body), + couch_util:get_value(<<"rows">>, Props, []). + +check_rows_value(Rows, Value) -> + lists:foreach( + fun({Row}) -> + ?assertEqual(Value, couch_util:get_value(<<"value">>, Row)) + end, Rows). + +view_cleanup(DbName) -> + {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), + couch_mrview:cleanup(Db), + couch_db:close(Db). + +count_users(DbName) -> + {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), + DbPid = couch_db:get_pid(Db), + {monitored_by, Monitors0} = process_info(DbPid, monitored_by), + Monitors = lists:filter(fun is_pid/1, Monitors0), + CouchFiles = [P || P <- Monitors, couch_file:process_info(P) =/= undefined], + ok = couch_db:close(Db), + length(lists:usort(Monitors) -- [self() | CouchFiles]). + +count_index_files(DbName) -> + % call server to fetch the index files + RootDir = config:get("couchdb", "view_index_dir"), + length(filelib:wildcard(RootDir ++ "/." ++ + binary_to_list(DbName) ++ "_design"++"/mrview/*")). + +has_doc(DocId1, Rows) -> + DocId = iolist_to_binary(DocId1), + lists:any(fun({R}) -> lists:member({<<"id">>, DocId}, R) end, Rows). 
+
+backup_db_file(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    try
+        SrcPath = couch_db:get_filepath(Db),
+        Src = if
+            is_list(SrcPath) -> SrcPath;
+            true -> binary_to_list(SrcPath)
+        end,
+        ok = copy_tree(Src, Src ++ ".backup")
+    after
+        couch_db:close(Db)
+    end.
+
+restore_backup_db_file(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    Src = couch_db:get_filepath(Db),
+    ok = couch_db:close(Db),
+    DbPid = couch_db:get_pid(Db),
+    exit(DbPid, shutdown),
+    ok = copy_tree(Src ++ ".backup", Src),
+
+    test_util:wait(fun() ->
+        case couch_server:open(DbName, [{timeout, ?TIMEOUT}]) of
+            {ok, WaitDb} ->
+                case couch_db:get_pid(WaitDb) == DbPid of
+                    true -> wait;
+                    false -> ok
+                end;
+            Else ->
+                Else
+        end
+    end, ?TIMEOUT, ?DELAY).
+
+compact_db(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, _} = couch_db:start_compact(Db),
+    ok = couch_db:close(Db),
+    wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT).
+
+wait_db_compact_done(_DbName, 0) ->
+    erlang:error({assertion_failed,
+        [{module, ?MODULE},
+         {line, ?LINE},
+         {reason, "DB compaction failed to finish"}]});
+wait_db_compact_done(DbName, N) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    ok = couch_db:close(Db),
+    CompactorPid = couch_db:get_compactor_pid(Db),
+    case is_pid(CompactorPid) of
+        false ->
+            ok;
+        true ->
+            ok = timer:sleep(?DELAY),
+            wait_db_compact_done(DbName, N - 1)
+    end.
+
+compact_view_group(DbName, DDocId) when is_list(DDocId) ->
+    compact_view_group(DbName, ?l2b("_design/" ++ DDocId));
+compact_view_group(DbName, DDocId) when is_binary(DDocId) ->
+    ok = couch_mrview:compact(DbName, DDocId),
+    wait_view_compact_done(DbName, DDocId, 10).
+
+wait_view_compact_done(_DbName, _DDocId, 0) ->
+    erlang:error({assertion_failed,
+        [{module, ?MODULE},
+         {line, ?LINE},
+         {reason, "View compaction failed to finish"}]});
+wait_view_compact_done(DbName, DDocId, N) ->
+    {ok, Code, _Headers, Body} = test_request:get(
+        db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"),
+    ?assertEqual(200, Code),
+    {Info} = jiffy:decode(Body),
+    {IndexInfo} = couch_util:get_value(<<"view_index">>, Info),
+    CompactRunning = couch_util:get_value(<<"compact_running">>, IndexInfo),
+    case CompactRunning of
+        false ->
+            ok;
+        true ->
+            ok = timer:sleep(?DELAY),
+            wait_view_compact_done(DbName, DDocId, N - 1)
+    end.
+
+read_header(File) ->
+    {ok, Fd} = couch_file:open(File),
+    {ok, {_Sig, Header}} = couch_file:read_header(Fd),
+    couch_file:close(Fd),
+    Header.
+
+stop_indexer(StopFun, Pid, Line, Reason) ->
+    case test_util:stop_sync(Pid, StopFun) of
+        timeout ->
+            erlang:error(
+                {assertion_failed,
+                    [{module, ?MODULE}, {line, Line},
+                        {reason, Reason}]});
+        ok ->
+            ok
+    end.
+
+wait_indexer(IndexerPid) ->
+    test_util:wait(fun() ->
+        {ok, Info} = couch_index:get_info(IndexerPid),
+        case couch_util:get_value(compact_running, Info) of
+            true ->
+                wait;
+            false ->
+                ok
+        end
+    end).
+
+copy_tree(Src, Dst) ->
+    case filelib:is_dir(Src) of
+        true ->
+            {ok, Files} = file:list_dir(Src),
+            copy_tree(Files, Src, Dst);
+        false ->
+            ok = filelib:ensure_dir(Dst),
+            {ok, _} = file:copy(Src, Dst),
+            ok
+    end.
+
+copy_tree([], _Src, _Dst) ->
+    ok;
+copy_tree([File | Rest], Src, Dst) ->
+    FullSrc = filename:join(Src, File),
+    FullDst = filename:join(Dst, File),
+    ok = copy_tree(FullSrc, FullDst),
+    copy_tree(Rest, Src, Dst).
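+
+%% The wait helpers above (wait_db_compact_done/2, wait_view_compact_done/3,
+%% and the test_util:wait/1 callers) all share the same bounded-polling
+%% shape: check a condition, sleep, retry, and fail loudly after N tries.
+%% A minimal sketch of that pattern, not wired into any test; CheckDone
+%% is a caller-supplied fun(() -> boolean()):
+wait_until_done(0, _CheckDone) ->
+    erlang:error({assertion_failed, [{reason, "polling timed out"}]});
+wait_until_done(N, CheckDone) ->
+    case CheckDone() of
+        true ->
+            ok;
+        false ->
+            ok = timer:sleep(?DELAY),
+            wait_until_done(N - 1, CheckDone)
+    end.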
diff --git a/src/couch/test/eunit/fixtures/3b835456c235b1827e012e25666152f3.view b/src/couch/test/eunit/fixtures/3b835456c235b1827e012e25666152f3.view new file mode 100644 index 000000000..9c67648be Binary files /dev/null and b/src/couch/test/eunit/fixtures/3b835456c235b1827e012e25666152f3.view differ diff --git a/src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg b/src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg new file mode 100644 index 000000000..30e475da8 --- /dev/null +++ b/src/couch/test/eunit/fixtures/couch_stats_aggregates.cfg @@ -0,0 +1,19 @@ +% Licensed to the Apache Software Foundation (ASF) under one +% or more contributor license agreements. See the NOTICE file +% distributed with this work for additional information +% regarding copyright ownership. The ASF licenses this file +% to you under the Apache License, Version 2.0 (the +% "License"); you may not use this file except in compliance +% with the License. You may obtain a copy of the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, +% software distributed under the License is distributed on an +% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +% KIND, either express or implied. See the License for the +% specific language governing permissions and limitations +% under the License. + +{testing, stuff, "yay description"}. +{number, '11', "randomosity"}. diff --git a/src/couch/test/eunit/fixtures/couch_stats_aggregates.ini b/src/couch/test/eunit/fixtures/couch_stats_aggregates.ini new file mode 100644 index 000000000..cc5cd2187 --- /dev/null +++ b/src/couch/test/eunit/fixtures/couch_stats_aggregates.ini @@ -0,0 +1,20 @@ +; Licensed to the Apache Software Foundation (ASF) under one +; or more contributor license agreements. See the NOTICE file +; distributed with this work for additional information +; regarding copyright ownership. The ASF licenses this file +; to you under the Apache License, Version 2.0 (the +; "License"); you may not use this file except in compliance +; with the License. You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, +; software distributed under the License is distributed on an +; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +; KIND, either express or implied. See the License for the +; specific language governing permissions and limitations +; under the License. 
+ +[stats] +rate = 10000000 ; We call collect_sample in testing +samples = [0, 1] diff --git a/src/couch/test/eunit/fixtures/db_non_partitioned.couch b/src/couch/test/eunit/fixtures/db_non_partitioned.couch new file mode 100644 index 000000000..327d9bb5d Binary files /dev/null and b/src/couch/test/eunit/fixtures/db_non_partitioned.couch differ diff --git a/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch b/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch new file mode 100644 index 000000000..b0d39c9ec Binary files /dev/null and b/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req.couch differ diff --git a/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch b/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch new file mode 100644 index 000000000..b584fce31 Binary files /dev/null and b/src/couch/test/eunit/fixtures/db_v6_with_1_purge_req_for_2_docs.couch differ diff --git a/src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch b/src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch new file mode 100644 index 000000000..ee4e11b7f Binary files /dev/null and b/src/couch/test/eunit/fixtures/db_v6_with_2_purge_req.couch differ diff --git a/src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch b/src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch new file mode 100644 index 000000000..814feb8e1 Binary files /dev/null and b/src/couch/test/eunit/fixtures/db_v6_without_purge_req.couch differ diff --git a/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch b/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch new file mode 100644 index 000000000..cab8331db Binary files /dev/null and b/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req.couch differ diff --git a/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch b/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch new file mode 100644 index 000000000..b613646b1 Binary files /dev/null and b/src/couch/test/eunit/fixtures/db_v7_with_1_purge_req_for_2_docs.couch differ diff --git a/src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch b/src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch new file mode 100644 index 000000000..126fc919e Binary files /dev/null and b/src/couch/test/eunit/fixtures/db_v7_with_2_purge_req.couch differ diff --git a/src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch b/src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch new file mode 100644 index 000000000..762dc8dad Binary files /dev/null and b/src/couch/test/eunit/fixtures/db_v7_without_purge_req.couch differ diff --git a/src/couch/test/eunit/fixtures/logo.png b/src/couch/test/eunit/fixtures/logo.png new file mode 100644 index 000000000..d21ac025b Binary files /dev/null and b/src/couch/test/eunit/fixtures/logo.png differ diff --git a/src/couch/test/eunit/fixtures/multipart.http b/src/couch/test/eunit/fixtures/multipart.http new file mode 100644 index 000000000..fe9f271cc --- /dev/null +++ b/src/couch/test/eunit/fixtures/multipart.http @@ -0,0 +1,13 @@ +{ + "_id": "our document goes here" +} + +--multipart_related_boundary~~~~~~~~~~~~~~~~~~~~ +Content-Type: application/json + +{"value":0,"_id":"doc0","_rev":"1-7e97409c987eac3a99385a17ad4cbabe","_attachments":{"plus1":{"stub":false,"follows":true,"content_type":"application/json","length":14}},".cache":{"plus1":{"timestamp":"2012-08-13T13:59:27.826Z"}}} +--multipart_related_boundary~~~~~~~~~~~~~~~~~~~~ + +{"value":"01"} +--multipart_related_boundary~~~~~~~~~~~~~~~~~~~~-- 
+ diff --git a/src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh b/src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh new file mode 100644 index 000000000..345c8b40b --- /dev/null +++ b/src/couch/test/eunit/fixtures/os_daemon_bad_perm.sh @@ -0,0 +1,17 @@ +#!/bin/sh -e +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# Please do not make this file executable as that's the error being tested. + +sleep 5 diff --git a/src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh b/src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh new file mode 100755 index 000000000..5bc10e83f --- /dev/null +++ b/src/couch/test/eunit/fixtures/os_daemon_can_reboot.sh @@ -0,0 +1,15 @@ +#!/bin/sh -e +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. + +sleep 2 diff --git a/src/couch/test/eunit/fixtures/os_daemon_configer.escript b/src/couch/test/eunit/fixtures/os_daemon_configer.escript new file mode 100755 index 000000000..f146b8314 --- /dev/null +++ b/src/couch/test/eunit/fixtures/os_daemon_configer.escript @@ -0,0 +1,97 @@ +#! /usr/bin/env escript + +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-include("../../include/couch_eunit.hrl"). + +read() -> + case io:get_line('') of + eof -> + stop; + Data -> + jiffy:decode(Data) + end. + +write(Mesg) -> + Data = iolist_to_binary(jiffy:encode(Mesg)), + io:format(binary_to_list(Data) ++ "\n", []). + +get_cfg(Section) -> + write([<<"get">>, Section]), + read(). + +get_cfg(Section, Name) -> + write([<<"get">>, Section, Name]), + read(). + +log(Mesg) -> + write([<<"log">>, Mesg]). + +log(Mesg, Level) -> + write([<<"log">>, Mesg, {[{<<"level">>, Level}]}]). + +test_get_cfg1() -> + Path = list_to_binary(?FILE), + FileName = list_to_binary(filename:basename(?FILE)), + {[{FileName, Path}]} = get_cfg(<<"os_daemons">>). + +test_get_cfg2() -> + Path = list_to_binary(?FILE), + FileName = list_to_binary(filename:basename(?FILE)), + Path = get_cfg(<<"os_daemons">>, FileName), + <<"sequential">> = get_cfg(<<"uuids">>, <<"algorithm">>). 
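+
+% The helpers above speak CouchDB's OS daemon stdio protocol: every
+% request is a single line of JSON written to stdout, and the reply
+% comes back as a single line of JSON on stdin. Roughly, with an
+% illustrative path (get_cfg/1 returns the whole config section):
+%
+%   daemon  -> couchdb:  ["get", "os_daemons"]
+%   couchdb -> daemon:   {"os_daemon_configer.escript": "/path/to/it"}
+%
+%   daemon  -> couchdb:  ["log", "foobar!", {"level": "debug"}]
+%   (log requests receive no reply)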
+ + +test_get_unknown_cfg() -> + {[]} = get_cfg(<<"aal;3p4">>), + null = get_cfg(<<"aal;3p4">>, <<"313234kjhsdfl">>). + +test_log() -> + log(<<"foobar!">>), + log(<<"some stuff!">>, <<"debug">>), + log(2), + log(true), + write([<<"log">>, <<"stuff">>, 2]), + write([<<"log">>, 3, null]), + write([<<"log">>, [1, 2], {[{<<"level">>, <<"debug">>}]}]), + write([<<"log">>, <<"true">>, {[]}]). + +do_tests() -> + test_get_cfg1(), + test_get_cfg2(), + test_get_unknown_cfg(), + test_log(), + loop(io:read("")). + +loop({ok, _}) -> + loop(io:read("")); +loop(eof) -> + init:stop(); +loop({error, _Reason}) -> + init:stop(). + +main([]) -> + init_code_path(), + do_tests(). + +init_code_path() -> + Paths = [ + "couchdb", + "jiffy", + "ibrowse", + "mochiweb", + "snappy" + ], + lists:foreach(fun(Name) -> + code:add_patha(filename:join([?BUILDDIR(), "src", Name, "ebin"])) + end, Paths). diff --git a/src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh b/src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh new file mode 100755 index 000000000..256ee7935 --- /dev/null +++ b/src/couch/test/eunit/fixtures/os_daemon_die_on_boot.sh @@ -0,0 +1,15 @@ +#!/bin/sh -e +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. + +exit 1 diff --git a/src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh b/src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh new file mode 100755 index 000000000..f5a13684e --- /dev/null +++ b/src/couch/test/eunit/fixtures/os_daemon_die_quickly.sh @@ -0,0 +1,15 @@ +#!/bin/sh -e +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. + +sleep 1 diff --git a/src/couch/test/eunit/fixtures/os_daemon_looper.escript b/src/couch/test/eunit/fixtures/os_daemon_looper.escript new file mode 100755 index 000000000..73974e905 --- /dev/null +++ b/src/couch/test/eunit/fixtures/os_daemon_looper.escript @@ -0,0 +1,26 @@ +#! /usr/bin/env escript + +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +loop() -> + loop(io:read("")). 
+ +loop({ok, _}) -> + loop(io:read("")); +loop(eof) -> + stop; +loop({error, Reason}) -> + throw({error, Reason}). + +main([]) -> + loop(). diff --git a/src/couch/test/eunit/fixtures/test.couch b/src/couch/test/eunit/fixtures/test.couch new file mode 100644 index 000000000..32c79af32 Binary files /dev/null and b/src/couch/test/eunit/fixtures/test.couch differ diff --git a/src/couch/test/eunit/global_changes_tests.erl b/src/couch/test/eunit/global_changes_tests.erl new file mode 100644 index 000000000..4392aafac --- /dev/null +++ b/src/couch/test/eunit/global_changes_tests.erl @@ -0,0 +1,159 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(global_changes_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(USER, "admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). + +setup() -> + Host = get_host(), + ok = add_admin(?USER, ?PASS), + DbName = "foo/" ++ ?b2l(?tempdb()), + ok = http_create_db(DbName), + {Host, DbName}. + +teardown({_, DbName}) -> + ok = http_delete_db(DbName), + delete_admin(?USER), + ok. + +http_create_db(Name) -> + {ok, Status, _, _} = test_request:put(db_url(Name), [?AUTH], ""), + true = lists:member(Status, [201, 202]), + ok. + +http_delete_db(Name) -> + {ok, Status, _, _} = test_request:delete(db_url(Name), [?AUTH]), + true = lists:member(Status, [200, 202]), + ok. + +db_url(Name) -> + get_host() ++ "/" ++ escape(Name). + +start_couch() -> + Ctx = test_util:start_couch([chttpd, global_changes]), + ok = ensure_db_exists("_global_changes"), + Ctx. + +ensure_db_exists(Name) -> + case fabric:create_db(Name) of + ok -> + ok; + {error, file_exists} -> + ok + end. + +global_changes_test_() -> + { + "Checking global_changes endpoint", + { + setup, + fun start_couch/0, + fun test_util:stop/1, + [ + check_response() + ] + } + }. + +check_response() -> + { + "Check response", + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_return_correct_response_on_create/1, + fun should_return_correct_response_on_update/1 + ] + } + }. + +should_return_correct_response_on_create({Host, DbName}) -> + ?_test(begin + Headers = [?AUTH], + create_doc(Host, DbName, "bar/baz"), + {Status, Events} = request_updates(Host, DbName, Headers), + ?assertEqual(200, Status), + ?assertEqual([<<"created">>, <<"updated">>], Events) + end). + +should_return_correct_response_on_update({Host, DbName}) -> + ?_test(begin + Headers = [?AUTH], + create_doc(Host, DbName, "bar/baz"), + update_doc(Host, DbName, "bar/baz", "new_value"), + {Status, Events} = request_updates(Host, DbName, Headers), + ?assertEqual(200, Status), + ?assertEqual([<<"created">>, <<"updated">>], Events) + end). + +create_doc(Host, DbName, Id) -> + Headers = [?AUTH], + Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id), + Body = jiffy:encode({[ + {key, "value"} + ]}), + {ok, Status, _Headers, _Body} = test_request:put(Url, Headers, Body), + ?assert(Status =:= 201 orelse Status =:= 202), + timer:sleep(1000), + ok. 
+ +update_doc(Host, DbName, Id, Value) -> + Headers = [?AUTH], + Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id), + {ok, 200, _Headers0, BinBody} = test_request:get(Url, Headers), + [Rev] = decode_response(BinBody, [<<"_rev">>]), + Body = jiffy:encode({[ + {key, Value}, + {'_rev', Rev} + ]}), + {ok, Status, _Headers1, _Body} = test_request:put(Url, Headers, Body), + ?assert(Status =:= 201 orelse Status =:= 202), + timer:sleep(1000), + ok. + +request_updates(Host, DbName, Headers) -> + Url = Host ++ "/_db_updates", + {ok, Status, _Headers, BinBody} = test_request:get(Url, Headers), + [Results] = decode_response(BinBody, [<<"results">>]), + ToDecode = [<<"db_name">>, <<"type">>], + Values = [decode_result(Result, ToDecode) || Result <- Results], + Result = [Type || [DB, Type] <- Values, DB == ?l2b(DbName)], + {Status, lists:sort(Result)}. + +decode_result({Props}, ToDecode) -> + [couch_util:get_value(Key, Props) || Key <- ToDecode]. + +decode_response(BinBody, ToDecode) -> + {Body} = jiffy:decode(BinBody), + [couch_util:get_value(Key, Body) || Key <- ToDecode]. + +add_admin(User, Pass) -> + Hashed = couch_passwords:hash_admin_password(Pass), + config:set("admins", User, ?b2l(Hashed), _Persist=false). + +delete_admin(User) -> + config:delete("admins", User, false). + +get_host() -> + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)), + "http://" ++ Addr ++ ":" ++ Port. + +escape(Path) -> + re:replace(Path, "/", "%2f", [global, {return, list}]). diff --git a/src/couch/test/eunit/json_stream_parse_tests.erl b/src/couch/test/eunit/json_stream_parse_tests.erl new file mode 100644 index 000000000..e690d7728 --- /dev/null +++ b/src/couch/test/eunit/json_stream_parse_tests.erl @@ -0,0 +1,151 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(json_stream_parse_tests). + +-include_lib("couch/include/couch_eunit.hrl"). 
+
+-define(CASES,
+    [
+        {1, "1", "integer numeric literal"},
+        {3.1416, "3.14160", "float numeric literal"}, % text representation may truncate or trail zeroes
+        {-1, "-1", "negative integer numeric literal"},
+        {-3.1416, "-3.14160", "negative float numeric literal"},
+        {12.0e10, "1.20000e+11", "float literal in scientific notation"},
+        {1.234E+10, "1.23400e+10", "another float literal in scientific notation"},
+        {-1.234E-10, "-1.23400e-10", "negative float literal in scientific notation"},
+        {10.0, "1.0e+01", "yet another float literal in scientific notation"},
+        {123.456, "1.23456E+2", "yet another float literal in scientific notation"},
+        {10.0, "1e1", "yet another float literal in scientific notation"},
+        {<<"foo">>, "\"foo\"", "string literal"},
+        {<<"foo", 5, "bar">>, "\"foo\\u0005bar\"", "string literal with \\u0005"},
+        {<<"">>, "\"\"", "empty string literal"},
+        {<<"\n\n\n">>, "\"\\n\\n\\n\"", "only new lines literal"},
+        {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\"",
+            "only white spaces string literal"},
+        {null, "null", "null literal"},
+        {true, "true", "true literal"},
+        {false, "false", "false literal"},
+        {<<"null">>, "\"null\"", "null string literal"},
+        {<<"true">>, "\"true\"", "true string literal"},
+        {<<"false">>, "\"false\"", "false string literal"},
+        {{[]}, "{}", "empty object literal"},
+        {{[{<<"foo">>, <<"bar">>}]}, "{\"foo\":\"bar\"}",
+            "simple object literal"},
+        {{[{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]},
+            "{\"foo\":\"bar\",\"baz\":123}", "another simple object literal"},
+        {[], "[]", "empty array literal"},
+        {[[]], "[[]]", "empty array literal inside a single element array literal"},
+        {[1, <<"foo">>], "[1,\"foo\"]", "simple non-empty array literal"},
+        {[1199344435545.0, 1], "[1199344435545.0,1]",
+            "another simple non-empty array literal"},
+        {[false, true, 321, null], "[false, true, 321, null]", "array of literals"},
+        {{[{<<"foo">>, [123]}]}, "{\"foo\":[123]}",
+            "object literal with an array valued property"},
+        {{[{<<"foo">>, {[{<<"bar">>, true}]}}]},
+            "{\"foo\":{\"bar\":true}}", "nested object literal"},
+        {{[{<<"foo">>, []}, {<<"bar">>, {[{<<"baz">>, true}]}},
+            {<<"alice">>, <<"bob">>}]},
+            "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}",
+            "complex object literal"},
+        {[-123, <<"foo">>, {[{<<"bar">>, []}]}, null],
+            "[-123,\"foo\",{\"bar\":[]},null]",
+            "complex array literal"}
+    ]
+).
+
+
+raw_json_input_test_() ->
+    Tests = lists:map(
+        fun({EJson, JsonString, Desc}) ->
+            {Desc,
+                ?_assert(equiv(EJson, json_stream_parse:to_ejson(JsonString)))}
+        end, ?CASES),
+    {"Tests with raw JSON string as the input", Tests}.
+
+one_byte_data_fun_test_() ->
+    Tests = lists:map(
+        fun({EJson, JsonString, Desc}) ->
+            DataFun = fun() -> single_byte_data_fun(JsonString) end,
+            {Desc,
+                ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
+        end, ?CASES),
+    {"Tests with a 1 byte output data function as the input", Tests}.
+
+multiple_bytes_data_fun_test_() ->
+    Tests = lists:map(
+        fun({EJson, JsonString, Desc}) ->
+            DataFun = fun() -> multiple_bytes_data_fun(JsonString) end,
+            {Desc,
+                ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
+        end, ?CASES),
+    {"Tests with a multiple bytes output data function as the input", Tests}.
+
+
+%% Test for equivalence of Erlang terms.
+%% Due to arbitrary order of construction, equivalent objects might
+%% compare unequal as erlang terms, so we need to carefully recurse
+%% through aggregates (tuples and objects).
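+%%
+%% For example (an illustrative pair, not one of the ?CASES): the terms
+%%     {[{<<"a">>, 1}, {<<"b">>, 2}]}
+%%     {[{<<"b">>, 2}, {<<"a">>, 1}]}
+%% encode the same JSON object but differ under =:=, which is why
+%% equiv_object/2 below keysorts both property lists before comparing
+%% them pair by pair.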
+equiv({Props1}, {Props2}) ->
+    equiv_object(Props1, Props2);
+equiv(L1, L2) when is_list(L1), is_list(L2) ->
+    equiv_list(L1, L2);
+equiv(N1, N2) when is_number(N1), is_number(N2) ->
+    N1 == N2;
+equiv(B1, B2) when is_binary(B1), is_binary(B2) ->
+    B1 == B2;
+equiv(true, true) ->
+    true;
+equiv(false, false) ->
+    true;
+equiv(null, null) ->
+    true.
+
+%% Object representation and traversal order is unknown.
+%% Use the sledgehammer and sort property lists.
+equiv_object(Props1, Props2) ->
+    L1 = lists:keysort(1, Props1),
+    L2 = lists:keysort(1, Props2),
+    Pairs = lists:zip(L1, L2),
+    true = lists:all(
+        fun({{K1, V1}, {K2, V2}}) ->
+            equiv(K1, K2) andalso equiv(V1, V2)
+        end,
+        Pairs).
+
+%% Recursively compare list elements for equivalence.
+equiv_list([], []) ->
+    true;
+equiv_list([V1 | L1], [V2 | L2]) ->
+    equiv(V1, V2) andalso equiv_list(L1, L2).
+
+single_byte_data_fun([]) ->
+    done;
+single_byte_data_fun([H | T]) ->
+    {<<H>>, fun() -> single_byte_data_fun(T) end}.
+
+multiple_bytes_data_fun([]) ->
+    done;
+multiple_bytes_data_fun(L) ->
+    N = couch_rand:uniform(7) - 1,
+    {Part, Rest} = split(L, N),
+    {list_to_binary(Part), fun() -> multiple_bytes_data_fun(Rest) end}.
+
+split(L, N) when length(L) =< N ->
+    {L, []};
+split(L, N) ->
+    take(N, L, []).
+
+take(0, L, Acc) ->
+    {lists:reverse(Acc), L};
+take(N, [H|L], Acc) ->
+    take(N - 1, L, [H | Acc]).
diff --git a/src/couch/test/eunit/test_web.erl b/src/couch/test/eunit/test_web.erl
new file mode 100644
index 000000000..b1b3e65c9
--- /dev/null
+++ b/src/couch/test/eunit/test_web.erl
@@ -0,0 +1,114 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_web).
+-behaviour(gen_server).
+
+-compile(tuple_calls).
+
+-include_lib("couch/include/couch_eunit.hrl").
+
+-export([start_link/0, stop/0, loop/1, get_port/0, set_assert/1, check_last/0]).
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+-define(SERVER, test_web_server).
+-define(HANDLER, test_web_handler).
+-define(DELAY, 500).
+
+start_link() ->
+    gen_server:start({local, ?HANDLER}, ?MODULE, [], []),
+    mochiweb_http:start([
+        {name, ?SERVER},
+        {loop, {?MODULE, loop}},
+        {port, 0}
+    ]).
+
+loop(Req) ->
+    %?debugFmt("Handling request: ~p", [Req]),
+    case gen_server:call(?HANDLER, {check_request, Req}) of
+        {ok, RespInfo} ->
+            {ok, Req:respond(RespInfo)};
+        {raw, {Status, Headers, BodyChunks}} ->
+            Resp = Req:start_response({Status, Headers}),
+            lists:foreach(fun(C) -> Resp:send(C) end, BodyChunks),
+            erlang:put(mochiweb_request_force_close, true),
+            {ok, Resp};
+        {chunked, {Status, Headers, BodyChunks}} ->
+            Resp = Req:respond({Status, Headers, chunked}),
+            timer:sleep(?DELAY),
+            lists:foreach(fun(C) -> Resp:write_chunk(C) end, BodyChunks),
+            Resp:write_chunk([]),
+            {ok, Resp};
+        {error, Reason} ->
+            ?debugFmt("Error: ~p", [Reason]),
+            Body = lists:flatten(io_lib:format("Error: ~p", [Reason])),
+            {ok, Req:respond({200, [], Body})}
+    end.
+
+get_port() ->
+    mochiweb_socket_server:get(?SERVER, port).
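+
+%% A typical interaction with this module (illustrative only): a test
+%% installs a one-shot assertion, drives an HTTP request at the port
+%% returned by get_port/0, then verifies the handler saw it:
+%%
+%%     test_web:set_assert(fun(_Req) -> {ok, {200, [], "ok"}} end),
+%%     %% ... issue a request against http://127.0.0.1:<Port>/ ...
+%%     was_ok = test_web:check_last()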
+ +set_assert(Fun) -> + ?assertEqual(ok, gen_server:call(?HANDLER, {set_assert, Fun})). + +check_last() -> + gen_server:call(?HANDLER, last_status). + +init(_) -> + {ok, nil}. + +terminate(_Reason, _State) -> + ok. + +stop() -> + mochiweb_http:stop(?SERVER). + + +handle_call({check_request, Req}, _From, State) when is_function(State, 1) -> + Resp2 = case (catch State(Req)) of + {ok, Resp} -> + {reply, {ok, Resp}, was_ok}; + {raw, Resp} -> + {reply, {raw, Resp}, was_ok}; + {chunked, Resp} -> + {reply, {chunked, Resp}, was_ok}; + Error -> + {reply, {error, Error}, not_ok} + end, + Req:cleanup(), + Resp2; +handle_call({check_request, _Req}, _From, _State) -> + {reply, {error, no_assert_function}, not_ok}; +handle_call(last_status, _From, State) when is_atom(State) -> + {reply, State, nil}; +handle_call(last_status, _From, State) -> + {reply, {error, not_checked}, State}; +handle_call({set_assert, Fun}, _From, nil) -> + {reply, ok, Fun}; +handle_call({set_assert, _}, _From, State) -> + {reply, {error, assert_function_set}, State}; +handle_call(Msg, _From, State) -> + {reply, {ignored, Msg}, State}. + +handle_cast(stop, State) -> + {stop, normal, State}; +handle_cast(Msg, State) -> + ?debugFmt("Ignoring cast message: ~p", [Msg]), + {noreply, State}. + +handle_info(Msg, State) -> + ?debugFmt("Ignoring info message: ~p", [Msg]), + {noreply, State}. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. diff --git a/src/couch/test/fixtures/3b835456c235b1827e012e25666152f3.view b/src/couch/test/fixtures/3b835456c235b1827e012e25666152f3.view deleted file mode 100644 index 9c67648be..000000000 Binary files a/src/couch/test/fixtures/3b835456c235b1827e012e25666152f3.view and /dev/null differ diff --git a/src/couch/test/fixtures/couch_stats_aggregates.cfg b/src/couch/test/fixtures/couch_stats_aggregates.cfg deleted file mode 100644 index 30e475da8..000000000 --- a/src/couch/test/fixtures/couch_stats_aggregates.cfg +++ /dev/null @@ -1,19 +0,0 @@ -% Licensed to the Apache Software Foundation (ASF) under one -% or more contributor license agreements. See the NOTICE file -% distributed with this work for additional information -% regarding copyright ownership. The ASF licenses this file -% to you under the Apache License, Version 2.0 (the -% "License"); you may not use this file except in compliance -% with the License. You may obtain a copy of the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, -% software distributed under the License is distributed on an -% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -% KIND, either express or implied. See the License for the -% specific language governing permissions and limitations -% under the License. - -{testing, stuff, "yay description"}. -{number, '11', "randomosity"}. diff --git a/src/couch/test/fixtures/couch_stats_aggregates.ini b/src/couch/test/fixtures/couch_stats_aggregates.ini deleted file mode 100644 index cc5cd2187..000000000 --- a/src/couch/test/fixtures/couch_stats_aggregates.ini +++ /dev/null @@ -1,20 +0,0 @@ -; Licensed to the Apache Software Foundation (ASF) under one -; or more contributor license agreements. See the NOTICE file -; distributed with this work for additional information -; regarding copyright ownership. The ASF licenses this file -; to you under the Apache License, Version 2.0 (the -; "License"); you may not use this file except in compliance -; with the License. 
You may obtain a copy of the License at -; -; http://www.apache.org/licenses/LICENSE-2.0 -; -; Unless required by applicable law or agreed to in writing, -; software distributed under the License is distributed on an -; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -; KIND, either express or implied. See the License for the -; specific language governing permissions and limitations -; under the License. - -[stats] -rate = 10000000 ; We call collect_sample in testing -samples = [0, 1] diff --git a/src/couch/test/fixtures/db_non_partitioned.couch b/src/couch/test/fixtures/db_non_partitioned.couch deleted file mode 100644 index 327d9bb5d..000000000 Binary files a/src/couch/test/fixtures/db_non_partitioned.couch and /dev/null differ diff --git a/src/couch/test/fixtures/db_v6_with_1_purge_req.couch b/src/couch/test/fixtures/db_v6_with_1_purge_req.couch deleted file mode 100644 index b0d39c9ec..000000000 Binary files a/src/couch/test/fixtures/db_v6_with_1_purge_req.couch and /dev/null differ diff --git a/src/couch/test/fixtures/db_v6_with_1_purge_req_for_2_docs.couch b/src/couch/test/fixtures/db_v6_with_1_purge_req_for_2_docs.couch deleted file mode 100644 index b584fce31..000000000 Binary files a/src/couch/test/fixtures/db_v6_with_1_purge_req_for_2_docs.couch and /dev/null differ diff --git a/src/couch/test/fixtures/db_v6_with_2_purge_req.couch b/src/couch/test/fixtures/db_v6_with_2_purge_req.couch deleted file mode 100644 index ee4e11b7f..000000000 Binary files a/src/couch/test/fixtures/db_v6_with_2_purge_req.couch and /dev/null differ diff --git a/src/couch/test/fixtures/db_v6_without_purge_req.couch b/src/couch/test/fixtures/db_v6_without_purge_req.couch deleted file mode 100644 index 814feb8e1..000000000 Binary files a/src/couch/test/fixtures/db_v6_without_purge_req.couch and /dev/null differ diff --git a/src/couch/test/fixtures/db_v7_with_1_purge_req.couch b/src/couch/test/fixtures/db_v7_with_1_purge_req.couch deleted file mode 100644 index cab8331db..000000000 Binary files a/src/couch/test/fixtures/db_v7_with_1_purge_req.couch and /dev/null differ diff --git a/src/couch/test/fixtures/db_v7_with_1_purge_req_for_2_docs.couch b/src/couch/test/fixtures/db_v7_with_1_purge_req_for_2_docs.couch deleted file mode 100644 index b613646b1..000000000 Binary files a/src/couch/test/fixtures/db_v7_with_1_purge_req_for_2_docs.couch and /dev/null differ diff --git a/src/couch/test/fixtures/db_v7_with_2_purge_req.couch b/src/couch/test/fixtures/db_v7_with_2_purge_req.couch deleted file mode 100644 index 126fc919e..000000000 Binary files a/src/couch/test/fixtures/db_v7_with_2_purge_req.couch and /dev/null differ diff --git a/src/couch/test/fixtures/db_v7_without_purge_req.couch b/src/couch/test/fixtures/db_v7_without_purge_req.couch deleted file mode 100644 index 762dc8dad..000000000 Binary files a/src/couch/test/fixtures/db_v7_without_purge_req.couch and /dev/null differ diff --git a/src/couch/test/fixtures/logo.png b/src/couch/test/fixtures/logo.png deleted file mode 100644 index d21ac025b..000000000 Binary files a/src/couch/test/fixtures/logo.png and /dev/null differ diff --git a/src/couch/test/fixtures/multipart.http b/src/couch/test/fixtures/multipart.http deleted file mode 100644 index fe9f271cc..000000000 --- a/src/couch/test/fixtures/multipart.http +++ /dev/null @@ -1,13 +0,0 @@ -{ - "_id": "our document goes here" -} - ---multipart_related_boundary~~~~~~~~~~~~~~~~~~~~ -Content-Type: application/json - 
-{"value":0,"_id":"doc0","_rev":"1-7e97409c987eac3a99385a17ad4cbabe","_attachments":{"plus1":{"stub":false,"follows":true,"content_type":"application/json","length":14}},".cache":{"plus1":{"timestamp":"2012-08-13T13:59:27.826Z"}}} ---multipart_related_boundary~~~~~~~~~~~~~~~~~~~~ - -{"value":"01"} ---multipart_related_boundary~~~~~~~~~~~~~~~~~~~~-- - diff --git a/src/couch/test/fixtures/os_daemon_bad_perm.sh b/src/couch/test/fixtures/os_daemon_bad_perm.sh deleted file mode 100644 index 345c8b40b..000000000 --- a/src/couch/test/fixtures/os_daemon_bad_perm.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh -e -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy of -# the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -# -# Please do not make this file executable as that's the error being tested. - -sleep 5 diff --git a/src/couch/test/fixtures/os_daemon_can_reboot.sh b/src/couch/test/fixtures/os_daemon_can_reboot.sh deleted file mode 100755 index 5bc10e83f..000000000 --- a/src/couch/test/fixtures/os_daemon_can_reboot.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh -e -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy of -# the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -sleep 2 diff --git a/src/couch/test/fixtures/os_daemon_configer.escript b/src/couch/test/fixtures/os_daemon_configer.escript deleted file mode 100755 index f146b8314..000000000 --- a/src/couch/test/fixtures/os_daemon_configer.escript +++ /dev/null @@ -1,97 +0,0 @@ -#! /usr/bin/env escript - -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --include("../../include/couch_eunit.hrl"). - -read() -> - case io:get_line('') of - eof -> - stop; - Data -> - jiffy:decode(Data) - end. - -write(Mesg) -> - Data = iolist_to_binary(jiffy:encode(Mesg)), - io:format(binary_to_list(Data) ++ "\n", []). - -get_cfg(Section) -> - write([<<"get">>, Section]), - read(). - -get_cfg(Section, Name) -> - write([<<"get">>, Section, Name]), - read(). - -log(Mesg) -> - write([<<"log">>, Mesg]). - -log(Mesg, Level) -> - write([<<"log">>, Mesg, {[{<<"level">>, Level}]}]). 
- -test_get_cfg1() -> - Path = list_to_binary(?FILE), - FileName = list_to_binary(filename:basename(?FILE)), - {[{FileName, Path}]} = get_cfg(<<"os_daemons">>). - -test_get_cfg2() -> - Path = list_to_binary(?FILE), - FileName = list_to_binary(filename:basename(?FILE)), - Path = get_cfg(<<"os_daemons">>, FileName), - <<"sequential">> = get_cfg(<<"uuids">>, <<"algorithm">>). - - -test_get_unknown_cfg() -> - {[]} = get_cfg(<<"aal;3p4">>), - null = get_cfg(<<"aal;3p4">>, <<"313234kjhsdfl">>). - -test_log() -> - log(<<"foobar!">>), - log(<<"some stuff!">>, <<"debug">>), - log(2), - log(true), - write([<<"log">>, <<"stuff">>, 2]), - write([<<"log">>, 3, null]), - write([<<"log">>, [1, 2], {[{<<"level">>, <<"debug">>}]}]), - write([<<"log">>, <<"true">>, {[]}]). - -do_tests() -> - test_get_cfg1(), - test_get_cfg2(), - test_get_unknown_cfg(), - test_log(), - loop(io:read("")). - -loop({ok, _}) -> - loop(io:read("")); -loop(eof) -> - init:stop(); -loop({error, _Reason}) -> - init:stop(). - -main([]) -> - init_code_path(), - do_tests(). - -init_code_path() -> - Paths = [ - "couchdb", - "jiffy", - "ibrowse", - "mochiweb", - "snappy" - ], - lists:foreach(fun(Name) -> - code:add_patha(filename:join([?BUILDDIR(), "src", Name, "ebin"])) - end, Paths). diff --git a/src/couch/test/fixtures/os_daemon_die_on_boot.sh b/src/couch/test/fixtures/os_daemon_die_on_boot.sh deleted file mode 100755 index 256ee7935..000000000 --- a/src/couch/test/fixtures/os_daemon_die_on_boot.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh -e -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy of -# the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -exit 1 diff --git a/src/couch/test/fixtures/os_daemon_die_quickly.sh b/src/couch/test/fixtures/os_daemon_die_quickly.sh deleted file mode 100755 index f5a13684e..000000000 --- a/src/couch/test/fixtures/os_daemon_die_quickly.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh -e -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy of -# the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -sleep 1 diff --git a/src/couch/test/fixtures/os_daemon_looper.escript b/src/couch/test/fixtures/os_daemon_looper.escript deleted file mode 100755 index 73974e905..000000000 --- a/src/couch/test/fixtures/os_daemon_looper.escript +++ /dev/null @@ -1,26 +0,0 @@ -#! /usr/bin/env escript - -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - -loop() -> - loop(io:read("")). - -loop({ok, _}) -> - loop(io:read("")); -loop(eof) -> - stop; -loop({error, Reason}) -> - throw({error, Reason}). - -main([]) -> - loop(). diff --git a/src/couch/test/fixtures/test.couch b/src/couch/test/fixtures/test.couch deleted file mode 100644 index 32c79af32..000000000 Binary files a/src/couch/test/fixtures/test.couch and /dev/null differ diff --git a/src/couch/test/global_changes_tests.erl b/src/couch/test/global_changes_tests.erl deleted file mode 100644 index 4392aafac..000000000 --- a/src/couch/test/global_changes_tests.erl +++ /dev/null @@ -1,159 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(global_changes_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(USER, "admin"). --define(PASS, "pass"). --define(AUTH, {basic_auth, {?USER, ?PASS}}). - -setup() -> - Host = get_host(), - ok = add_admin(?USER, ?PASS), - DbName = "foo/" ++ ?b2l(?tempdb()), - ok = http_create_db(DbName), - {Host, DbName}. - -teardown({_, DbName}) -> - ok = http_delete_db(DbName), - delete_admin(?USER), - ok. - -http_create_db(Name) -> - {ok, Status, _, _} = test_request:put(db_url(Name), [?AUTH], ""), - true = lists:member(Status, [201, 202]), - ok. - -http_delete_db(Name) -> - {ok, Status, _, _} = test_request:delete(db_url(Name), [?AUTH]), - true = lists:member(Status, [200, 202]), - ok. - -db_url(Name) -> - get_host() ++ "/" ++ escape(Name). - -start_couch() -> - Ctx = test_util:start_couch([chttpd, global_changes]), - ok = ensure_db_exists("_global_changes"), - Ctx. - -ensure_db_exists(Name) -> - case fabric:create_db(Name) of - ok -> - ok; - {error, file_exists} -> - ok - end. - -global_changes_test_() -> - { - "Checking global_changes endpoint", - { - setup, - fun start_couch/0, - fun test_util:stop/1, - [ - check_response() - ] - } - }. - -check_response() -> - { - "Check response", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_return_correct_response_on_create/1, - fun should_return_correct_response_on_update/1 - ] - } - }. - -should_return_correct_response_on_create({Host, DbName}) -> - ?_test(begin - Headers = [?AUTH], - create_doc(Host, DbName, "bar/baz"), - {Status, Events} = request_updates(Host, DbName, Headers), - ?assertEqual(200, Status), - ?assertEqual([<<"created">>, <<"updated">>], Events) - end). 
- -should_return_correct_response_on_update({Host, DbName}) -> - ?_test(begin - Headers = [?AUTH], - create_doc(Host, DbName, "bar/baz"), - update_doc(Host, DbName, "bar/baz", "new_value"), - {Status, Events} = request_updates(Host, DbName, Headers), - ?assertEqual(200, Status), - ?assertEqual([<<"created">>, <<"updated">>], Events) - end). - -create_doc(Host, DbName, Id) -> - Headers = [?AUTH], - Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id), - Body = jiffy:encode({[ - {key, "value"} - ]}), - {ok, Status, _Headers, _Body} = test_request:put(Url, Headers, Body), - ?assert(Status =:= 201 orelse Status =:= 202), - timer:sleep(1000), - ok. - -update_doc(Host, DbName, Id, Value) -> - Headers = [?AUTH], - Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id), - {ok, 200, _Headers0, BinBody} = test_request:get(Url, Headers), - [Rev] = decode_response(BinBody, [<<"_rev">>]), - Body = jiffy:encode({[ - {key, Value}, - {'_rev', Rev} - ]}), - {ok, Status, _Headers1, _Body} = test_request:put(Url, Headers, Body), - ?assert(Status =:= 201 orelse Status =:= 202), - timer:sleep(1000), - ok. - -request_updates(Host, DbName, Headers) -> - Url = Host ++ "/_db_updates", - {ok, Status, _Headers, BinBody} = test_request:get(Url, Headers), - [Results] = decode_response(BinBody, [<<"results">>]), - ToDecode = [<<"db_name">>, <<"type">>], - Values = [decode_result(Result, ToDecode) || Result <- Results], - Result = [Type || [DB, Type] <- Values, DB == ?l2b(DbName)], - {Status, lists:sort(Result)}. - -decode_result({Props}, ToDecode) -> - [couch_util:get_value(Key, Props) || Key <- ToDecode]. - -decode_response(BinBody, ToDecode) -> - {Body} = jiffy:decode(BinBody), - [couch_util:get_value(Key, Body) || Key <- ToDecode]. - -add_admin(User, Pass) -> - Hashed = couch_passwords:hash_admin_password(Pass), - config:set("admins", User, ?b2l(Hashed), _Persist=false). - -delete_admin(User) -> - config:delete("admins", User, false). - -get_host() -> - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)), - "http://" ++ Addr ++ ":" ++ Port. - -escape(Path) -> - re:replace(Path, "/", "%2f", [global, {return, list}]). diff --git a/src/couch/test/json_stream_parse_tests.erl b/src/couch/test/json_stream_parse_tests.erl deleted file mode 100644 index e690d7728..000000000 --- a/src/couch/test/json_stream_parse_tests.erl +++ /dev/null @@ -1,151 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(json_stream_parse_tests). - --include_lib("couch/include/couch_eunit.hrl"). 
- --define(CASES, - [ - {1, "1", "integer numeric literial"}, - {3.1416, "3.14160", "float numeric literal"}, % text representation may truncate, trail zeroes - {-1, "-1", "negative integer numeric literal"}, - {-3.1416, "-3.14160", "negative float numeric literal"}, - {12.0e10, "1.20000e+11", "float literal in scientific notation"}, - {1.234E+10, "1.23400e+10", "another float literal in scientific notation"}, - {-1.234E-10, "-1.23400e-10", "negative float literal in scientific notation"}, - {10.0, "1.0e+01", "yet another float literal in scientific notation"}, - {123.456, "1.23456E+2", "yet another float literal in scientific notation"}, - {10.0, "1e1", "yet another float literal in scientific notation"}, - {<<"foo">>, "\"foo\"", "string literal"}, - {<<"foo", 5, "bar">>, "\"foo\\u0005bar\"", "string literal with \\u0005"}, - {<<"">>, "\"\"", "empty string literal"}, - {<<"\n\n\n">>, "\"\\n\\n\\n\"", "only new lines literal"}, - {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\"", - "only white spaces string literal"}, - {null, "null", "null literal"}, - {true, "true", "true literal"}, - {false, "false", "false literal"}, - {<<"null">>, "\"null\"", "null string literal"}, - {<<"true">>, "\"true\"", "true string literal"}, - {<<"false">>, "\"false\"", "false string literal"}, - {{[]}, "{}", "empty object literal"}, - {{[{<<"foo">>, <<"bar">>}]}, "{\"foo\":\"bar\"}", - "simple object literal"}, - {{[{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]}, - "{\"foo\":\"bar\",\"baz\":123}", "another simple object literal"}, - {[], "[]", "empty array literal"}, - {[[]], "[[]]", "empty array literal inside a single element array literal"}, - {[1, <<"foo">>], "[1,\"foo\"]", "simple non-empty array literal"}, - {[1199344435545.0, 1], "[1199344435545.0,1]", - "another simple non-empty array literal"}, - {[false, true, 321, null], "[false, true, 321, null]", "array of literals"}, - {{[{<<"foo">>, [123]}]}, "{\"foo\":[123]}", - "object literal with an array valued property"}, - {{[{<<"foo">>, {[{<<"bar">>, true}]}}]}, - "{\"foo\":{\"bar\":true}}", "nested object literal"}, - {{[{<<"foo">>, []}, {<<"bar">>, {[{<<"baz">>, true}]}}, - {<<"alice">>, <<"bob">>}]}, - "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}", - "complex object literal"}, - {[-123, <<"foo">>, {[{<<"bar">>, []}]}, null], - "[-123,\"foo\",{\"bar\":[]},null]", - "complex array literal"} - ] -). - - -raw_json_input_test_() -> - Tests = lists:map( - fun({EJson, JsonString, Desc}) -> - {Desc, - ?_assert(equiv(EJson, json_stream_parse:to_ejson(JsonString)))} - end, ?CASES), - {"Tests with raw JSON string as the input", Tests}. - -one_byte_data_fun_test_() -> - Tests = lists:map( - fun({EJson, JsonString, Desc}) -> - DataFun = fun() -> single_byte_data_fun(JsonString) end, - {Desc, - ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))} - end, ?CASES), - {"Tests with a 1 byte output data function as the input", Tests}. - -test_multiple_bytes_data_fun_test_() -> - Tests = lists:map( - fun({EJson, JsonString, Desc}) -> - DataFun = fun() -> multiple_bytes_data_fun(JsonString) end, - {Desc, - ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))} - end, ?CASES), - {"Tests with a multiple bytes output data function as the input", Tests}. - - -%% Test for equivalence of Erlang terms. -%% Due to arbitrary order of construction, equivalent objects might -%% compare unequal as erlang terms, so we need to carefully recurse -%% through aggregates (tuples and objects). 
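
A worked example of the problem the equiv/2 clauses below guard against: EJSON
wraps objects as a one-tuple around a proplist, so two encodings of the same
JSON object can differ as raw Erlang terms purely by key order. A short sketch,
reusing only names from this module:

    %% Both terms encode {"a":1,"b":2}. Raw comparison fails;
    %% the order-insensitive equiv/2 defined below succeeds.
    A = {[{<<"a">>, 1}, {<<"b">>, 2}]},
    B = {[{<<"b">>, 2}, {<<"a">>, 1}]},
    false = (A =:= B),
    true = equiv(A, B).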
-
-equiv({Props1}, {Props2}) ->
-    equiv_object(Props1, Props2);
-equiv(L1, L2) when is_list(L1), is_list(L2) ->
-    equiv_list(L1, L2);
-equiv(N1, N2) when is_number(N1), is_number(N2) ->
-    N1 == N2;
-equiv(B1, B2) when is_binary(B1), is_binary(B2) ->
-    B1 == B2;
-equiv(true, true) ->
-    true;
-equiv(false, false) ->
-    true;
-equiv(null, null) ->
-    true.
-
-%% Object representation and traversal order is unknown.
-%% Use the sledgehammer and sort property lists.
-equiv_object(Props1, Props2) ->
-    L1 = lists:keysort(1, Props1),
-    L2 = lists:keysort(1, Props2),
-    Pairs = lists:zip(L1, L2),
-    true = lists:all(
-        fun({{K1, V1}, {K2, V2}}) ->
-            equiv(K1, K2) andalso equiv(V1, V2)
-        end,
-        Pairs).
-
-%% Recursively compare tuple elements for equivalence.
-equiv_list([], []) ->
-    true;
-equiv_list([V1 | L1], [V2 | L2]) ->
-    equiv(V1, V2) andalso equiv_list(L1, L2).
-
-single_byte_data_fun([]) ->
-    done;
-single_byte_data_fun([H | T]) ->
-    {<<H>>, fun() -> single_byte_data_fun(T) end}.
-
-multiple_bytes_data_fun([]) ->
-    done;
-multiple_bytes_data_fun(L) ->
-    N = couch_rand:uniform(7) - 1,
-    {Part, Rest} = split(L, N),
-    {list_to_binary(Part), fun() -> multiple_bytes_data_fun(Rest) end}.
-
-split(L, N) when length(L) =< N ->
-    {L, []};
-split(L, N) ->
-    take(N, L, []).
-
-take(0, L, Acc) ->
-    {lists:reverse(Acc), L};
-take(N, [H|L], Acc) ->
-    take(N - 1, L, [H | Acc]).
diff --git a/src/couch/test/test_web.erl b/src/couch/test/test_web.erl
deleted file mode 100644
index b1b3e65c9..000000000
--- a/src/couch/test/test_web.erl
+++ /dev/null
@@ -1,114 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(test_web).
--behaviour(gen_server).
-
--compile(tuple_calls).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--export([start_link/0, stop/0, loop/1, get_port/0, set_assert/1, check_last/0]).
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
--define(SERVER, test_web_server).
--define(HANDLER, test_web_handler).
--define(DELAY, 500).
-
-start_link() ->
-    gen_server:start({local, ?HANDLER}, ?MODULE, [], []),
-    mochiweb_http:start([
-        {name, ?SERVER},
-        {loop, {?MODULE, loop}},
-        {port, 0}
-    ]).
-
-loop(Req) ->
-    %?debugFmt("Handling request: ~p", [Req]),
-    case gen_server:call(?HANDLER, {check_request, Req}) of
-        {ok, RespInfo} ->
-            {ok, Req:respond(RespInfo)};
-        {raw, {Status, Headers, BodyChunks}} ->
-            Resp = Req:start_response({Status, Headers}),
-            lists:foreach(fun(C) -> Resp:send(C) end, BodyChunks),
-            erlang:put(mochiweb_request_force_close, true),
-            {ok, Resp};
-        {chunked, {Status, Headers, BodyChunks}} ->
-            Resp = Req:respond({Status, Headers, chunked}),
-            timer:sleep(?DELAY),
-            lists:foreach(fun(C) -> Resp:write_chunk(C) end, BodyChunks),
-            Resp:write_chunk([]),
-            {ok, Resp};
-        {error, Reason} ->
-            ?debugFmt("Error: ~p", [Reason]),
-            Body = lists:flatten(io_lib:format("Error: ~p", [Reason])),
-            {ok, Req:respond({200, [], Body})}
-    end.
-
-get_port() ->
-    mochiweb_socket_server:get(?SERVER, port).
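
test_web pairs a mochiweb listener with a gen_server holding one assertion fun
per request; the {ok, ...}, {raw, ...} and {chunked, ...} shapes handled in
loop/1 above map onto mochiweb's respond and start_response APIs. A
hypothetical usage sketch (the HTTP client step is assumed; everything else is
this module's exported API, with set_assert/1 and check_last/0 defined just
below):

    {ok, _Http} = test_web:start_link(),
    test_web:set_assert(fun(_Req) ->
        %% returning {raw, ...} or {chunked, ...} instead would
        %% exercise the other branches of loop/1
        {ok, {200, [], <<"hello">>}}
    end),
    Url = "http://127.0.0.1:" ++ integer_to_list(test_web:get_port()),
    %% ... issue one request against Url with any HTTP client ...
    was_ok = test_web:check_last().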
- -set_assert(Fun) -> - ?assertEqual(ok, gen_server:call(?HANDLER, {set_assert, Fun})). - -check_last() -> - gen_server:call(?HANDLER, last_status). - -init(_) -> - {ok, nil}. - -terminate(_Reason, _State) -> - ok. - -stop() -> - mochiweb_http:stop(?SERVER). - - -handle_call({check_request, Req}, _From, State) when is_function(State, 1) -> - Resp2 = case (catch State(Req)) of - {ok, Resp} -> - {reply, {ok, Resp}, was_ok}; - {raw, Resp} -> - {reply, {raw, Resp}, was_ok}; - {chunked, Resp} -> - {reply, {chunked, Resp}, was_ok}; - Error -> - {reply, {error, Error}, not_ok} - end, - Req:cleanup(), - Resp2; -handle_call({check_request, _Req}, _From, _State) -> - {reply, {error, no_assert_function}, not_ok}; -handle_call(last_status, _From, State) when is_atom(State) -> - {reply, State, nil}; -handle_call(last_status, _From, State) -> - {reply, {error, not_checked}, State}; -handle_call({set_assert, Fun}, _From, nil) -> - {reply, ok, Fun}; -handle_call({set_assert, _}, _From, State) -> - {reply, {error, assert_function_set}, State}; -handle_call(Msg, _From, State) -> - {reply, {ignored, Msg}, State}. - -handle_cast(stop, State) -> - {stop, normal, State}; -handle_cast(Msg, State) -> - ?debugFmt("Ignoring cast message: ~p", [Msg]), - {noreply, State}. - -handle_info(Msg, State) -> - ?debugFmt("Ignoring info message: ~p", [Msg]), - {noreply, State}. - -code_change(_OldVsn, State, _Extra) -> - {ok, State}. diff --git a/src/couch_epi/test/couch_epi_basic_test.erl b/src/couch_epi/test/couch_epi_basic_test.erl deleted file mode 100644 index 587d1564e..000000000 --- a/src/couch_epi/test/couch_epi_basic_test.erl +++ /dev/null @@ -1,137 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_epi_basic_test). - --export([ - start_link/0 -]). - --export([ - app/0, - providers/0, - services/0, - data_providers/0, - data_subscriptions/0, - processes/0, - notify/3 -]). - --define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}). - - -start_link() -> ok. - - -%% BEGIN couch_epi_plugin behaviour callbacks - - -app() -> test_app. - - -providers() -> - [ - {my_service, provider1}, - {my_service, provider2} - ]. - - -services() -> - [ - {my_service, ?MODULE} - ]. - - -data_providers() -> - [ - {{test_app, descriptions}, {static_module, ?MODULE}, [{interval, 100}]} - ]. - - -data_subscriptions() -> - [ - {test_app, descriptions} - ]. - - -processes() -> - [ - {?MODULE, [?CHILD(extra_process, worker)]}, - {?MODULE, [{to_replace, {new, start_link, [bar]}, - permanent, 5000, worker, [bar]}]} - ]. - - -notify(_Key, _OldData, _NewData) -> - ok. - - -%% END couch_epi_plugin behaviour callbacks - - -parse_child_id(Id) when is_atom(Id) -> - Id; -parse_child_id(Id) -> - ["couch_epi_codechange_monitor", ServiceName, KindStr] - = string:tokens(Id, "|"), - Kind = list_to_atom(KindStr), - case string:tokens(ServiceName, ":") of - [ServiceId, Key] -> - {{list_to_atom(ServiceId), list_to_atom(Key)}, Kind}; - [Key] -> - {list_to_atom(Key), Kind} - end. - - --include_lib("eunit/include/eunit.hrl"). 
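
parse_child_id/1 above inverts the child id format that couch_epi_sup assigns
to codechange monitors: "couch_epi_codechange_monitor|<service>|<kind>", where
<service> may itself be "<service_id>:<key>". Two worked examples (illustrative
inputs, not assertions from the patch):

    {{test_app, descriptions}, data_providers} = parse_child_id(
        "couch_epi_codechange_monitor|test_app:descriptions|data_providers"),
    {my_service, providers} = parse_child_id(
        "couch_epi_codechange_monitor|my_service|providers").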
- -basic_test() -> - Expected = lists:sort([ - {extra_process, [], [extra_process]}, - {to_replace, [bar], [bar]}, - {{my_service, providers}, - [couch_epi_functions_gen_my_service], - [couch_epi_codechange_monitor, couch_epi_functions_gen_my_service, - provider1, provider2]}, - {{my_service, services}, - [couch_epi_functions_gen_my_service], - lists:sort([couch_epi_codechange_monitor, - couch_epi_functions_gen_my_service, ?MODULE])}, - {{{test_app, descriptions}, data_subscriptions}, - [couch_epi_data_gen_test_app_descriptions], - lists:sort([couch_epi_codechange_monitor, - couch_epi_data_gen_test_app_descriptions, ?MODULE])}, - {{{test_app, descriptions}, data_providers}, - [couch_epi_data_gen_test_app_descriptions], - lists:sort([couch_epi_codechange_monitor, - couch_epi_data_gen_test_app_descriptions, ?MODULE])} - ]), - - ToReplace = {to_replace, - {old, start_link, [foo]}, permanent, 5000, worker, [foo]}, - Children = lists:sort(couch_epi_sup:plugin_childspecs( - ?MODULE, [?MODULE], [ToReplace])), - Results = [ - {parse_child_id(Id), Args, lists:sort(Modules)} - || {Id, {_M, _F, Args}, _, _, _, Modules} <- Children - ], - - Tests = lists:zip(Expected, Results), - [?assertEqual(Expect, Result) || {Expect, Result} <- Tests], - - ExpectedChild = {to_replace, {new, start_link, [bar]}, - permanent, 5000, worker, [bar]}, - ?assertEqual( - ExpectedChild, - lists:keyfind(to_replace, 1, Children)), - - ok. diff --git a/src/couch_epi/test/couch_epi_tests.erl b/src/couch_epi/test/couch_epi_tests.erl deleted file mode 100644 index 042753215..000000000 --- a/src/couch_epi/test/couch_epi_tests.erl +++ /dev/null @@ -1,690 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_epi_tests). - --include_lib("couch/include/couch_eunit.hrl"). - --define(DATA_FILE1, ?ABS_PATH("test/fixtures/app_data1.cfg")). --define(DATA_FILE2, ?ABS_PATH("test/fixtures/app_data2.cfg")). - --export([notify_cb/4, save/3, get/2]). - --record(ctx, {file, handle, pid, kv, key, modules = []}). - --define(TIMEOUT, 5000). --define(RELOAD_WAIT, 1000). - --define(temp_atom, - fun() -> - {A, B, C} = os:timestamp(), - list_to_atom(lists:flatten(io_lib:format("~p~p~p", [A, B, C]))) - end). - --define(MODULE1(Name), " - -export([inc/2, fail/2]). - - inc(KV, A) -> - Reply = A + 1, - couch_epi_tests:save(KV, inc1, Reply), - [KV, Reply]. - - fail(KV, A) -> - inc(KV, A). -"). - --define(MODULE2(Name), " - -export([inc/2, fail/2]). - - inc(KV, A) -> - Reply = A + 1, - couch_epi_tests:save(KV, inc2, Reply), - [KV, Reply]. - - fail(KV, _A) -> - couch_epi_tests:save(KV, inc2, check_error), - throw(check_error). -"). - --define(DATA_MODULE1(Name), " - -export([data/0]). - - data() -> - [ - {[complex, key, 1], [ - {type, counter}, - {desc, foo} - ]} - ]. -"). - --define(DATA_MODULE2(Name), " - -export([data/0]). - - data() -> - [ - {[complex, key, 2], [ - {type, counter}, - {desc, bar} - ]}, - {[complex, key, 1], [ - {type, counter}, - {desc, updated_foo} - ]} - ]. -"). - --define(DATA_MODULE3(Name, Kv), " - -export([data/0]). 
- -data() -> - {ok, Data} = couch_epi_tests:get('" ++ atom_to_list(Kv) ++ "', data), - Data. -"). - -%% ------------------------------------------------------------------ -%% couch_epi_plugin behaviour -%% ------------------------------------------------------------------ - -plugin_module([KV, Spec]) when is_tuple(Spec) -> - SpecStr = io_lib:format("~w", [Spec]), - KVStr = "'" ++ atom_to_list(KV) ++ "'", - " - -compile([export_all]). - - app() -> test_app. - providers() -> - []. - - services() -> - []. - - data_providers() -> - [ - {{test_app, descriptions}, " ++ SpecStr ++ ", [{interval, 100}]} - ]. - - data_subscriptions() -> - [ - {test_app, descriptions} - ]. - - processes() -> []. - - notify(Key, OldData, Data) -> - couch_epi_tests:notify_cb(Key, OldData, Data, " ++ KVStr ++ "). - "; -plugin_module([KV, Provider]) when is_atom(Provider) -> - KVStr = "'" ++ atom_to_list(KV) ++ "'", - " - -compile([export_all]). - - app() -> test_app. - providers() -> - [ - {my_service, " ++ atom_to_list(Provider) ++ "} - ]. - - services() -> - [ - {my_service, " ++ atom_to_list(Provider) ++ "} - ]. - - data_providers() -> - []. - - data_subscriptions() -> - []. - - processes() -> []. - - notify(Key, OldData, Data) -> - couch_epi_tests:notify_cb(Key, OldData, Data, " ++ KVStr ++ "). - ". - - -notify_cb(Key, OldData, Data, KV) -> - save(KV, is_called, {Key, OldData, Data}). - -start_epi(Plugins) -> - application:load(couch_epi), - PluginsModules = lists:map(fun({Module, Body}) -> - ok = generate_module(Module, Body), - Module - end, Plugins), - application:set_env(couch_epi, plugins, PluginsModules), - application:start(couch_epi). - -setup(data_file) -> - error_logger:tty(false), - - Key = {test_app, descriptions}, - File = ?tempfile(), - {ok, _} = file:copy(?DATA_FILE1, File), - KV = start_state_storage(), - - ok = start_epi([{provider_epi, plugin_module([KV, {file, File}])}]), - - Pid = whereis(couch_epi:get_handle(Key)), - - - #ctx{ - file = File, - key = Key, - handle = couch_epi:get_handle(Key), - kv = KV, - pid = Pid}; -setup(static_data_module) -> - error_logger:tty(false), - - Key = {test_app, descriptions}, - - ok = generate_module(provider, ?DATA_MODULE1(provider)), - KV = start_state_storage(), - - ok = start_epi([{provider_epi, plugin_module([KV, {static_module, provider}])}]), - - Pid = whereis(couch_epi:get_handle(Key)), - Handle = couch_epi:get_handle(Key), - - #ctx{ - key = Key, - handle = Handle, - modules = [Handle, provider], - kv = KV, - pid = Pid}; -setup(callback_data_module) -> - error_logger:tty(false), - - Key = {test_app, descriptions}, - - KV = start_state_storage(), - Value = [ - {[complex, key, 1], [ - {type, counter}, - {desc, foo} - ]} - ], - save(KV, data, Value), - - ok = generate_module(provider, ?DATA_MODULE3(provider, KV)), - - ok = start_epi([{provider_epi, plugin_module([KV, {callback_module, provider}])}]), - - Pid = whereis(couch_epi:get_handle(Key)), - Handle = couch_epi:get_handle(Key), - - #ctx{ - key = Key, - handle = Handle, - modules = [Handle, provider], - kv = KV, - pid = Pid}; -setup(functions) -> - Key = my_service, - error_logger:tty(false), - - ok = generate_module(provider1, ?MODULE1(provider1)), - ok = generate_module(provider2, ?MODULE2(provider2)), - - KV = start_state_storage(), - - ok = start_epi([ - {provider_epi1, plugin_module([KV, provider1])}, - {provider_epi2, plugin_module([KV, provider2])} - ]), - - Pid = whereis(couch_epi:get_handle(Key)), - Handle = couch_epi:get_handle(Key), - - #ctx{ - key = Key, - handle = Handle, - modules = 
[Handle, provider1, provider2], - kv = KV, - pid = Pid}; -setup({options, _Opts}) -> - setup(functions). - -teardown(_Case, #ctx{} = Ctx) -> - teardown(Ctx). - -teardown(#ctx{file = File} = Ctx) when File /= undefined -> - file:delete(File), - teardown(Ctx#ctx{file = undefined}); -teardown(#ctx{kv = KV}) -> - call(KV, stop), - application:stop(couch_epi), - ok. - -upgrade_release(Pid, Modules) -> - sys:suspend(Pid), - [ok = sys:change_code(Pid, M, undefined, []) || M <- Modules], - sys:resume(Pid), - ok. - -epi_config_update_test_() -> - Funs = [ - fun ensure_notified_when_changed/2, - fun ensure_not_notified_when_no_change/2 - ], - Cases = [ - data_file, - static_data_module, - callback_data_module, - functions - ], - { - "config update tests", - [make_case("Check notifications for: ", Cases, Funs)] - }. - -epi_data_source_test_() -> - Funs = [ - fun check_dump/2, - fun check_get/2, - fun check_get_value/2, - fun check_by_key/2, - fun check_by_source/2, - fun check_keys/2, - fun check_subscribers/2 - ], - Cases = [ - data_file, - static_data_module, - callback_data_module - ], - { - "epi data API tests", - [make_case("Check query API for: ", Cases, Funs)] - }. - - -epi_apply_test_() -> - { - "epi dispatch tests", - { - foreach, - fun() -> setup(functions) end, - fun teardown/1, - [ - fun check_pipe/1, - fun check_broken_pipe/1, - fun ensure_fail/1, - fun ensure_fail_pipe/1 - ] - } - }. - -epi_providers_order_test_() -> - { - "epi providers' order test", - { - foreach, - fun() -> setup(functions) end, - fun teardown/1, - [ - fun check_providers_order/1 - ] - } - }. - - -epi_reload_test_() -> - Cases = [ - data_file, - static_data_module, - callback_data_module, - functions - ], - Funs = [ - fun ensure_reload_if_manually_triggered/2, - fun ensure_reload_if_changed/2, - fun ensure_no_reload_when_no_change/2 - ], - { - "epi reload tests", - [make_case("Check reload for: ", Cases, Funs)] - }. - -apply_options_test_() -> - Funs = [fun ensure_apply_is_called/2], - Setups = {options, valid_options_permutations()}, - { - "apply options tests", - [make_case("Apply with options: ", Setups, Funs)] - }. - - -make_case(Msg, {Tag, P}, Funs) -> - Cases = [{Tag, Case} || Case <- P], - make_case(Msg, Cases, Funs); -make_case(Msg, P, Funs) -> - [{format_case_name(Msg, Case), [ - { - foreachx, fun setup/1, fun teardown/2, - [ - {Case, make_fun(Fun, 2)} || Fun <- Funs - ] - } - ]} || Case <- P]. - -make_fun(Fun, Arity) -> - {arity, A} = lists:keyfind(arity, 1, erlang:fun_info(Fun)), - make_fun(Fun, Arity, A). - -make_fun(Fun, A, A) -> Fun; -make_fun(Fun, 2, 1) -> fun(_, A) -> Fun(A) end; -make_fun(Fun, 1, 2) -> fun(A) -> Fun(undefined, A) end. - -format_case_name(Msg, Case) -> - lists:flatten(Msg ++ io_lib:format("~p", [Case])). - -valid_options_permutations() -> - [ - [], - [ignore_errors], - [pipe], - [pipe, ignore_errors], - [concurrent], - [concurrent, ignore_errors] - ]. 
- -ensure_notified_when_changed(functions, #ctx{key = Key} = Ctx) -> - ?_test(begin - subscribe(Ctx, test_app, Key), - update(functions, Ctx), - Result = get(Ctx, is_called), - ExpectedDefs = [ - {provider1,[{inc,2},{fail,2}]}, - {provider2,[{inc,2},{fail,2}]} - ], - ?assertEqual({ok, {Key, ExpectedDefs, ExpectedDefs}}, Result), - ok - end); -ensure_notified_when_changed(Case, #ctx{key = Key} = Ctx) -> - ?_test(begin - subscribe(Ctx, test_app, Key), - update(Case, Ctx), - ExpectedData = lists:usort([ - {[complex, key, 1], [{type, counter}, {desc, updated_foo}]}, - {[complex, key, 2], [{type, counter}, {desc, bar}]} - ]), - Result = get(Ctx, is_called), - ?assertMatch({ok, {Key, _OldData, _Data}}, Result), - {ok, {Key, OldData, Data}} = Result, - ?assertMatch(ExpectedData, lists:usort(Data)), - ?assertMatch( - [{[complex, key, 1], [{type, counter}, {desc, foo}]}], - lists:usort(OldData)) - end). - -ensure_not_notified_when_no_change(_Case, #ctx{key = Key} = Ctx) -> - ?_test(begin - subscribe(Ctx, test_app, Key), - timer:sleep(?RELOAD_WAIT), - ?assertMatch(error, get(Ctx, is_called)) - end). - -ensure_apply_is_called({options, Opts}, #ctx{handle = Handle, kv = KV, key = Key} = Ctx) -> - ?_test(begin - couch_epi:apply(Handle, Key, inc, [KV, 2], Opts), - maybe_wait(Opts), - ?assertMatch({ok, _}, get(Ctx, inc1)), - ?assertMatch({ok, _}, get(Ctx, inc2)), - ok - end); -ensure_apply_is_called(undefined, #ctx{} = Ctx) -> - ensure_apply_is_called({options, []}, Ctx). - -check_pipe(#ctx{handle = Handle, kv = KV, key = Key}) -> - ?_test(begin - Result = couch_epi:apply(Handle, Key, inc, [KV, 2], [pipe]), - ?assertMatch([KV, 4], Result), - ok - end). - -check_broken_pipe(#ctx{handle = Handle, kv = KV, key = Key} = Ctx) -> - ?_test(begin - Result = couch_epi:apply(Handle, Key, fail, [KV, 2], [pipe, ignore_errors]), - ?assertMatch([KV, 3], Result), - ?assertMatch([3, check_error], pipe_state(Ctx)), - ok - end). - -ensure_fail_pipe(#ctx{handle = Handle, kv = KV, key = Key}) -> - ?_test(begin - ?assertThrow(check_error, - couch_epi:apply(Handle, Key, fail, [KV, 2], [pipe])), - ok - end). - -ensure_fail(#ctx{handle = Handle, kv = KV, key = Key}) -> - ?_test(begin - ?assertThrow(check_error, - couch_epi:apply(Handle, Key, fail, [KV, 2], [])), - ok - end). - -pipe_state(Ctx) -> - Trace = [get(Ctx, inc1), get(Ctx, inc2)], - lists:usort([State || {ok, State} <- Trace]). - -check_dump(_Case, #ctx{handle = Handle}) -> - ?_test(begin - ?assertMatch( - [[{type, counter}, {desc, foo}]], - couch_epi:dump(Handle)) - end). - -check_get(_Case, #ctx{handle = Handle}) -> - ?_test(begin - ?assertMatch( - [[{type, counter}, {desc, foo}]], - couch_epi:get(Handle, [complex,key, 1])) - end). - -check_get_value(_Case, #ctx{handle = Handle}) -> - ?_test(begin - ?assertMatch( - [{type, counter}, {desc, foo}], - couch_epi:get_value(Handle, test_app, [complex,key, 1])) - end). - -check_by_key(_Case, #ctx{handle = Handle}) -> - ?_test(begin - ?assertMatch( - [{[complex, key, 1], - [{test_app, [{type, counter}, {desc, foo}]}]}], - couch_epi:by_key(Handle)), - ?assertMatch( - [{test_app, [{type, counter}, {desc, foo}]}], - couch_epi:by_key(Handle, [complex, key, 1])) - end). - -check_by_source(_Case, #ctx{handle = Handle}) -> - ?_test(begin - ?assertMatch( - [{test_app, - [{[complex,key, 1], [{type, counter}, {desc, foo}]}]}], - couch_epi:by_source(Handle)), - ?assertMatch( - [{[complex,key, 1], [{type, counter}, {desc, foo}]}], - couch_epi:by_source(Handle, test_app)) - end). 
- -check_keys(_Case, #ctx{handle = Handle}) -> - ?_assertMatch([[complex,key,1]], couch_epi:keys(Handle)). - -check_subscribers(_Case, #ctx{handle = Handle}) -> - ?_assertMatch([test_app], couch_epi:subscribers(Handle)). - - -ensure_reload_if_manually_triggered(Case, #ctx{pid = Pid, key = Key} = Ctx) -> - ?_test(begin - subscribe(Ctx, test_app, Key), - update_definitions(Case, Ctx), - couch_epi_module_keeper:reload(Pid), - timer:sleep(?RELOAD_WAIT), - ?assertNotEqual(error, get(Ctx, is_called)) - end). - -ensure_reload_if_changed(data_file = Case, - #ctx{key = Key, handle = Handle} = Ctx) -> - ?_test(begin - Version = Handle:version(), - subscribe(Ctx, test_app, Key), - update_definitions(Case, Ctx), - timer:sleep(?RELOAD_WAIT), - ?assertNotEqual(Version, Handle:version()), - ?assertNotEqual(error, get(Ctx, is_called)) - end); -ensure_reload_if_changed(Case, - #ctx{key = Key, handle = Handle} = Ctx) -> - ?_test(begin - Version = Handle:version(), - subscribe(Ctx, test_app, Key), - update(Case, Ctx), - ?assertNotEqual(Version, Handle:version()), - timer:sleep(?RELOAD_WAIT), %% Allow some time for notify to be called - ?assertNotEqual(error, get(Ctx, is_called)) - end). - -ensure_no_reload_when_no_change(functions, - #ctx{pid = Pid, key = Key, handle = Handle, modules = Modules} = Ctx) -> - ?_test(begin - Version = Handle:version(), - subscribe(Ctx, test_app, Key), - upgrade_release(Pid, Modules), - ?assertEqual(Version, Handle:version()), - ?assertEqual(error, get(Ctx, is_called)) - end); -ensure_no_reload_when_no_change(_Case, - #ctx{key = Key, handle = Handle} = Ctx) -> - ?_test(begin - Version = Handle:version(), - subscribe(Ctx, test_app, Key), - timer:sleep(?RELOAD_WAIT), - ?assertEqual(Version, Handle:version()), - ?assertEqual(error, get(Ctx, is_called)) - end). - -check_providers_order(#ctx{handle = Handle, kv = KV, key = Key} = Ctx) -> - ?_test(begin - Result = couch_epi:apply(Handle, Key, inc, [KV, 2], [pipe]), - ?assertMatch([KV, 4], Result), - Order = [element(2, get(Ctx, K)) || K <- [inc1, inc2]], - ?assertEqual(Order, [3, 4]), - ok - end). - -%% ------------------------------------------------------------------ -%% Internal Function Definitions -%% ------------------------------------------------------------------ - -generate_module(Name, Body) -> - Tokens = couch_epi_codegen:scan(Body), - couch_epi_codegen:generate(Name, Tokens). - -update(Case, #ctx{pid = Pid, modules = Modules} = Ctx) -> - update_definitions(Case, Ctx), - upgrade_release(Pid, Modules), - wait_update(Ctx). - -update_definitions(data_file, #ctx{file = File}) -> - {ok, _} = file:copy(?DATA_FILE2, File), - ok; -update_definitions(static_data_module, #ctx{}) -> - ok = generate_module(provider, ?DATA_MODULE2(provider)); -update_definitions(callback_data_module, #ctx{kv = Kv}) -> - Value = [ - {[complex, key, 2], [ - {type, counter}, - {desc, bar} - ]}, - {[complex, key, 1], [ - {type, counter}, - {desc, updated_foo} - ]} - ], - save(Kv, data, Value), - ok; -update_definitions(functions, #ctx{}) -> - ok = generate_module(provider1, ?MODULE2(provider1)). - -subscribe(#ctx{kv = Kv}, _App, _Key) -> - call(Kv, empty), - ok. - -maybe_wait(Opts) -> - case lists:member(concurrent, Opts) of - true -> - timer:sleep(?RELOAD_WAIT); - false -> - ok - end. - -wait_update(Ctx) -> - case get(Ctx, is_called) of - error -> - timer:sleep(?RELOAD_WAIT), - wait_update(Ctx); - _ -> ok - end. - -%% ------------ -%% State tracer - -save(Kv, Key, Value) -> - call(Kv, {set, Key, Value}). 
- -get(#ctx{kv = Kv}, Key) -> - call(Kv, {get, Key}); -get(Kv, Key) -> - call(Kv, {get, Key}). - -call(Server, Msg) -> - Ref = make_ref(), - Server ! {{Ref, self()}, Msg}, - receive - {reply, Ref, Reply} -> - Reply - after ?TIMEOUT -> - {error, {timeout, Msg}} - end. - -reply({Ref, From}, Msg) -> - From ! {reply, Ref, Msg}. - -start_state_storage() -> - Pid = state_storage(), - Name = ?temp_atom(), - register(Name, Pid), - Name. - -state_storage() -> - spawn_link(fun() -> state_storage(dict:new()) end). - -state_storage(Dict) -> - receive - {From, {set, Key, Value}} -> - reply(From, ok), - state_storage(dict:store(Key, Value, Dict)); - {From, {get, Key}} -> - reply(From, dict:find(Key, Dict)), - state_storage(Dict); - {From, empty} -> - reply(From, ok), - state_storage(dict:new()); - {From, stop} -> - reply(From, ok) - end. diff --git a/src/couch_epi/test/eunit/couch_epi_basic_test.erl b/src/couch_epi/test/eunit/couch_epi_basic_test.erl new file mode 100644 index 000000000..587d1564e --- /dev/null +++ b/src/couch_epi/test/eunit/couch_epi_basic_test.erl @@ -0,0 +1,137 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_epi_basic_test). + +-export([ + start_link/0 +]). + +-export([ + app/0, + providers/0, + services/0, + data_providers/0, + data_subscriptions/0, + processes/0, + notify/3 +]). + +-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}). + + +start_link() -> ok. + + +%% BEGIN couch_epi_plugin behaviour callbacks + + +app() -> test_app. + + +providers() -> + [ + {my_service, provider1}, + {my_service, provider2} + ]. + + +services() -> + [ + {my_service, ?MODULE} + ]. + + +data_providers() -> + [ + {{test_app, descriptions}, {static_module, ?MODULE}, [{interval, 100}]} + ]. + + +data_subscriptions() -> + [ + {test_app, descriptions} + ]. + + +processes() -> + [ + {?MODULE, [?CHILD(extra_process, worker)]}, + {?MODULE, [{to_replace, {new, start_link, [bar]}, + permanent, 5000, worker, [bar]}]} + ]. + + +notify(_Key, _OldData, _NewData) -> + ok. + + +%% END couch_epi_plugin behaviour callbacks + + +parse_child_id(Id) when is_atom(Id) -> + Id; +parse_child_id(Id) -> + ["couch_epi_codechange_monitor", ServiceName, KindStr] + = string:tokens(Id, "|"), + Kind = list_to_atom(KindStr), + case string:tokens(ServiceName, ":") of + [ServiceId, Key] -> + {{list_to_atom(ServiceId), list_to_atom(Key)}, Kind}; + [Key] -> + {list_to_atom(Key), Kind} + end. + + +-include_lib("eunit/include/eunit.hrl"). 
+ +basic_test() -> + Expected = lists:sort([ + {extra_process, [], [extra_process]}, + {to_replace, [bar], [bar]}, + {{my_service, providers}, + [couch_epi_functions_gen_my_service], + [couch_epi_codechange_monitor, couch_epi_functions_gen_my_service, + provider1, provider2]}, + {{my_service, services}, + [couch_epi_functions_gen_my_service], + lists:sort([couch_epi_codechange_monitor, + couch_epi_functions_gen_my_service, ?MODULE])}, + {{{test_app, descriptions}, data_subscriptions}, + [couch_epi_data_gen_test_app_descriptions], + lists:sort([couch_epi_codechange_monitor, + couch_epi_data_gen_test_app_descriptions, ?MODULE])}, + {{{test_app, descriptions}, data_providers}, + [couch_epi_data_gen_test_app_descriptions], + lists:sort([couch_epi_codechange_monitor, + couch_epi_data_gen_test_app_descriptions, ?MODULE])} + ]), + + ToReplace = {to_replace, + {old, start_link, [foo]}, permanent, 5000, worker, [foo]}, + Children = lists:sort(couch_epi_sup:plugin_childspecs( + ?MODULE, [?MODULE], [ToReplace])), + Results = [ + {parse_child_id(Id), Args, lists:sort(Modules)} + || {Id, {_M, _F, Args}, _, _, _, Modules} <- Children + ], + + Tests = lists:zip(Expected, Results), + [?assertEqual(Expect, Result) || {Expect, Result} <- Tests], + + ExpectedChild = {to_replace, {new, start_link, [bar]}, + permanent, 5000, worker, [bar]}, + ?assertEqual( + ExpectedChild, + lists:keyfind(to_replace, 1, Children)), + + ok. diff --git a/src/couch_epi/test/eunit/couch_epi_tests.erl b/src/couch_epi/test/eunit/couch_epi_tests.erl new file mode 100644 index 000000000..12d8610c1 --- /dev/null +++ b/src/couch_epi/test/eunit/couch_epi_tests.erl @@ -0,0 +1,690 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_epi_tests). + +-include_lib("couch/include/couch_eunit.hrl"). + +-define(DATA_FILE1, ?ABS_PATH("test/eunit/fixtures/app_data1.cfg")). +-define(DATA_FILE2, ?ABS_PATH("test/eunit/fixtures/app_data2.cfg")). + +-export([notify_cb/4, save/3, get/2]). + +-record(ctx, {file, handle, pid, kv, key, modules = []}). + +-define(TIMEOUT, 5000). +-define(RELOAD_WAIT, 1000). + +-define(temp_atom, + fun() -> + {A, B, C} = os:timestamp(), + list_to_atom(lists:flatten(io_lib:format("~p~p~p", [A, B, C]))) + end). + +-define(MODULE1(Name), " + -export([inc/2, fail/2]). + + inc(KV, A) -> + Reply = A + 1, + couch_epi_tests:save(KV, inc1, Reply), + [KV, Reply]. + + fail(KV, A) -> + inc(KV, A). +"). + +-define(MODULE2(Name), " + -export([inc/2, fail/2]). + + inc(KV, A) -> + Reply = A + 1, + couch_epi_tests:save(KV, inc2, Reply), + [KV, Reply]. + + fail(KV, _A) -> + couch_epi_tests:save(KV, inc2, check_error), + throw(check_error). +"). + +-define(DATA_MODULE1(Name), " + -export([data/0]). + + data() -> + [ + {[complex, key, 1], [ + {type, counter}, + {desc, foo} + ]} + ]. +"). + +-define(DATA_MODULE2(Name), " + -export([data/0]). + + data() -> + [ + {[complex, key, 2], [ + {type, counter}, + {desc, bar} + ]}, + {[complex, key, 1], [ + {type, counter}, + {desc, updated_foo} + ]} + ]. +"). 
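
The string macros above become real loaded modules at run time through
couch_epi_codegen (see generate_module/2 near the end of this file). A minimal
self-contained sketch of the underlying OTP mechanism, erl_scan plus erl_parse
plus compile:forms; the module name and body here are invented for
illustration:

    load_demo_module() ->
        Src = "-module(epi_demo).\n-export([ping/0]).\nping() -> pong.\n",
        {ok, Tokens, _} = erl_scan:string(Src),
        Forms = [begin {ok, F} = erl_parse:parse_form(Ts), F end
                 || Ts <- split_forms(Tokens, [], [])],
        {ok, Mod, Bin} = compile:forms(Forms),
        {module, Mod} = code:load_binary(Mod, "epi_demo.erl", Bin),
        pong = epi_demo:ping().

    %% split a token stream into forms at each dot token
    split_forms([], [], Acc) ->
        lists:reverse(Acc);
    split_forms([{dot, _} = D | Rest], Cur, Acc) ->
        split_forms(Rest, [], [lists:reverse([D | Cur]) | Acc]);
    split_forms([T | Rest], Cur, Acc) ->
        split_forms(Rest, [T | Cur], Acc).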
+ +-define(DATA_MODULE3(Name, Kv), " + -export([data/0]). + +data() -> + {ok, Data} = couch_epi_tests:get('" ++ atom_to_list(Kv) ++ "', data), + Data. +"). + +%% ------------------------------------------------------------------ +%% couch_epi_plugin behaviour +%% ------------------------------------------------------------------ + +plugin_module([KV, Spec]) when is_tuple(Spec) -> + SpecStr = io_lib:format("~w", [Spec]), + KVStr = "'" ++ atom_to_list(KV) ++ "'", + " + -compile([export_all]). + + app() -> test_app. + providers() -> + []. + + services() -> + []. + + data_providers() -> + [ + {{test_app, descriptions}, " ++ SpecStr ++ ", [{interval, 100}]} + ]. + + data_subscriptions() -> + [ + {test_app, descriptions} + ]. + + processes() -> []. + + notify(Key, OldData, Data) -> + couch_epi_tests:notify_cb(Key, OldData, Data, " ++ KVStr ++ "). + "; +plugin_module([KV, Provider]) when is_atom(Provider) -> + KVStr = "'" ++ atom_to_list(KV) ++ "'", + " + -compile([export_all]). + + app() -> test_app. + providers() -> + [ + {my_service, " ++ atom_to_list(Provider) ++ "} + ]. + + services() -> + [ + {my_service, " ++ atom_to_list(Provider) ++ "} + ]. + + data_providers() -> + []. + + data_subscriptions() -> + []. + + processes() -> []. + + notify(Key, OldData, Data) -> + couch_epi_tests:notify_cb(Key, OldData, Data, " ++ KVStr ++ "). + ". + + +notify_cb(Key, OldData, Data, KV) -> + save(KV, is_called, {Key, OldData, Data}). + +start_epi(Plugins) -> + application:load(couch_epi), + PluginsModules = lists:map(fun({Module, Body}) -> + ok = generate_module(Module, Body), + Module + end, Plugins), + application:set_env(couch_epi, plugins, PluginsModules), + application:start(couch_epi). + +setup(data_file) -> + error_logger:tty(false), + + Key = {test_app, descriptions}, + File = ?tempfile(), + {ok, _} = file:copy(?DATA_FILE1, File), + KV = start_state_storage(), + + ok = start_epi([{provider_epi, plugin_module([KV, {file, File}])}]), + + Pid = whereis(couch_epi:get_handle(Key)), + + + #ctx{ + file = File, + key = Key, + handle = couch_epi:get_handle(Key), + kv = KV, + pid = Pid}; +setup(static_data_module) -> + error_logger:tty(false), + + Key = {test_app, descriptions}, + + ok = generate_module(provider, ?DATA_MODULE1(provider)), + KV = start_state_storage(), + + ok = start_epi([{provider_epi, plugin_module([KV, {static_module, provider}])}]), + + Pid = whereis(couch_epi:get_handle(Key)), + Handle = couch_epi:get_handle(Key), + + #ctx{ + key = Key, + handle = Handle, + modules = [Handle, provider], + kv = KV, + pid = Pid}; +setup(callback_data_module) -> + error_logger:tty(false), + + Key = {test_app, descriptions}, + + KV = start_state_storage(), + Value = [ + {[complex, key, 1], [ + {type, counter}, + {desc, foo} + ]} + ], + save(KV, data, Value), + + ok = generate_module(provider, ?DATA_MODULE3(provider, KV)), + + ok = start_epi([{provider_epi, plugin_module([KV, {callback_module, provider}])}]), + + Pid = whereis(couch_epi:get_handle(Key)), + Handle = couch_epi:get_handle(Key), + + #ctx{ + key = Key, + handle = Handle, + modules = [Handle, provider], + kv = KV, + pid = Pid}; +setup(functions) -> + Key = my_service, + error_logger:tty(false), + + ok = generate_module(provider1, ?MODULE1(provider1)), + ok = generate_module(provider2, ?MODULE2(provider2)), + + KV = start_state_storage(), + + ok = start_epi([ + {provider_epi1, plugin_module([KV, provider1])}, + {provider_epi2, plugin_module([KV, provider2])} + ]), + + Pid = whereis(couch_epi:get_handle(Key)), + Handle = couch_epi:get_handle(Key), + + 
#ctx{ + key = Key, + handle = Handle, + modules = [Handle, provider1, provider2], + kv = KV, + pid = Pid}; +setup({options, _Opts}) -> + setup(functions). + +teardown(_Case, #ctx{} = Ctx) -> + teardown(Ctx). + +teardown(#ctx{file = File} = Ctx) when File /= undefined -> + file:delete(File), + teardown(Ctx#ctx{file = undefined}); +teardown(#ctx{kv = KV}) -> + call(KV, stop), + application:stop(couch_epi), + ok. + +upgrade_release(Pid, Modules) -> + sys:suspend(Pid), + [ok = sys:change_code(Pid, M, undefined, []) || M <- Modules], + sys:resume(Pid), + ok. + +epi_config_update_test_() -> + Funs = [ + fun ensure_notified_when_changed/2, + fun ensure_not_notified_when_no_change/2 + ], + Cases = [ + data_file, + static_data_module, + callback_data_module, + functions + ], + { + "config update tests", + [make_case("Check notifications for: ", Cases, Funs)] + }. + +epi_data_source_test_() -> + Funs = [ + fun check_dump/2, + fun check_get/2, + fun check_get_value/2, + fun check_by_key/2, + fun check_by_source/2, + fun check_keys/2, + fun check_subscribers/2 + ], + Cases = [ + data_file, + static_data_module, + callback_data_module + ], + { + "epi data API tests", + [make_case("Check query API for: ", Cases, Funs)] + }. + + +epi_apply_test_() -> + { + "epi dispatch tests", + { + foreach, + fun() -> setup(functions) end, + fun teardown/1, + [ + fun check_pipe/1, + fun check_broken_pipe/1, + fun ensure_fail/1, + fun ensure_fail_pipe/1 + ] + } + }. + +epi_providers_order_test_() -> + { + "epi providers' order test", + { + foreach, + fun() -> setup(functions) end, + fun teardown/1, + [ + fun check_providers_order/1 + ] + } + }. + + +epi_reload_test_() -> + Cases = [ + data_file, + static_data_module, + callback_data_module, + functions + ], + Funs = [ + fun ensure_reload_if_manually_triggered/2, + fun ensure_reload_if_changed/2, + fun ensure_no_reload_when_no_change/2 + ], + { + "epi reload tests", + [make_case("Check reload for: ", Cases, Funs)] + }. + +apply_options_test_() -> + Funs = [fun ensure_apply_is_called/2], + Setups = {options, valid_options_permutations()}, + { + "apply options tests", + [make_case("Apply with options: ", Setups, Funs)] + }. + + +make_case(Msg, {Tag, P}, Funs) -> + Cases = [{Tag, Case} || Case <- P], + make_case(Msg, Cases, Funs); +make_case(Msg, P, Funs) -> + [{format_case_name(Msg, Case), [ + { + foreachx, fun setup/1, fun teardown/2, + [ + {Case, make_fun(Fun, 2)} || Fun <- Funs + ] + } + ]} || Case <- P]. + +make_fun(Fun, Arity) -> + {arity, A} = lists:keyfind(arity, 1, erlang:fun_info(Fun)), + make_fun(Fun, Arity, A). + +make_fun(Fun, A, A) -> Fun; +make_fun(Fun, 2, 1) -> fun(_, A) -> Fun(A) end; +make_fun(Fun, 1, 2) -> fun(A) -> Fun(undefined, A) end. + +format_case_name(Msg, Case) -> + lists:flatten(Msg ++ io_lib:format("~p", [Case])). + +valid_options_permutations() -> + [ + [], + [ignore_errors], + [pipe], + [pipe, ignore_errors], + [concurrent], + [concurrent, ignore_errors] + ]. 
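
The option atoms above select the dispatch strategy for couch_epi:apply/5.
pipe threads each provider's return value into the next provider's argument
list, which is why [KV, 2] comes back as [KV, 4] once two inc/2 providers have
each added 1 (see check_pipe/1 and check_providers_order/1 below). A minimal
model of that fold, assuming every module exports F and returns the next
argument list:

    pipe_apply(Modules, F, Args) ->
        lists:foldl(fun(Mod, AccArgs) ->
            apply(Mod, F, AccArgs)
        end, Args, Modules).

    %% e.g. pipe_apply([provider1, provider2], inc, [KV, 2]) yields [KV, 4]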
+ +ensure_notified_when_changed(functions, #ctx{key = Key} = Ctx) -> + ?_test(begin + subscribe(Ctx, test_app, Key), + update(functions, Ctx), + Result = get(Ctx, is_called), + ExpectedDefs = [ + {provider1,[{inc,2},{fail,2}]}, + {provider2,[{inc,2},{fail,2}]} + ], + ?assertEqual({ok, {Key, ExpectedDefs, ExpectedDefs}}, Result), + ok + end); +ensure_notified_when_changed(Case, #ctx{key = Key} = Ctx) -> + ?_test(begin + subscribe(Ctx, test_app, Key), + update(Case, Ctx), + ExpectedData = lists:usort([ + {[complex, key, 1], [{type, counter}, {desc, updated_foo}]}, + {[complex, key, 2], [{type, counter}, {desc, bar}]} + ]), + Result = get(Ctx, is_called), + ?assertMatch({ok, {Key, _OldData, _Data}}, Result), + {ok, {Key, OldData, Data}} = Result, + ?assertMatch(ExpectedData, lists:usort(Data)), + ?assertMatch( + [{[complex, key, 1], [{type, counter}, {desc, foo}]}], + lists:usort(OldData)) + end). + +ensure_not_notified_when_no_change(_Case, #ctx{key = Key} = Ctx) -> + ?_test(begin + subscribe(Ctx, test_app, Key), + timer:sleep(?RELOAD_WAIT), + ?assertMatch(error, get(Ctx, is_called)) + end). + +ensure_apply_is_called({options, Opts}, #ctx{handle = Handle, kv = KV, key = Key} = Ctx) -> + ?_test(begin + couch_epi:apply(Handle, Key, inc, [KV, 2], Opts), + maybe_wait(Opts), + ?assertMatch({ok, _}, get(Ctx, inc1)), + ?assertMatch({ok, _}, get(Ctx, inc2)), + ok + end); +ensure_apply_is_called(undefined, #ctx{} = Ctx) -> + ensure_apply_is_called({options, []}, Ctx). + +check_pipe(#ctx{handle = Handle, kv = KV, key = Key}) -> + ?_test(begin + Result = couch_epi:apply(Handle, Key, inc, [KV, 2], [pipe]), + ?assertMatch([KV, 4], Result), + ok + end). + +check_broken_pipe(#ctx{handle = Handle, kv = KV, key = Key} = Ctx) -> + ?_test(begin + Result = couch_epi:apply(Handle, Key, fail, [KV, 2], [pipe, ignore_errors]), + ?assertMatch([KV, 3], Result), + ?assertMatch([3, check_error], pipe_state(Ctx)), + ok + end). + +ensure_fail_pipe(#ctx{handle = Handle, kv = KV, key = Key}) -> + ?_test(begin + ?assertThrow(check_error, + couch_epi:apply(Handle, Key, fail, [KV, 2], [pipe])), + ok + end). + +ensure_fail(#ctx{handle = Handle, kv = KV, key = Key}) -> + ?_test(begin + ?assertThrow(check_error, + couch_epi:apply(Handle, Key, fail, [KV, 2], [])), + ok + end). + +pipe_state(Ctx) -> + Trace = [get(Ctx, inc1), get(Ctx, inc2)], + lists:usort([State || {ok, State} <- Trace]). + +check_dump(_Case, #ctx{handle = Handle}) -> + ?_test(begin + ?assertMatch( + [[{type, counter}, {desc, foo}]], + couch_epi:dump(Handle)) + end). + +check_get(_Case, #ctx{handle = Handle}) -> + ?_test(begin + ?assertMatch( + [[{type, counter}, {desc, foo}]], + couch_epi:get(Handle, [complex,key, 1])) + end). + +check_get_value(_Case, #ctx{handle = Handle}) -> + ?_test(begin + ?assertMatch( + [{type, counter}, {desc, foo}], + couch_epi:get_value(Handle, test_app, [complex,key, 1])) + end). + +check_by_key(_Case, #ctx{handle = Handle}) -> + ?_test(begin + ?assertMatch( + [{[complex, key, 1], + [{test_app, [{type, counter}, {desc, foo}]}]}], + couch_epi:by_key(Handle)), + ?assertMatch( + [{test_app, [{type, counter}, {desc, foo}]}], + couch_epi:by_key(Handle, [complex, key, 1])) + end). + +check_by_source(_Case, #ctx{handle = Handle}) -> + ?_test(begin + ?assertMatch( + [{test_app, + [{[complex,key, 1], [{type, counter}, {desc, foo}]}]}], + couch_epi:by_source(Handle)), + ?assertMatch( + [{[complex,key, 1], [{type, counter}, {desc, foo}]}], + couch_epi:by_source(Handle, test_app)) + end). 
+ +check_keys(_Case, #ctx{handle = Handle}) -> + ?_assertMatch([[complex,key,1]], couch_epi:keys(Handle)). + +check_subscribers(_Case, #ctx{handle = Handle}) -> + ?_assertMatch([test_app], couch_epi:subscribers(Handle)). + + +ensure_reload_if_manually_triggered(Case, #ctx{pid = Pid, key = Key} = Ctx) -> + ?_test(begin + subscribe(Ctx, test_app, Key), + update_definitions(Case, Ctx), + couch_epi_module_keeper:reload(Pid), + timer:sleep(?RELOAD_WAIT), + ?assertNotEqual(error, get(Ctx, is_called)) + end). + +ensure_reload_if_changed(data_file = Case, + #ctx{key = Key, handle = Handle} = Ctx) -> + ?_test(begin + Version = Handle:version(), + subscribe(Ctx, test_app, Key), + update_definitions(Case, Ctx), + timer:sleep(?RELOAD_WAIT), + ?assertNotEqual(Version, Handle:version()), + ?assertNotEqual(error, get(Ctx, is_called)) + end); +ensure_reload_if_changed(Case, + #ctx{key = Key, handle = Handle} = Ctx) -> + ?_test(begin + Version = Handle:version(), + subscribe(Ctx, test_app, Key), + update(Case, Ctx), + ?assertNotEqual(Version, Handle:version()), + timer:sleep(?RELOAD_WAIT), %% Allow some time for notify to be called + ?assertNotEqual(error, get(Ctx, is_called)) + end). + +ensure_no_reload_when_no_change(functions, + #ctx{pid = Pid, key = Key, handle = Handle, modules = Modules} = Ctx) -> + ?_test(begin + Version = Handle:version(), + subscribe(Ctx, test_app, Key), + upgrade_release(Pid, Modules), + ?assertEqual(Version, Handle:version()), + ?assertEqual(error, get(Ctx, is_called)) + end); +ensure_no_reload_when_no_change(_Case, + #ctx{key = Key, handle = Handle} = Ctx) -> + ?_test(begin + Version = Handle:version(), + subscribe(Ctx, test_app, Key), + timer:sleep(?RELOAD_WAIT), + ?assertEqual(Version, Handle:version()), + ?assertEqual(error, get(Ctx, is_called)) + end). + +check_providers_order(#ctx{handle = Handle, kv = KV, key = Key} = Ctx) -> + ?_test(begin + Result = couch_epi:apply(Handle, Key, inc, [KV, 2], [pipe]), + ?assertMatch([KV, 4], Result), + Order = [element(2, get(Ctx, K)) || K <- [inc1, inc2]], + ?assertEqual(Order, [3, 4]), + ok + end). + +%% ------------------------------------------------------------------ +%% Internal Function Definitions +%% ------------------------------------------------------------------ + +generate_module(Name, Body) -> + Tokens = couch_epi_codegen:scan(Body), + couch_epi_codegen:generate(Name, Tokens). + +update(Case, #ctx{pid = Pid, modules = Modules} = Ctx) -> + update_definitions(Case, Ctx), + upgrade_release(Pid, Modules), + wait_update(Ctx). + +update_definitions(data_file, #ctx{file = File}) -> + {ok, _} = file:copy(?DATA_FILE2, File), + ok; +update_definitions(static_data_module, #ctx{}) -> + ok = generate_module(provider, ?DATA_MODULE2(provider)); +update_definitions(callback_data_module, #ctx{kv = Kv}) -> + Value = [ + {[complex, key, 2], [ + {type, counter}, + {desc, bar} + ]}, + {[complex, key, 1], [ + {type, counter}, + {desc, updated_foo} + ]} + ], + save(Kv, data, Value), + ok; +update_definitions(functions, #ctx{}) -> + ok = generate_module(provider1, ?MODULE2(provider1)). + +subscribe(#ctx{kv = Kv}, _App, _Key) -> + call(Kv, empty), + ok. + +maybe_wait(Opts) -> + case lists:member(concurrent, Opts) of + true -> + timer:sleep(?RELOAD_WAIT); + false -> + ok + end. + +wait_update(Ctx) -> + case get(Ctx, is_called) of + error -> + timer:sleep(?RELOAD_WAIT), + wait_update(Ctx); + _ -> ok + end. + +%% ------------ +%% State tracer + +save(Kv, Key, Value) -> + call(Kv, {set, Key, Value}). 
+ +get(#ctx{kv = Kv}, Key) -> + call(Kv, {get, Key}); +get(Kv, Key) -> + call(Kv, {get, Key}). + +call(Server, Msg) -> + Ref = make_ref(), + Server ! {{Ref, self()}, Msg}, + receive + {reply, Ref, Reply} -> + Reply + after ?TIMEOUT -> + {error, {timeout, Msg}} + end. + +reply({Ref, From}, Msg) -> + From ! {reply, Ref, Msg}. + +start_state_storage() -> + Pid = state_storage(), + Name = ?temp_atom(), + register(Name, Pid), + Name. + +state_storage() -> + spawn_link(fun() -> state_storage(dict:new()) end). + +state_storage(Dict) -> + receive + {From, {set, Key, Value}} -> + reply(From, ok), + state_storage(dict:store(Key, Value, Dict)); + {From, {get, Key}} -> + reply(From, dict:find(Key, Dict)), + state_storage(Dict); + {From, empty} -> + reply(From, ok), + state_storage(dict:new()); + {From, stop} -> + reply(From, ok) + end. diff --git a/src/couch_epi/test/eunit/fixtures/app_data1.cfg b/src/couch_epi/test/eunit/fixtures/app_data1.cfg new file mode 100644 index 000000000..4c9f3fe2d --- /dev/null +++ b/src/couch_epi/test/eunit/fixtures/app_data1.cfg @@ -0,0 +1,4 @@ +{[complex, key, 1], [ + {type, counter}, + {desc, foo} +]}. diff --git a/src/couch_epi/test/eunit/fixtures/app_data2.cfg b/src/couch_epi/test/eunit/fixtures/app_data2.cfg new file mode 100644 index 000000000..e5a5ffb8c --- /dev/null +++ b/src/couch_epi/test/eunit/fixtures/app_data2.cfg @@ -0,0 +1,8 @@ +{[complex, key, 2], [ + {type, counter}, + {desc, bar} +]}. +{[complex, key, 1], [ + {type, counter}, + {desc, updated_foo} +]}. diff --git a/src/couch_epi/test/fixtures/app_data1.cfg b/src/couch_epi/test/fixtures/app_data1.cfg deleted file mode 100644 index 4c9f3fe2d..000000000 --- a/src/couch_epi/test/fixtures/app_data1.cfg +++ /dev/null @@ -1,4 +0,0 @@ -{[complex, key, 1], [ - {type, counter}, - {desc, foo} -]}. diff --git a/src/couch_epi/test/fixtures/app_data2.cfg b/src/couch_epi/test/fixtures/app_data2.cfg deleted file mode 100644 index e5a5ffb8c..000000000 --- a/src/couch_epi/test/fixtures/app_data2.cfg +++ /dev/null @@ -1,8 +0,0 @@ -{[complex, key, 2], [ - {type, counter}, - {desc, bar} -]}. -{[complex, key, 1], [ - {type, counter}, - {desc, updated_foo} -]}. diff --git a/src/couch_index/test/couch_index_compaction_tests.erl b/src/couch_index/test/couch_index_compaction_tests.erl deleted file mode 100644 index 53316d944..000000000 --- a/src/couch_index/test/couch_index_compaction_tests.erl +++ /dev/null @@ -1,106 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_index_compaction_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(WAIT_TIMEOUT, 1000). - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - couch_db:close(Db), - fake_index(DbName), - {ok, IndexerPid} = couch_index_server:get_index(test_index, Db, undefined), - ?assertNot(is_opened(Db)), - {Db, IndexerPid}. 
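
fake_index/1 just below builds the entire test_index behaviour out of meck
expectations: [non_strict] lets meck fabricate a module with no real source
behind it, and meck:seq/1 makes successive compact/3 calls return different
values so recompaction is triggered. A minimal sketch of the same pattern with
invented module and function names:

    ok = meck:new(my_fake, [non_strict]),
    ok = meck:expect(my_fake, ping, fun() -> pong end),
    ok = meck:expect(my_fake, count, ['_'], meck:seq([1, 2])),
    pong = my_fake:ping(),
    1 = my_fake:count(x),
    2 = my_fake:count(x),
    true = meck:validate(my_fake),
    meck:unload(my_fake).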
- -fake_index(DbName) -> - ok = meck:new([test_index], [non_strict]), - ok = meck:expect(test_index, init, ['_', '_'], {ok, 10}), - ok = meck:expect(test_index, open, fun(_Db, State) -> - {ok, State} - end), - ok = meck:expect(test_index, compact, ['_', '_', '_'], - meck:seq([{ok, 9}, {ok, 10}])), %% to trigger recompaction - ok = meck:expect(test_index, commit, ['_'], ok), - ok = meck:expect(test_index, get, fun - (db_name, _) -> - DbName; - (idx_name, _) -> - <<"idx_name">>; - (signature, _) -> - <<61,237,157,230,136,93,96,201,204,17,137,186,50,249,44,135>>; - (update_seq, Seq) -> - Seq - end), - ok = meck:expect(test_index, close, ['_'], ok), - ok = meck:expect(test_index, swap_compacted, fun(_, NewState) -> - {ok, NewState} - end). - -teardown(_) -> - meck:unload(test_index). - -compaction_test_() -> - { - "Check compaction", - { - setup, - fun() -> test_util:start_couch([]) end, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun hold_db_for_recompaction/1 - ] - } - } - }. - - -hold_db_for_recompaction({Db, Idx}) -> - ?_test(begin - ?assertNot(is_opened(Db)), - ok = meck:reset(test_index), - {ok, Monitor} = couch_index:compact(Idx, [monitor]), - - %% we expect Mod:commit/1 to be called twice - %% once for compact and once for recompact - meck:wait(2, test_index, commit, ['_'], 5000), - ?assertEqual(1, meck:num_calls(test_index, compact, ['_', '_', []])), - ?assertEqual(1, meck:num_calls(test_index, compact, ['_', '_', [recompact]])), - - %% wait compaction finish - receive - {'DOWN', Monitor, _, _, _} -> ok - after 5000 -> - throw(timeout) - end, - - ?assertEqual(ok, wait_db_close(Db)), - ok - end). - -wait_db_close(Db) -> - test_util:wait(fun() -> - case is_opened(Db) of - false -> ok; - true -> wait - end - end, ?WAIT_TIMEOUT). - -is_opened(Db) -> - Monitors = [M || M <- couch_db:monitored_by(Db), M =/= self()], - Monitors /= []. diff --git a/src/couch_index/test/couch_index_ddoc_updated_tests.erl b/src/couch_index/test/couch_index_ddoc_updated_tests.erl deleted file mode 100644 index 0e23adf91..000000000 --- a/src/couch_index/test/couch_index_ddoc_updated_tests.erl +++ /dev/null @@ -1,145 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_index_ddoc_updated_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - - -start() -> - fake_index(), - Ctx = test_util:start_couch([mem3, fabric]), - DbName = ?tempdb(), - ok = fabric:create_db(DbName, [?ADMIN_CTX]), - {Ctx, DbName}. - - -stop({Ctx, DbName}) -> - meck:unload(test_index), - ok = fabric:delete_db(DbName, [?ADMIN_CTX]), - DbDir = config:get("couchdb", "database_dir", "."), - WaitFun = fun() -> - filelib:fold_files(DbDir, <<".*", DbName/binary, "\.[0-9]+.*">>, - true, fun(_F, _A) -> wait end, ok) - end, - ok = test_util:wait(WaitFun), - test_util:stop_couch(Ctx), - ok. - - -ddoc_update_test_() -> - { - "Check ddoc update actions", - { - setup, - fun start/0, fun stop/1, - fun check_all_indexers_exit_on_ddoc_change/1 - } - }. 
- - -check_all_indexers_exit_on_ddoc_change({_Ctx, DbName}) -> - ?_test(begin - [DbShard1 | RestDbShards] = lists:map(fun(Sh) -> - {ok, ShardDb} = couch_db:open(mem3:name(Sh), []), - ShardDb - end, mem3:local_shards(mem3:dbname(DbName))), - - % create a DDoc on Db1 - DDocID = <<"idx_name">>, - DDocJson = couch_doc:from_json_obj({[ - {<<"_id">>, DDocID}, - {<<"value">>, 1} - ]}), - {ok, _Rev} = couch_db:update_doc(DbShard1, DDocJson, []), - {ok, DbShard} = couch_db:reopen(DbShard1), - {ok, DDoc} = couch_db:open_doc( - DbShard, DDocID, [ejson_body, ?ADMIN_CTX]), - DbShards = [DbShard | RestDbShards], - N = length(DbShards), - - % run couch_index process for each shard database - ok = meck:reset(test_index), - lists:foreach(fun(ShardDb) -> - couch_index_server:get_index(test_index, ShardDb, DDoc) - end, DbShards), - - IndexesBefore = get_indexes_by_ddoc(DDocID, N), - ?assertEqual(N, length(IndexesBefore)), - - AliveBefore = lists:filter(fun erlang:is_process_alive/1, IndexesBefore), - ?assertEqual(N, length(AliveBefore)), - - % update ddoc - DDocJson2 = couch_doc:from_json_obj({[ - {<<"_id">>, DDocID}, - {<<"value">>, 2}, - {<<"_rev">>, couch_doc:rev_to_str(DDoc#doc.revs)} - ]}), - {ok, _} = couch_db:update_doc(DbShard, DDocJson2, []), - - % assert that all index processes exit after ddoc updated - ok = meck:reset(test_index), - couch_index_server:handle_db_event( - couch_db:name(DbShard), {ddoc_updated, DDocID}, {st, ""}), - - ok = meck:wait(N, test_index, init, ['_', '_'], 5000), - IndexesAfter = get_indexes_by_ddoc(DDocID, 0), - ?assertEqual(0, length(IndexesAfter)), - - %% assert that previously running indexes are gone - AliveAfter = lists:filter(fun erlang:is_process_alive/1, IndexesBefore), - ?assertEqual(0, length(AliveAfter)), - ok - end). - - -fake_index() -> - ok = meck:new([test_index], [non_strict]), - ok = meck:expect(test_index, init, fun(Db, DDoc) -> - {ok, {couch_db:name(Db), DDoc}} - end), - ok = meck:expect(test_index, open, fun(_Db, State) -> - {ok, State} - end), - ok = meck:expect(test_index, get, fun - (db_name, {DbName, _DDoc}) -> - DbName; - (idx_name, {_DbName, DDoc}) -> - DDoc#doc.id; - (signature, {_DbName, DDoc}) -> - couch_hash:md5_hash(term_to_binary(DDoc)); - (update_seq, Seq) -> - Seq - end), - ok = meck:expect(test_index, shutdown, ['_'], ok). - - -get_indexes_by_ddoc(DDocID, N) -> - Indexes = test_util:wait(fun() -> - Indxs = ets:match_object( - couchdb_indexes_by_db, {'$1', {DDocID, '$2'}}), - case length(Indxs) == N of - true -> - Indxs; - false -> - wait - end - end), - lists:foldl(fun({DbName, {_DDocID, Sig}}, Acc) -> - case ets:lookup(couchdb_indexes_by_sig, {DbName, Sig}) of - [{_, Pid}] -> [Pid|Acc]; - _ -> Acc - end - end, [], Indexes). - diff --git a/src/couch_index/test/eunit/couch_index_compaction_tests.erl b/src/couch_index/test/eunit/couch_index_compaction_tests.erl new file mode 100644 index 000000000..53316d944 --- /dev/null +++ b/src/couch_index/test/eunit/couch_index_compaction_tests.erl @@ -0,0 +1,106 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_index_compaction_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(WAIT_TIMEOUT, 1000). + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + couch_db:close(Db), + fake_index(DbName), + {ok, IndexerPid} = couch_index_server:get_index(test_index, Db, undefined), + ?assertNot(is_opened(Db)), + {Db, IndexerPid}. + +fake_index(DbName) -> + ok = meck:new([test_index], [non_strict]), + ok = meck:expect(test_index, init, ['_', '_'], {ok, 10}), + ok = meck:expect(test_index, open, fun(_Db, State) -> + {ok, State} + end), + ok = meck:expect(test_index, compact, ['_', '_', '_'], + meck:seq([{ok, 9}, {ok, 10}])), %% to trigger recompaction + ok = meck:expect(test_index, commit, ['_'], ok), + ok = meck:expect(test_index, get, fun + (db_name, _) -> + DbName; + (idx_name, _) -> + <<"idx_name">>; + (signature, _) -> + <<61,237,157,230,136,93,96,201,204,17,137,186,50,249,44,135>>; + (update_seq, Seq) -> + Seq + end), + ok = meck:expect(test_index, close, ['_'], ok), + ok = meck:expect(test_index, swap_compacted, fun(_, NewState) -> + {ok, NewState} + end). + +teardown(_) -> + meck:unload(test_index). + +compaction_test_() -> + { + "Check compaction", + { + setup, + fun() -> test_util:start_couch([]) end, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun hold_db_for_recompaction/1 + ] + } + } + }. + + +hold_db_for_recompaction({Db, Idx}) -> + ?_test(begin + ?assertNot(is_opened(Db)), + ok = meck:reset(test_index), + {ok, Monitor} = couch_index:compact(Idx, [monitor]), + + %% we expect Mod:commit/1 to be called twice + %% once for compact and once for recompact + meck:wait(2, test_index, commit, ['_'], 5000), + ?assertEqual(1, meck:num_calls(test_index, compact, ['_', '_', []])), + ?assertEqual(1, meck:num_calls(test_index, compact, ['_', '_', [recompact]])), + + %% wait compaction finish + receive + {'DOWN', Monitor, _, _, _} -> ok + after 5000 -> + throw(timeout) + end, + + ?assertEqual(ok, wait_db_close(Db)), + ok + end). + +wait_db_close(Db) -> + test_util:wait(fun() -> + case is_opened(Db) of + false -> ok; + true -> wait + end + end, ?WAIT_TIMEOUT). + +is_opened(Db) -> + Monitors = [M || M <- couch_db:monitored_by(Db), M =/= self()], + Monitors /= []. diff --git a/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl b/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl new file mode 100644 index 000000000..0e23adf91 --- /dev/null +++ b/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl @@ -0,0 +1,145 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_index_ddoc_updated_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). 
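+
+% Scenario covered below: fake_index/0 mecks a minimal indexer module,
+% start/0 creates a clustered database, and
+% check_all_indexers_exit_on_ddoc_change/1 asserts that updating a design
+% document makes every per-shard indexer process exit.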
+ + +start() -> + fake_index(), + Ctx = test_util:start_couch([mem3, fabric]), + DbName = ?tempdb(), + ok = fabric:create_db(DbName, [?ADMIN_CTX]), + {Ctx, DbName}. + + +stop({Ctx, DbName}) -> + meck:unload(test_index), + ok = fabric:delete_db(DbName, [?ADMIN_CTX]), + DbDir = config:get("couchdb", "database_dir", "."), + WaitFun = fun() -> + filelib:fold_files(DbDir, <<".*", DbName/binary, "\.[0-9]+.*">>, + true, fun(_F, _A) -> wait end, ok) + end, + ok = test_util:wait(WaitFun), + test_util:stop_couch(Ctx), + ok. + + +ddoc_update_test_() -> + { + "Check ddoc update actions", + { + setup, + fun start/0, fun stop/1, + fun check_all_indexers_exit_on_ddoc_change/1 + } + }. + + +check_all_indexers_exit_on_ddoc_change({_Ctx, DbName}) -> + ?_test(begin + [DbShard1 | RestDbShards] = lists:map(fun(Sh) -> + {ok, ShardDb} = couch_db:open(mem3:name(Sh), []), + ShardDb + end, mem3:local_shards(mem3:dbname(DbName))), + + % create a DDoc on Db1 + DDocID = <<"idx_name">>, + DDocJson = couch_doc:from_json_obj({[ + {<<"_id">>, DDocID}, + {<<"value">>, 1} + ]}), + {ok, _Rev} = couch_db:update_doc(DbShard1, DDocJson, []), + {ok, DbShard} = couch_db:reopen(DbShard1), + {ok, DDoc} = couch_db:open_doc( + DbShard, DDocID, [ejson_body, ?ADMIN_CTX]), + DbShards = [DbShard | RestDbShards], + N = length(DbShards), + + % run couch_index process for each shard database + ok = meck:reset(test_index), + lists:foreach(fun(ShardDb) -> + couch_index_server:get_index(test_index, ShardDb, DDoc) + end, DbShards), + + IndexesBefore = get_indexes_by_ddoc(DDocID, N), + ?assertEqual(N, length(IndexesBefore)), + + AliveBefore = lists:filter(fun erlang:is_process_alive/1, IndexesBefore), + ?assertEqual(N, length(AliveBefore)), + + % update ddoc + DDocJson2 = couch_doc:from_json_obj({[ + {<<"_id">>, DDocID}, + {<<"value">>, 2}, + {<<"_rev">>, couch_doc:rev_to_str(DDoc#doc.revs)} + ]}), + {ok, _} = couch_db:update_doc(DbShard, DDocJson2, []), + + % assert that all index processes exit after ddoc updated + ok = meck:reset(test_index), + couch_index_server:handle_db_event( + couch_db:name(DbShard), {ddoc_updated, DDocID}, {st, ""}), + + ok = meck:wait(N, test_index, init, ['_', '_'], 5000), + IndexesAfter = get_indexes_by_ddoc(DDocID, 0), + ?assertEqual(0, length(IndexesAfter)), + + %% assert that previously running indexes are gone + AliveAfter = lists:filter(fun erlang:is_process_alive/1, IndexesBefore), + ?assertEqual(0, length(AliveAfter)), + ok + end). + + +fake_index() -> + ok = meck:new([test_index], [non_strict]), + ok = meck:expect(test_index, init, fun(Db, DDoc) -> + {ok, {couch_db:name(Db), DDoc}} + end), + ok = meck:expect(test_index, open, fun(_Db, State) -> + {ok, State} + end), + ok = meck:expect(test_index, get, fun + (db_name, {DbName, _DDoc}) -> + DbName; + (idx_name, {_DbName, DDoc}) -> + DDoc#doc.id; + (signature, {_DbName, DDoc}) -> + couch_hash:md5_hash(term_to_binary(DDoc)); + (update_seq, Seq) -> + Seq + end), + ok = meck:expect(test_index, shutdown, ['_'], ok). + + +get_indexes_by_ddoc(DDocID, N) -> + Indexes = test_util:wait(fun() -> + Indxs = ets:match_object( + couchdb_indexes_by_db, {'$1', {DDocID, '$2'}}), + case length(Indxs) == N of + true -> + Indxs; + false -> + wait + end + end), + lists:foldl(fun({DbName, {_DDocID, Sig}}, Acc) -> + case ets:lookup(couchdb_indexes_by_sig, {DbName, Sig}) of + [{_, Pid}] -> [Pid|Acc]; + _ -> Acc + end + end, [], Indexes). 
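+
+% get_indexes_by_ddoc/2 above reads couch_index_server's internal ETS
+% tables (couchdb_indexes_by_db and couchdb_indexes_by_sig) rather than a
+% public API, so the test can observe the actual indexer pids registered
+% for a design doc id.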
+ diff --git a/src/couch_log/test/couch_log_config_listener_test.erl b/src/couch_log/test/couch_log_config_listener_test.erl deleted file mode 100644 index 07abae1ff..000000000 --- a/src/couch_log/test/couch_log_config_listener_test.erl +++ /dev/null @@ -1,82 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_config_listener_test). - - --include_lib("couch_log/include/couch_log.hrl"). --include_lib("eunit/include/eunit.hrl"). - --define(TIMEOUT, 1000). - -couch_log_config_test_() -> - {setup, - fun couch_log_test_util:start/0, - fun couch_log_test_util:stop/1, - [ - fun check_restart_listener/0, - fun check_ignore_non_log/0 - ] - }. - - -check_restart_listener() -> - Listener1 = get_listener(), - ?assert(is_process_alive(Listener1)), - - Handler1 = get_handler(), - ?assertNotEqual(not_found, Handler1), - Ref = erlang:monitor(process, Listener1), - ok = gen_event:delete_handler(config_event, get_handler(), testing), - - receive - {'DOWN', Ref, process, _, _} -> - ?assertNot(is_process_alive(Listener1)) - after ?TIMEOUT -> - erlang:error({timeout, config_listener_mon_death}) - end, - - NewHandler = test_util:wait(fun() -> - case get_handler() of - not_found -> wait; - Reply -> Reply - end - end, ?TIMEOUT, 20), - ?assertEqual(Handler1, NewHandler), - - Listener2 = get_listener(), - ?assert(is_process_alive(Listener2)), - ?assertNotEqual(Listener1, Listener2), - ok. - -check_ignore_non_log() -> - Run = fun() -> - couch_log_test_util:with_config_listener(fun() -> - config:set("foo", "bar", "baz"), - couch_log_test_util:wait_for_config() - end) - end, - ?assertError(config_change_timeout, Run()). - - -get_handler() -> - FoldFun = fun - ({config_listener, {couch_log_sup, _}} = H, not_found) -> - H; - (_, Acc) -> - Acc - end, - lists:foldl(FoldFun, not_found, gen_event:which_handlers(config_event)). - -get_listener() -> - Children = supervisor:which_children(couch_log_sup), - hd([Pid || {config_listener_mon, Pid, _, _} <- Children]). diff --git a/src/couch_log/test/couch_log_config_test.erl b/src/couch_log/test/couch_log_config_test.erl deleted file mode 100644 index c4677f37f..000000000 --- a/src/couch_log/test/couch_log_config_test.erl +++ /dev/null @@ -1,110 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_config_test). - - --include_lib("couch_log/include/couch_log.hrl"). --include_lib("eunit/include/eunit.hrl"). 
- - -couch_log_config_test_() -> - {setup, - fun couch_log_test_util:start/0, - fun couch_log_test_util:stop/1, - [ - fun check_level/0, - fun check_max_message_size/0, - fun check_bad_level/0, - fun check_bad_max_message_size/0 - ] - }. - - -check_level() -> - % Default level is info - ?assertEqual(info, couch_log_config:get(level)), - ?assertEqual(2, couch_log_config:get(level_int)), - - couch_log_test_util:with_config_listener(fun() -> - config:set("log", "level", "emerg"), - couch_log_test_util:wait_for_config(), - ?assertEqual(emergency, couch_log_config:get(level)), - ?assertEqual(8, couch_log_config:get(level_int)), - - config:set("log", "level", "debug"), - couch_log_test_util:wait_for_config(), - ?assertEqual(debug, couch_log_config:get(level)), - ?assertEqual(1, couch_log_config:get(level_int)), - - config:delete("log", "level"), - couch_log_test_util:wait_for_config(), - ?assertEqual(info, couch_log_config:get(level)), - ?assertEqual(2, couch_log_config:get(level_int)) - end). - - -check_max_message_size() -> - % Default is 16000 - ?assertEqual(16000, couch_log_config:get(max_message_size)), - - couch_log_test_util:with_config_listener(fun() -> - config:set("log", "max_message_size", "1024"), - couch_log_test_util:wait_for_config(), - ?assertEqual(1024, couch_log_config:get(max_message_size)), - - config:delete("log", "max_message_size"), - couch_log_test_util:wait_for_config(), - ?assertEqual(16000, couch_log_config:get(max_message_size)) - end). - - -check_bad_level() -> - % Default level is info - ?assertEqual(info, couch_log_config:get(level)), - ?assertEqual(2, couch_log_config:get(level_int)), - - couch_log_test_util:with_config_listener(fun() -> - config:set("log", "level", "debug"), - couch_log_test_util:wait_for_config(), - ?assertEqual(debug, couch_log_config:get(level)), - ?assertEqual(1, couch_log_config:get(level_int)), - - config:set("log", "level", "this is not a valid level name"), - couch_log_test_util:wait_for_config(), - ?assertEqual(info, couch_log_config:get(level)), - ?assertEqual(2, couch_log_config:get(level_int)), - - config:delete("log", "level"), - couch_log_test_util:wait_for_config(), - ?assertEqual(info, couch_log_config:get(level)), - ?assertEqual(2, couch_log_config:get(level_int)) - end). - - -check_bad_max_message_size() -> - % Default level is 16000 - ?assertEqual(16000, couch_log_config:get(max_message_size)), - - couch_log_test_util:with_config_listener(fun() -> - config:set("log", "max_message_size", "1024"), - couch_log_test_util:wait_for_config(), - ?assertEqual(1024, couch_log_config:get(max_message_size)), - - config:set("log", "max_message_size", "this is not a valid size"), - couch_log_test_util:wait_for_config(), - ?assertEqual(16000, couch_log_config:get(max_message_size)), - - config:delete("log", "max_message_size"), - couch_log_test_util:wait_for_config(), - ?assertEqual(16000, couch_log_config:get(max_message_size)) - end). diff --git a/src/couch_log/test/couch_log_error_logger_h_test.erl b/src/couch_log/test/couch_log_error_logger_h_test.erl deleted file mode 100644 index b78598fa4..000000000 --- a/src/couch_log/test/couch_log_error_logger_h_test.erl +++ /dev/null @@ -1,45 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_error_logger_h_test). - - --include_lib("eunit/include/eunit.hrl"). - - --define(HANDLER, couch_log_error_logger_h). - - -couch_log_error_logger_h_test_() -> - {setup, - fun couch_log_test_util:start/0, - fun couch_log_test_util:stop/1, - [ - fun handler_ignores_unknown_messages/0, - fun coverage_test/0 - ] - }. - - -handler_ignores_unknown_messages() -> - Handlers1 = gen_event:which_handlers(error_logger), - ?assert(lists:member(?HANDLER, Handlers1)), - ?assertEqual(ignored, gen_event:call(error_logger, ?HANDLER, foo)), - - error_logger ! this_is_a_message, - Handlers2 = gen_event:which_handlers(error_logger), - ?assert(lists:member(?HANDLER, Handlers2)). - - -coverage_test() -> - Resp = couch_log_error_logger_h:code_change(foo, bazinga, baz), - ?assertEqual({ok, bazinga}, Resp). diff --git a/src/couch_log/test/couch_log_formatter_test.erl b/src/couch_log/test/couch_log_formatter_test.erl deleted file mode 100644 index 795efcf29..000000000 --- a/src/couch_log/test/couch_log_formatter_test.erl +++ /dev/null @@ -1,875 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_formatter_test). - - --include("couch_log.hrl"). --include_lib("eunit/include/eunit.hrl"). - - -truncate_fmt_test() -> - Msg = [0 || _ <- lists:seq(1, 1048576)], - Entry = couch_log_formatter:format(info, self(), "~w", [Msg]), - ?assert(length(Entry#log_entry.msg) =< 16000). - - -truncate_test() -> - Msg = [0 || _ <- lists:seq(1, 1048576)], - Entry = couch_log_formatter:format(info, self(), Msg), - ?assert(length(Entry#log_entry.msg) =< 16000). - - -format_reason_test() -> - MsgFmt = "This is a reason: ~r", - Reason = {foo, [{x, k, 3}, {c, d, 2}]}, - Entry = couch_log_formatter:format(info, self(), MsgFmt, [Reason]), - Formatted = "This is a reason: foo at x:k/3 <= c:d/2", - ?assertEqual(Formatted, lists:flatten(Entry#log_entry.msg)). - - -crashing_formatting_test() -> - Pid = self(), - Event = { - error, - erlang:group_leader(), - { - Pid, - "** Generic server and some stuff", - [a_gen_server, {foo, bar}, server_state] % not enough args! - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid - }, - do_format(Event) - ), - do_matches(do_format(Event), [ - "Encountered error {error,{badmatch" - ]). 
- - -gen_server_error_test() -> - Pid = self(), - Event = { - error, - erlang:group_leader(), - { - Pid, - "** Generic server and some stuff", - [a_gen_server, {foo, bar}, server_state, some_reason] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid - }, - do_format(Event) - ), - do_matches(do_format(Event), [ - "gen_server a_gen_server terminated", - "with reason: some_reason", - "last msg: {foo,bar}", - "state: server_state", - "extra: \\[\\]" - ]). - - -gen_server_error_with_extra_args_test() -> - Pid = self(), - Event = { - error, - erlang:group_leader(), - { - Pid, - "** Generic server and some stuff", - [a_gen_server, {foo, bar}, server_state, some_reason, sad, args] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid - }, - do_format(Event) - ), - do_matches(do_format(Event), [ - "gen_server a_gen_server terminated", - "with reason: some_reason", - "last msg: {foo,bar}", - "state: server_state", - "extra: \\[sad,args\\]" - ]). - - -gen_fsm_error_test() -> - Pid = self(), - Event = { - error, - erlang:group_leader(), - { - Pid, - "** State machine did a thing", - [a_gen_fsm, {ohai,there}, state_name, curr_state, barf] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid - }, - do_format(Event) - ), - do_matches(do_format(Event), [ - "gen_fsm a_gen_fsm in state state_name", - "with reason: barf", - "last msg: {ohai,there}", - "state: curr_state", - "extra: \\[\\]" - ]). - - -gen_fsm_error_with_extra_args_test() -> - Pid = self(), - Event = { - error, - erlang:group_leader(), - { - Pid, - "** State machine did a thing", - [a_gen_fsm, {ohai,there}, state_name, curr_state, barf, sad, args] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid - }, - do_format(Event) - ), - do_matches(do_format(Event), [ - "gen_fsm a_gen_fsm in state state_name", - "with reason: barf", - "last msg: {ohai,there}", - "state: curr_state", - "extra: \\[sad,args\\]" - ]). - - -gen_event_error_test() -> - Pid = self(), - Event = { - error, - erlang:group_leader(), - { - Pid, - "** gen_event handler did a thing", - [ - handler_id, - a_gen_event, - {ohai,there}, - curr_state, - barf - ] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid - }, - do_format(Event) - ), - do_matches(do_format(Event), [ - "gen_event handler_id installed in a_gen_event", - "reason: barf", - "last msg: {ohai,there}", - "state: curr_state" - ]). - - -emulator_error_test() -> - Event = { - error, - erlang:group_leader(), - { - emulator, - "~s~n", - ["A process died and stuff\n"] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = emulator, - msg = "A process died and stuff" - }, - do_format(Event) - ). - - -normal_error_test() -> - Pid = self(), - Event = { - error, - erlang:group_leader(), - { - Pid, - "format thing: ~w ~w", - [ - first_arg, - second_arg - ] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid, - msg = "format thing: first_arg second_arg" - }, - do_format(Event) - ). - - -error_report_std_error_test() -> - Pid = self(), - Event = { - error_report, - erlang:group_leader(), - { - Pid, - std_error, - [foo, {bar, baz}] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid, - msg = "foo, bar: baz" - }, - do_format(Event) - ). 
- - -supervisor_report_test() -> - Pid = self(), - % A standard supervisor report - Event1 = { - error_report, - erlang:group_leader(), - { - Pid, - supervisor_report, - [ - {supervisor, sup_name}, - {offender, [ - {id, sup_child}, - {pid, list_to_pid("<0.1.0>")}, - {mfargs, {some_mod, some_fun, 3}} - ]}, - {reason, a_reason}, - {errorContext, some_context} - ] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid - }, - do_format(Event1) - ), - do_matches(do_format(Event1), [ - "Supervisor sup_name", - "had child sup_child started with some_mod:some_fun/3 at <0.1.0> exit", - "with reason a_reason", - "in context some_context" - ]), - % Slightly older using name instead of id - % in the offender blob. - Event2 = { - error_report, - erlang:group_leader(), - { - Pid, - supervisor_report, - [ - {supervisor, sup_name}, - {offender, [ - {name, sup_child}, - {pid, list_to_pid("<0.1.0>")}, - {mfargs, {some_mod, some_fun, 3}} - ]}, - {reason, a_reason}, - {errorContext, some_context} - ] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid - }, - do_format(Event2) - ), - do_matches(do_format(Event2), [ - "Supervisor sup_name", - "had child sup_child started with some_mod:some_fun/3 at <0.1.0> exit", - "with reason a_reason", - "in context some_context" - ]), - % A supervisor_bridge - Event3 = { - error_report, - erlang:group_leader(), - { - Pid, - supervisor_report, - [ - {supervisor, sup_name}, - {offender, [ - {mod, bridge_mod}, - {pid, list_to_pid("<0.1.0>")} - ]}, - {reason, a_reason}, - {errorContext, some_context} - ] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid - }, - do_format(Event3) - ), - do_matches(do_format(Event3), [ - "Supervisor sup_name", - "had child at module bridge_mod at <0.1.0> exit", - "with reason a_reason", - "in context some_context" - ]), - % Any other supervisor report - Event4 = { - error_report, - erlang:group_leader(), - { - Pid, - supervisor_report, - [foo, {a, thing}, bang] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid, - msg = "SUPERVISOR REPORT foo, a: thing, bang" - }, - do_format(Event4) - ). 
- - -crash_report_test() -> - Pid = self(), - % A standard crash report - Event1 = { - error_report, - erlang:group_leader(), - { - Pid, - crash_report, - [ - [ - {pid, list_to_pid("<0.2.0>")}, - {error_info, { - exit, - undef, - [{mod_name, fun_name, [a, b]}] - }} - ], - [list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")] - ] - } - }, - ?assertMatch( - #log_entry{ - level = error, - pid = Pid - }, - do_format(Event1) - ), - do_matches(do_format(Event1), [ - "Process <0.2.0>", - "with 2 neighbors", - "exited", - "reason: call to undefined function mod_name:fun_name\\(a, b\\)" - ]), - % A registered process crash report - Event2 = { - error_report, - erlang:group_leader(), - { - Pid, - crash_report, - [ - [ - {pid, list_to_pid("<0.2.0>")}, - {registered_name, couch_log_server}, - {error_info, { - exit, - undef, - [{mod_name, fun_name, [a, b]}] - }} - ], - [list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")] - ] - } - }, - do_matches(do_format(Event2), [ - "Process couch_log_server \\(<0.2.0>\\)" - ]), - % A non-exit crash report - Event3 = { - error_report, - erlang:group_leader(), - { - Pid, - crash_report, - [ - [ - {pid, list_to_pid("<0.2.0>")}, - {registered_name, couch_log_server}, - {error_info, { - killed, - undef, - [{mod_name, fun_name, [a, b]}] - }} - ], - [list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")] - ] - } - }, - do_matches(do_format(Event3), [ - "crashed" - ]), - % A extra report info - Event4 = { - error_report, - erlang:group_leader(), - { - Pid, - crash_report, - [ - [ - {pid, list_to_pid("<0.2.0>")}, - {error_info, { - killed, - undef, - [{mod_name, fun_name, [a, b]}] - }}, - {another, entry}, - yep - ], - [list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")] - ] - } - }, - do_matches(do_format(Event4), [ - "; another: entry, yep" - ]). - - -warning_report_test() -> - Pid = self(), - % A warning message - Event1 = { - warning_msg, - erlang:group_leader(), - { - Pid, - "a ~s string ~w", - ["format", 7] - } - }, - ?assertMatch( - #log_entry{ - level = warning, - pid = Pid, - msg = "a format string 7" - }, - do_format(Event1) - ), - % A warning report - Event2 = { - warning_report, - erlang:group_leader(), - { - Pid, - std_warning, - [list, 'of', {things, indeed}] - } - }, - ?assertMatch( - #log_entry{ - level = warning, - pid = Pid, - msg = "list, of, things: indeed" - }, - do_format(Event2) - ). - - -info_report_test() -> - Pid = self(), - % An info message - Event1 = { - info_msg, - erlang:group_leader(), - { - Pid, - "an info ~s string ~w", - ["format", 7] - } - }, - ?assertMatch( - #log_entry{ - level = info, - pid = Pid, - msg = "an info format string 7" - }, - do_format(Event1) - ), - % Application exit info - Event2 = { - info_report, - erlang:group_leader(), - { - Pid, - std_info, - [ - {type, no_idea}, - {application, couch_log}, - {exited, red_sox_are_on} - ] - } - }, - ?assertMatch( - #log_entry{ - level = info, - pid = Pid, - msg = "Application couch_log exited with reason: red_sox_are_on" - }, - do_format(Event2) - ), - % Any other std_info message - Event3 = { - info_report, - erlang:group_leader(), - { - Pid, - std_info, - [ - {type, no_idea}, - {application, couch_log} - ] - } - }, - ?assertMatch( - #log_entry{ - level = info, - pid = Pid, - msg = "type: no_idea, application: couch_log" - }, - do_format(Event3) - ), - % Non-list other report - Event4 = { - info_report, - erlang:group_leader(), - { - Pid, - std_info, - dang - } - }, - ?assertMatch( - #log_entry{ - level = info, - pid = Pid, - msg = "dang" - }, - do_format(Event4) - ). 
- - -progress_report_test() -> - Pid = self(), - % Application started - Event1 = { - info_report, - erlang:group_leader(), - { - Pid, - progress, - [{started_at, 'nonode@nohost'}, {application, app_name}] - } - }, - ?assertMatch( - #log_entry{ - level = info, - pid = Pid, - msg = "Application app_name started on node nonode@nohost" - }, - do_format(Event1) - ), - % Supervisor started child - Event2 = { - info_report, - erlang:group_leader(), - { - Pid, - progress, - [ - {supervisor, sup_dude}, - {started, [ - {mfargs, {mod_name, fun_name, 1}}, - {pid, list_to_pid("<0.5.0>")} - ]} - ] - } - }, - ?assertMatch( - #log_entry{ - level = debug, - pid = Pid, - msg = "Supervisor sup_dude started mod_name:fun_name/1" - " at pid <0.5.0>" - }, - do_format(Event2) - ), - % Other progress report - Event3 = { - info_report, - erlang:group_leader(), - { - Pid, - progress, - [a, {thing, boop}, here] - } - }, - ?assertMatch( - #log_entry{ - level = info, - pid = Pid, - msg = "PROGRESS REPORT a, thing: boop, here" - }, - do_format(Event3) - ). - - -log_unknown_event_test() -> - Pid = self(), - ?assertMatch( - #log_entry{ - level = warning, - pid = Pid, - msg = "Unexpected error_logger event an_unknown_event" - }, - do_format(an_unknown_event) - ). - - -format_reason_test_() -> - Cases = [ - { - {'function not exported', [{a, b, 2}, {c, d, 1}, {e, f, 2}]}, - "call to unexported function a:b/2 at c:d/1 <= e:f/2" - }, - { - {'function not exported', [{a, b, 2, []}, {c, d, 1}, {e, f, 2}]}, - "call to unexported function a:b/2 at c:d/1 <= e:f/2" - }, - { - {undef, [{a, b, 2, []}, {c, d, 1}, {e, f, 2}]}, - "call to undefined function a:b/2 at c:d/1 <= e:f/2" - }, - { - {bad_return, {{a, b, 2}, {'EXIT', killed}}}, - "bad return value {'EXIT',killed} from a:b/2" - }, - { - {bad_return_value, foo}, - "bad return value foo" - }, - { - {{bad_return_value, foo}, {h, i, 0}}, - "bad return value foo at h:i/0" - }, - { - {{badrecord, {foo, 1, 4}}, [{h, i, 0}, {j, k, [a, b]}]}, - "bad record {foo,1,4} at h:i/0 <= j:k/2" - }, - { - {{case_clause, bingo}, [{j, k, 3}, {z, z, 0}]}, - "no case clause matching bingo at j:k/3 <= z:z/0" - }, - { - {function_clause, [{j, k, [a, 2]}, {y, x, 1}]}, - "no function clause matching j:k(a, 2) at y:x/1" - }, - { - {if_clause, [{j, k, [a, 2]}, {y, x, 1}]}, - "no true branch found while evaluating if expression at j:k/2 <= y:x/1" - }, - { - {{try_clause, bango}, [{j, k, [a, 2]}, {y, x, 1}]}, - "no try clause matching bango at j:k/2 <= y:x/1" - }, - { - {badarith, [{j, k, [a, 2]}, {y, x, 1}]}, - "bad arithmetic expression at j:k/2 <= y:x/1" - }, - { - {{badmatch, bongo}, [{j, k, [a, 2]}, {y, x, 1}]}, - "no match of right hand value bongo at j:k/2 <= y:x/1" - }, - { - {emfile, [{j, k, [a, 2]}, {y, x, 1}]}, - "maximum number of file descriptors exhausted, check ulimit -n; j:k/2 <= y:x/1" - }, - { - {system_limit, [{erlang, open_port, []}, {y, x, 1}]}, - "system limit: maximum number of ports exceeded at y:x/1" - }, - { - {system_limit, [{erlang, spawn, []}, {y, x, 1}]}, - "system limit: maximum number of processes exceeded at y:x/1" - }, - { - {system_limit, [{erlang, spawn_opt, []}, {y, x, 1}]}, - "system limit: maximum number of processes exceeded at y:x/1" - }, - { - {system_limit, [{erlang, list_to_atom, ["foo"]}, {y, x, 1}]}, - "system limit: tried to create an atom larger than 255, or maximum atom count exceeded at y:x/1" - }, - { - {system_limit, [{ets, new, []}, {y, x, 1}]}, - "system limit: maximum number of ETS tables exceeded at y:x/1" - }, - { - {system_limit, [{couch_log, 
totes_logs, []}, {y, x, 1}]}, - "system limit: couch_log:totes_logs() at y:x/1" - }, - { - {badarg, [{j, k, [a, 2]}, {y, x, 1}]}, - "bad argument in call to j:k(a, 2) at y:x/1" - }, - { - {{badarg, [{j, k, [a, 2]}, {y, x, 1}]}, some_ignored_thing}, - "bad argument in call to j:k(a, 2) at y:x/1" - }, - { - {{badarity, {fun erlang:spawn/1, [a, b]}}, [{y, x, 1}]}, - "function called with wrong arity of 2 instead of 1 at y:x/1" - }, - { - {noproc, [{y, x, 1}]}, - "no such process or port in call to y:x/1" - }, - { - {{badfun, 2}, [{y, x, 1}]}, - "bad function 2 called at y:x/1" - }, - { - {a_reason, [{y, x, 1}]}, - "a_reason at y:x/1" - }, - { - {a_reason, [{y, x, 1, [{line, 4}]}]}, - "a_reason at y:x/1(line:4)" - } - ], - [ - {Msg, fun() -> ?assertEqual( - Msg, - lists:flatten(couch_log_formatter:format_reason(Reason)) - ) end} - || {Reason, Msg} <- Cases - ]. - - -coverage_test() -> - % MFA's that aren't - ?assertEqual(["foo"], couch_log_formatter:format_mfa(foo)), - - % Traces with line numbers - Trace = [{x, y, [a], [{line, 4}]}], - ?assertEqual( - "x:y/1(line:4)", - lists:flatten(couch_log_formatter:format_trace(Trace)) - ), - - % Excercising print_silly_list - ?assertMatch( - #log_entry{ - level = error, - msg = "foobar" - }, - do_format({ - error_report, - erlang:group_leader(), - {self(), std_error, "foobar"} - }) - ), - - % Excercising print_silly_list - ?assertMatch( - #log_entry{ - level = error, - msg = "dang" - }, - do_format({ - error_report, - erlang:group_leader(), - {self(), std_error, dang} - }) - ). - - -do_format(Event) -> - E = couch_log_formatter:format(Event), - E#log_entry{ - msg = lists:flatten(E#log_entry.msg), - msg_id = lists:flatten(E#log_entry.msg_id), - time_stamp = lists:flatten(E#log_entry.time_stamp) - }. - - -do_matches(_, []) -> - ok; - -do_matches(#log_entry{msg = Msg} = E, [Pattern | RestPatterns]) -> - case re:run(Msg, Pattern) of - {match, _} -> - ok; - nomatch -> - Err1 = io_lib:format("'~s' does not match '~s'", [Pattern, Msg]), - Err2 = lists:flatten(Err1), - ?assertEqual(nomatch, Err2) - end, - do_matches(E, RestPatterns). diff --git a/src/couch_log/test/couch_log_monitor_test.erl b/src/couch_log/test/couch_log_monitor_test.erl deleted file mode 100644 index eec008522..000000000 --- a/src/couch_log/test/couch_log_monitor_test.erl +++ /dev/null @@ -1,67 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_monitor_test). - - --include_lib("eunit/include/eunit.hrl"). - - --define(HANDLER, couch_log_error_logger_h). - - -couch_log_monitor_test_() -> - {setup, - fun couch_log_test_util:start/0, - fun couch_log_test_util:stop/1, - [ - fun monitor_ignores_unknown_messages/0, - fun monitor_restarts_handler/0, - fun coverage_test/0 - ] - }. - - -monitor_ignores_unknown_messages() -> - Pid1 = get_monitor_pid(), - - ?assertEqual(ignored, gen_server:call(Pid1, do_foo_please)), - - gen_server:cast(Pid1, do_bar_please), - Pid1 ! do_baz_please, - timer:sleep(250), - ?assert(is_process_alive(Pid1)). 
- - -monitor_restarts_handler() -> - Pid1 = get_monitor_pid(), - error_logger:delete_report_handler(?HANDLER), - timer:sleep(250), - - ?assert(not is_process_alive(Pid1)), - - Pid2 = get_monitor_pid(), - ?assert(is_process_alive(Pid2)), - - Handlers = gen_event:which_handlers(error_logger), - ?assert(lists:member(?HANDLER, Handlers)). - - -coverage_test() -> - Resp = couch_log_monitor:code_change(foo, bazinga, baz), - ?assertEqual({ok, bazinga}, Resp). - - -get_monitor_pid() -> - Children = supervisor:which_children(couch_log_sup), - [MonPid] = [Pid || {couch_log_monitor, Pid, _, _} <- Children, is_pid(Pid)], - MonPid. diff --git a/src/couch_log/test/couch_log_server_test.erl b/src/couch_log/test/couch_log_server_test.erl deleted file mode 100644 index 7af570e90..000000000 --- a/src/couch_log/test/couch_log_server_test.erl +++ /dev/null @@ -1,118 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_server_test). - - --include("couch_log.hrl"). --include_lib("eunit/include/eunit.hrl"). - - -couch_log_server_test_() -> - {setup, - fun couch_log_test_util:start/0, - fun couch_log_test_util:stop/1, - [ - fun check_can_reconfigure/0, - fun check_can_restart/0, - fun check_can_cast_log_entry/0, - fun check_logs_ignored_messages/0 - ] - }. - - -check_can_reconfigure() -> - couch_log:error("a message", []), - ?assertEqual(0, couch_log_test_util:last_log_key()), - ?assertEqual(ok, couch_log_server:reconfigure()), - ?assertEqual('$end_of_table', couch_log_test_util:last_log_key()), - - couch_log_test_util:with_config_listener(fun() -> - couch_log:error("another message", []), - ?assertEqual(0, couch_log_test_util:last_log_key()), - config:set("log", "some_key", "some_val"), - couch_log_test_util:wait_for_config(), - ?assertEqual('$end_of_table', couch_log_test_util:last_log_key()) - end). - - -check_can_restart() -> - Pid1 = whereis(couch_log_server), - Ref = erlang:monitor(process, Pid1), - ?assert(is_process_alive(Pid1)), - - supervisor:terminate_child(couch_log_sup, couch_log_server), - supervisor:restart_child(couch_log_sup, couch_log_server), - - receive - {'DOWN', Ref, _, _, _} -> ok - after 1000 -> - erlang:error(timeout_restarting_couch_log_server) - end, - - ?assert(not is_process_alive(Pid1)), - - Pid2 = whereis(couch_log_server), - ?assertNotEqual(Pid2, Pid1), - ?assert(is_process_alive(Pid2)). - - -check_can_cast_log_entry() -> - Entry = #log_entry{ - level = critical, - pid = self(), - msg = "this will be casted", - msg_id = "----", - time_stamp = "2016-07-20-almost-my-birthday" - }, - ok = gen_server:cast(couch_log_server, {log, Entry}), - timer:sleep(500), % totes gross - ?assertEqual(Entry, couch_log_test_util:last_log()). 
- - -check_logs_ignored_messages() -> - gen_server:call(couch_log_server, a_call), - ?assertMatch( - #log_entry{ - level = error, - pid = couch_log_server, - msg = "couch_log_server ignored a_call" - }, - couch_log_test_util:last_log() - ), - - gen_server:cast(couch_log_server, a_cast), - timer:sleep(500), % yes gross - ?assertMatch( - #log_entry{ - level = error, - pid = couch_log_server, - msg = "couch_log_server ignored a_cast" - }, - couch_log_test_util:last_log() - ), - - couch_log_server ! an_info, - timer:sleep(500), % still gross - ?assertMatch( - #log_entry{ - level = error, - pid = couch_log_server, - msg = "couch_log_server ignored an_info" - }, - couch_log_test_util:last_log() - ). - - -coverage_test() -> - Resp = couch_log_server:code_change(foo, bazinga, baz), - ?assertEqual({ok, bazinga}, Resp). diff --git a/src/couch_log/test/couch_log_test.erl b/src/couch_log/test/couch_log_test.erl deleted file mode 100644 index c7195f65f..000000000 --- a/src/couch_log/test/couch_log_test.erl +++ /dev/null @@ -1,84 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_test). - - --include_lib("couch_log/include/couch_log.hrl"). --include_lib("eunit/include/eunit.hrl"). - - -couch_log_test_() -> - {setup, - fun couch_log_test_util:start/0, - fun couch_log_test_util:stop/1, - gen() ++ [fun check_set_level/0] - }. - - -check_set_level() -> - couch_log:set_level(crit), - ?assertEqual("crit", config:get("log", "level")). - - -levels() -> - [ - debug, - info, - notice, - warning, - error, - critical, - alert, - emergency, - none - ]. - - -gen() -> - lists:map(fun(L) -> - Name = "Test log level: " ++ couch_log_util:level_to_string(L), - {Name, fun() -> check_levels(L, levels()) end} - end, levels() -- [none]). - - -check_levels(_, []) -> - ok; - -check_levels(TestLevel, [CfgLevel | RestLevels]) -> - TestInt = couch_log_util:level_to_integer(TestLevel), - CfgInt = couch_log_util:level_to_integer(CfgLevel), - Pid = self(), - Msg = new_msg(), - LastKey = couch_log_test_util:last_log_key(), - couch_log_test_util:with_level(CfgLevel, fun() -> - couch_log:TestLevel(Msg, []), - case TestInt >= CfgInt of - true -> - ?assertMatch( - #log_entry{ - level = TestLevel, - pid = Pid, - msg = Msg - }, - couch_log_test_util:last_log() - ); - false -> - ?assertEqual(LastKey, couch_log_test_util:last_log_key()) - end - end), - check_levels(TestLevel, RestLevels). - - -new_msg() -> - Bin = list_to_binary([couch_rand:uniform(255) || _ <- lists:seq(1, 16)]), - couch_util:to_hex(Bin). diff --git a/src/couch_log/test/couch_log_test_util.erl b/src/couch_log/test/couch_log_test_util.erl deleted file mode 100644 index 00f3981fc..000000000 --- a/src/couch_log/test/couch_log_test_util.erl +++ /dev/null @@ -1,168 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_test_util). - --export([ - start/0, - stop/1, - last_log/0, - last_log_key/0, - wait_for_config/0, - with_config_listener/1, - with_level/2, - with_meck/2 -]). - --include("couch_log.hrl"). - - -start() -> - remove_error_loggers(), - application:set_env(config, ini_files, config_files()), - application:start(config), - ignore_common_loggers(), - application:start(couch_log), - meck:new(couch_stats), - ok = meck:expect(couch_stats, increment_counter, ['_'], ok). - - -stop(_) -> - application:stop(config), - application:stop(couch_log), - meck:unload(couch_stats). - - -with_level(Name, Fun) -> - with_config_listener(fun() -> - try - LevelStr = couch_log_util:level_to_string(Name), - config:set("log", "level", LevelStr, false), - wait_for_config(), - Fun() - after - config:delete("log", "level", false) - end - end). - - -with_config_listener(Fun) -> - Listener = self(), - try - add_listener(Listener), - Fun() - after - rem_listener(Listener) - end. - - -wait_for_config() -> - receive - couch_log_config_change_finished -> ok - after 1000 -> - erlang:error(config_change_timeout) - end. - - -with_meck(Mods, Fun) -> - lists:foreach(fun(M) -> - case M of - {Name, Opts} -> meck:new(Name, Opts); - Name -> meck:new(Name) - end - end, Mods), - try - Fun() - after - lists:foreach(fun(M) -> - case M of - {Name, _} -> meck:unload(Name); - Name -> meck:unload(Name) - end - end, Mods) - end. - - -ignore_common_loggers() -> - IgnoreSet = [ - application_controller, - config, - config_event - ], - lists:foreach(fun(Proc) -> - disable_logs_from(Proc) - end, IgnoreSet). - - -disable_logs_from(Pid) when is_pid(Pid) -> - Ignored = case application:get_env(couch_log, ignored_pids) of - {ok, L} when is_list(L) -> - lists:usort([Pid | L]); - _E -> - [Pid] - end, - IgnoredAlive = [P || P <- Ignored, is_process_alive(P)], - application:set_env(couch_log, ignored_pids, IgnoredAlive); - -disable_logs_from(Name) when is_atom(Name) -> - case whereis(Name) of - P when is_pid(P) -> - disable_logs_from(P); - undefined -> - erlang:error({unknown_pid_name, Name}) - end. - - -last_log_key() -> - ets:last(?COUCH_LOG_TEST_TABLE). - - -last_log() -> - [{_, Entry}] = ets:lookup(?COUCH_LOG_TEST_TABLE, last_log_key()), - Entry. - - -remove_error_loggers() -> - ErrorLoggerPid = whereis(error_logger), - if ErrorLoggerPid == undefined -> ok; true -> - lists:foreach(fun(Handler) -> - error_logger:delete_report_handler(Handler) - end, gen_event:which_handlers(ErrorLoggerPid)) - end. - - -config_files() -> - Path = filename:dirname(code:which(?MODULE)), - Name = filename:join(Path, "couch_log_test.ini"), - ok = file:write_file(Name, "[log]\nwriter = ets\n"), - [Name]. - - -add_listener(Listener) -> - Listeners = case application:get_env(couch_log, config_listeners) of - {ok, L} when is_list(L) -> - lists:usort([Listener | L]); - _ -> - [Listener] - end, - application:set_env(couch_log, config_listeners, Listeners). 
-
-
-rem_listener(Listener) ->
-    Listeners = case application:get_env(couch_log, config_listeners) of
-        {ok, L} when is_list(L) ->
-            L -- [Listener];
-        _ ->
-            []
-    end,
-    application:set_env(couch_log, config_listeners, Listeners).
-
diff --git a/src/couch_log/test/couch_log_trunc_io_fmt_test.erl b/src/couch_log/test/couch_log_trunc_io_fmt_test.erl
deleted file mode 100644
index 77d555440..000000000
--- a/src/couch_log/test/couch_log_trunc_io_fmt_test.erl
+++ /dev/null
@@ -1,92 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_log_trunc_io_fmt_test).
-
-
--include_lib("eunit/include/eunit.hrl").
-
-
-format_test_() ->
-    lists:map(fun({Fmt, Args, Expect}) ->
-        Name = io_lib:format("~p", [Expect]),
-        {lists:flatten(Name),
-            ?_assertEqual(
-                Expect,
-                lists:flatten(couch_log_trunc_io_fmt:format(Fmt, Args, 1024))
-            )
-        }
-    end, cases()).
-
-
-
-chomp_test() ->
-    R1 = couch_log_trunc_io_fmt:format("\n", [], 1024, [{chomp, true}]),
-    ?assertEqual("", lists:flatten(R1)),
-    R2 = couch_log_trunc_io_fmt:format("~n", [], 1024, [{chomp, true}]),
-    ?assertEqual("", lists:flatten(R2)).
-
-
-cases() ->
-    [
-        {"", [], ""},
-        {"stuff\n\t", [], "stuff\n\t"},
-        {"~w", [foo], "foo"},
-        {"~p", [bar], "bar"},
-        {"~W", [{{{2}}}, 2], "{{...}}"},
-        {"~P", [{{{ohai}}}, 1], "{...}"},
-        {"~s", [[$s, [$t, [$u, [$f, [$f]]]]]], "stuff"},
-        {"~4s", ["stuff"], "stuf"},
-        {"~8s", ["stuff"], " stuff"},
-        {"~.8s", ["stuff"], "stuff "},
-        {"~10.4s", ["stuff"], " stuf"},
-        {"~10.6s", ["stuff"], " stuff "},
-        {"~10.5s", ["stuff"], " stuff"},
-        {"~10.10s", ["stuff"], " stuff"},
-        {"~r", [{reason, [{x, k, [c, d]}]}], "reason at x:k/2"},
-        {"~e", [1.0], "1.00000e+0"},
-        {"~f", [1.0], "1.000000"},
-        {"~f", [0.000323], "0.000323"},
-        {"~f", [31.4], "31.400000"},
-        {"~f", [-2.3], "-2.300000"},
-        {"~g", [1.0], "1.00000"},
-        {"~b", [-15], "-15"},
-        {"~b", [15], "15"},
-        {"~B", [15], "15"},
-        {"~.16b", [15], "f"},
-        {"~.16B", [15], "F"},
-        {"~.16b", [-15], "-f"},
-        {"~.16B", [-15], "-F"},
-        {"~.16x", [15, "16#"], "16#f"},
-        {"~.16x", [15, '16#'], "16#f"},
-        {"~.16x", [-15, "16#"], "-16#f"},
-        {"~.16X", [15, "16#"], "16#F"},
-        {"~.16X", [15, '16#'], "16#F"},
-        {"~.16X", [-15, "16#"], "-16#F"},
-        {"~.16#", [15], "16#F"},
-        {"~.16+", [15], "16#f"},
-        {"~c", [$z], "z"},
-        {"~tc", [$g], "g"},
-        {"~~", [], "\~"},
-        {"~n", [], "\n"},
-        {"~2n", [], "\n\n"},
-        {"~3n", [], "\n\n\n"},
-        {"~i", [ignored], ""},
-        {"~2.w", [1], " 1"},
-        {"~*w", [2, 1], " 1"},
-        {"~-2.w", [1], "1 "},
-        {"~2.0. w", [1], " "},
-        {"~2.1. w", [1], " 1"},
-        {"~2.0.|w", [1], "||"},
-        {"~2.1.|w", [1], "|1"},
-        {"~2.1.*w", [$q, 1], "q1"}
-    ].
diff --git a/src/couch_log/test/couch_log_util_test.erl b/src/couch_log/test/couch_log_util_test.erl
deleted file mode 100644
index e97911aa9..000000000
--- a/src/couch_log/test/couch_log_util_test.erl
+++ /dev/null
@@ -1,55 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License.
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_util_test). - - --include_lib("couch_log/include/couch_log.hrl"). --include_lib("eunit/include/eunit.hrl"). - - -get_message_id_test() -> - ?assertEqual("--------", couch_log_util:get_msg_id()), - erlang:put(nonce, "deadbeef"), - ?assertEqual("deadbeef", couch_log_util:get_msg_id()), - erlang:put(nonce, undefined). - - -level_to_atom_test() -> - lists:foreach(fun(L) -> - ?assert(is_atom(couch_log_util:level_to_atom(L))), - ?assert(is_integer(couch_log_util:level_to_integer(L))), - ?assert(is_list(couch_log_util:level_to_string(L))) - end, levels()). - - -string_p_test() -> - ?assertEqual(false, couch_log_util:string_p([])), - ?assertEqual(false, couch_log_util:string_p([[false]])), - ?assertEqual(true, couch_log_util:string_p([$\n])), - ?assertEqual(true, couch_log_util:string_p([$\r])), - ?assertEqual(true, couch_log_util:string_p([$\t])), - ?assertEqual(true, couch_log_util:string_p([$\v])), - ?assertEqual(true, couch_log_util:string_p([$\b])), - ?assertEqual(true, couch_log_util:string_p([$\f])), - ?assertEqual(true, couch_log_util:string_p([$\e])). - - -levels() -> - [ - 1, 2, 3, 4, 5, 6, 7, 8, 9, - "1", "2", "3", "4", "5", "6", "7", "8", "9", - debug, info, notice, warning, warn, error, err, - critical, crit, alert, emergency, emerg, none, - "debug", "info", "notice", "warning", "warn", "error", "err", - "critical", "crit", "alert", "emergency", "emerg", "none" - ]. diff --git a/src/couch_log/test/couch_log_writer_ets.erl b/src/couch_log/test/couch_log_writer_ets.erl deleted file mode 100644 index d5fd327ac..000000000 --- a/src/couch_log/test/couch_log_writer_ets.erl +++ /dev/null @@ -1,49 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_writer_ets). --behaviour(couch_log_writer). - - --export([ - init/0, - terminate/2, - write/2 -]). - - --include("couch_log.hrl"). - - -init() -> - ets:new(?COUCH_LOG_TEST_TABLE, [named_table, public, ordered_set]), - {ok, 0}. - - -terminate(_, _St) -> - ets:delete(?COUCH_LOG_TEST_TABLE), - ok. - - -write(Entry0, St) -> - Entry = Entry0#log_entry{ - msg = lists:flatten(Entry0#log_entry.msg), - time_stamp = lists:flatten(Entry0#log_entry.time_stamp) - }, - Ignored = application:get_env(couch_log, ignored_pids, []), - case lists:member(Entry#log_entry.pid, Ignored) of - true -> - {ok, St}; - false -> - ets:insert(?COUCH_LOG_TEST_TABLE, {St, Entry}), - {ok, St + 1} - end. 
diff --git a/src/couch_log/test/couch_log_writer_file_test.erl b/src/couch_log/test/couch_log_writer_file_test.erl deleted file mode 100644 index ba042610a..000000000 --- a/src/couch_log/test/couch_log_writer_file_test.erl +++ /dev/null @@ -1,169 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_writer_file_test). - - --include_lib("kernel/include/file.hrl"). --include_lib("couch_log/include/couch_log.hrl"). --include_lib("eunit/include/eunit.hrl"). - - --define(WRITER, couch_log_writer_file). - - -couch_log_writer_file_test_() -> - {setup, - fun couch_log_test_util:start/0, - fun couch_log_test_util:stop/1, - [ - fun check_init_terminate/0, - fun() -> - couch_log_test_util:with_meck( - [{filelib, [unstick]}], - fun check_ensure_dir_fail/0 - ) - end, - fun() -> - couch_log_test_util:with_meck( - [{file, [unstick, passthrough]}], - fun check_open_fail/0 - ) - end, - fun() -> - couch_log_test_util:with_meck( - [{file, [unstick, passthrough]}], - fun check_read_file_info_fail/0 - ) - end, - fun check_file_write/0, - fun check_buffered_file_write/0, - fun check_reopen/0 - ] - }. - - -check_init_terminate() -> - {ok, St} = ?WRITER:init(), - ok = ?WRITER:terminate(stop, St). - - -check_ensure_dir_fail() -> - meck:expect(filelib, ensure_dir, 1, {error, eperm}), - ?assertEqual({error, eperm}, ?WRITER:init()), - ?assert(meck:called(filelib, ensure_dir, 1)), - ?assert(meck:validate(filelib)). - - -check_open_fail() -> - meck:expect(file, open, 2, {error, enotfound}), - ?assertEqual({error, enotfound}, ?WRITER:init()), - ?assert(meck:called(file, open, 2)), - ?assert(meck:validate(file)). - - -check_read_file_info_fail() -> - RFI = fun - ("./couch.log") -> {error, enoent}; - (Path) -> meck:passthrough([Path]) - end, - meck:expect(file, read_file_info, RFI), - ?assertEqual({error, enoent}, ?WRITER:init()), - ?assert(meck:called(file, read_file_info, 1)), - ?assert(meck:validate(file)). - - -check_file_write() -> - % Make sure we have an empty log for this test - IsFile = filelib:is_file("./couch.log"), - if not IsFile -> ok; true -> - file:delete("./couch.log") - end, - - Entry = #log_entry{ - level = info, - pid = list_to_pid("<0.1.0>"), - msg = "stuff", - msg_id = "msg_id", - time_stamp = "time_stamp" - }, - {ok, St} = ?WRITER:init(), - {ok, NewSt} = ?WRITER:write(Entry, St), - ok = ?WRITER:terminate(stop, NewSt), - - {ok, Data} = file:read_file("./couch.log"), - Expect = <<"[info] time_stamp nonode@nohost <0.1.0> msg_id stuff\n">>, - ?assertEqual(Expect, Data). 
- - -check_buffered_file_write() -> - % Make sure we have an empty log for this test - IsFile = filelib:is_file("./couch.log"), - if not IsFile -> ok; true -> - file:delete("./couch.log") - end, - - config:set("log", "write_buffer", "1024"), - config:set("log", "write_delay", "10"), - - try - Entry = #log_entry{ - level = info, - pid = list_to_pid("<0.1.0>"), - msg = "stuff", - msg_id = "msg_id", - time_stamp = "time_stamp" - }, - {ok, St} = ?WRITER:init(), - {ok, NewSt} = ?WRITER:write(Entry, St), - ok = ?WRITER:terminate(stop, NewSt) - after - config:delete("log", "write_buffer"), - config:delete("log", "write_delay") - end, - - {ok, Data} = file:read_file("./couch.log"), - Expect = <<"[info] time_stamp nonode@nohost <0.1.0> msg_id stuff\n">>, - ?assertEqual(Expect, Data). - - -check_reopen() -> - {ok, St1} = clear_clock(?WRITER:init()), - {ok, St2} = clear_clock(couch_log_writer_file:maybe_reopen(St1)), - ?assertEqual(St1, St2), - - case os:type() of - {win32, _} -> - % Windows file handling doesn't work the same - % as Unix where you can move or delete an open - % file so these tests make no sense there. - yay_we_pass; - _ -> - % Delete file - file:delete("./couch.log"), - {ok, St3} = clear_clock(couch_log_writer_file:maybe_reopen(St2)), - ?assert(element(3, St3) /= element(3, St2)), - - % Recreate file - file:delete("./couch.log"), - file:write_file("./couch.log", ""), - {ok, St4} = clear_clock(couch_log_writer_file:maybe_reopen(St3)), - ?assert(element(3, St4) /= element(3, St2)) - end. - - -clear_clock({ok, St}) -> - {ok, clear_clock(St)}; - -clear_clock(St) -> - {st, Path, Fd, INode, _} = St, - {st, Path, Fd, INode, {0, 0, 0}}. diff --git a/src/couch_log/test/couch_log_writer_stderr_test.erl b/src/couch_log/test/couch_log_writer_stderr_test.erl deleted file mode 100644 index 1e99263dd..000000000 --- a/src/couch_log/test/couch_log_writer_stderr_test.erl +++ /dev/null @@ -1,58 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_writer_stderr_test). - - --include_lib("couch_log/include/couch_log.hrl"). --include_lib("eunit/include/eunit.hrl"). - - --define(WRITER, couch_log_writer_stderr). - - -couch_log_writer_stderr_test_() -> - {setup, - fun couch_log_test_util:start/0, - fun couch_log_test_util:stop/1, - [ - fun check_init_terminate/0, - fun() -> - couch_log_test_util:with_meck( - [{io, [unstick]}], - fun check_write/0 - ) - end - ] - }. - - -check_init_terminate() -> - {ok, St} = ?WRITER:init(), - ok = ?WRITER:terminate(stop, St). - - -check_write() -> - meck:expect(io, format, 3, ok), - - Entry = #log_entry{ - level = debug, - pid = list_to_pid("<0.1.0>"), - msg = "stuff", - msg_id = "msg_id", - time_stamp = "time_stamp" - }, - {ok, St} = ?WRITER:init(), - {ok, NewSt} = ?WRITER:write(Entry, St), - ok = ?WRITER:terminate(stop, NewSt), - - ?assert(meck:validate(io)). 
diff --git a/src/couch_log/test/couch_log_writer_syslog_test.erl b/src/couch_log/test/couch_log_writer_syslog_test.erl deleted file mode 100644 index c32b5c6bf..000000000 --- a/src/couch_log/test/couch_log_writer_syslog_test.erl +++ /dev/null @@ -1,122 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_writer_syslog_test). - - --include_lib("couch_log/include/couch_log.hrl"). --include_lib("eunit/include/eunit.hrl"). - - --define(WRITER, couch_log_writer_syslog). - - -couch_log_writer_syslog_test_() -> - {setup, - fun couch_log_test_util:start/0, - fun couch_log_test_util:stop/1, - [ - fun check_init_terminate/0, - fun() -> - couch_log_test_util:with_meck( - [{io, [unstick]}], - fun check_stderr_write/0 - ) - end, - fun() -> - couch_log_test_util:with_meck( - [{gen_udp, [unstick]}], - fun check_udp_send/0 - ) - end - ] - }. - - -check_init_terminate() -> - {ok, St} = ?WRITER:init(), - ok = ?WRITER:terminate(stop, St). - - -check_stderr_write() -> - meck:expect(io, format, 3, ok), - - Entry = #log_entry{ - level = debug, - pid = list_to_pid("<0.1.0>"), - msg = "stuff", - msg_id = "msg_id", - time_stamp = "time_stamp" - }, - {ok, St} = ?WRITER:init(), - {ok, NewSt} = ?WRITER:write(Entry, St), - ok = ?WRITER:terminate(stop, NewSt), - - ?assert(meck:called(io, format, 3)), - ?assert(meck:validate(io)). - - -check_udp_send() -> - meck:expect(gen_udp, open, 1, {ok, socket}), - meck:expect(gen_udp, send, 4, ok), - meck:expect(gen_udp, close, fun(socket) -> ok end), - - config:set("log", "syslog_host", "localhost"), - try - Entry = #log_entry{ - level = debug, - pid = list_to_pid("<0.1.0>"), - msg = "stuff", - msg_id = "msg_id", - time_stamp = "time_stamp" - }, - {ok, St} = ?WRITER:init(), - {ok, NewSt} = ?WRITER:write(Entry, St), - ok = ?WRITER:terminate(stop, NewSt) - after - config:delete("log", "syslog_host") - end, - - ?assert(meck:called(gen_udp, open, 1)), - ?assert(meck:called(gen_udp, send, 4)), - ?assert(meck:called(gen_udp, close, 1)), - ?assert(meck:validate(gen_udp)). - - -facility_test() -> - Names = [ - "kern", "user", "mail", "daemon", "auth", "syslog", "lpr", - "news", "uucp", "clock", "authpriv", "ftp", "ntp", "audit", - "alert", "cron", "local0", "local1", "local2", "local3", - "local4", "local5", "local6", "local7" - ], - lists:foldl(fun(Name, Id) -> - IdStr = lists:flatten(io_lib:format("~w", [Id])), - ?assertEqual(Id bsl 3, couch_log_writer_syslog:get_facility(Name)), - ?assertEqual(Id bsl 3, couch_log_writer_syslog:get_facility(IdStr)), - Id + 1 - end, 0, Names), - ?assertEqual(23 bsl 3, couch_log_writer_syslog:get_facility("foo")), - ?assertEqual(23 bsl 3, couch_log_writer_syslog:get_facility("-1")), - ?assertEqual(23 bsl 3, couch_log_writer_syslog:get_facility("24")). - - -level_test() -> - Levels = [ - emergency, alert, critical, error, - warning, notice, info, debug - ], - lists:foldl(fun(Name, Id) -> - ?assertEqual(Id, couch_log_writer_syslog:get_level(Name)), - Id + 1 - end, 0, Levels), - ?assertEqual(3, couch_log_writer_syslog:get_level(foo)). 
diff --git a/src/couch_log/test/couch_log_writer_test.erl b/src/couch_log/test/couch_log_writer_test.erl deleted file mode 100644 index d0bb347fe..000000000 --- a/src/couch_log/test/couch_log_writer_test.erl +++ /dev/null @@ -1,54 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_log_writer_test). - - --include_lib("couch_log/include/couch_log.hrl"). --include_lib("eunit/include/eunit.hrl"). - - -couch_log_writer_test_() -> - {setup, - fun couch_log_test_util:start/0, - fun couch_log_test_util:stop/1, - [ - fun check_writer_change/0 - ] - }. - - -check_writer_change() -> - % Change to file and back - couch_log_test_util:with_config_listener(fun() -> - config:set("log", "writer", "file"), - couch_log_test_util:wait_for_config(), - ?assertEqual(undefined, ets:info(?COUCH_LOG_TEST_TABLE)), - ?assert(is_pid(whereis(couch_log_server))), - - config:set("log", "writer", "couch_log_writer_ets"), - couch_log_test_util:wait_for_config(), - ?assertEqual(0, ets:info(?COUCH_LOG_TEST_TABLE, size)) - end), - - % Using a bad setting doesn't break things - couch_log_test_util:with_config_listener(fun() -> - config:set("log", "writer", "hopefully not an atom or module"), - couch_log_test_util:wait_for_config(), - ?assertEqual(undefined, ets:info(?COUCH_LOG_TEST_TABLE)), - ?assert(is_pid(whereis(couch_log_server))), - - config:set("log", "writer", "couch_log_writer_ets"), - couch_log_test_util:wait_for_config(), - ?assertEqual(0, ets:info(?COUCH_LOG_TEST_TABLE, size)) - end). - diff --git a/src/couch_log/test/eunit/couch_log_config_listener_test.erl b/src/couch_log/test/eunit/couch_log_config_listener_test.erl new file mode 100644 index 000000000..07abae1ff --- /dev/null +++ b/src/couch_log/test/eunit/couch_log_config_listener_test.erl @@ -0,0 +1,82 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_log_config_listener_test). + + +-include_lib("couch_log/include/couch_log.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-define(TIMEOUT, 1000). + +couch_log_config_test_() -> + {setup, + fun couch_log_test_util:start/0, + fun couch_log_test_util:stop/1, + [ + fun check_restart_listener/0, + fun check_ignore_non_log/0 + ] + }. 
+ + +check_restart_listener() -> + Listener1 = get_listener(), + ?assert(is_process_alive(Listener1)), + + Handler1 = get_handler(), + ?assertNotEqual(not_found, Handler1), + Ref = erlang:monitor(process, Listener1), + ok = gen_event:delete_handler(config_event, get_handler(), testing), + + receive + {'DOWN', Ref, process, _, _} -> + ?assertNot(is_process_alive(Listener1)) + after ?TIMEOUT -> + erlang:error({timeout, config_listener_mon_death}) + end, + + NewHandler = test_util:wait(fun() -> + case get_handler() of + not_found -> wait; + Reply -> Reply + end + end, ?TIMEOUT, 20), + ?assertEqual(Handler1, NewHandler), + + Listener2 = get_listener(), + ?assert(is_process_alive(Listener2)), + ?assertNotEqual(Listener1, Listener2), + ok. + +check_ignore_non_log() -> + Run = fun() -> + couch_log_test_util:with_config_listener(fun() -> + config:set("foo", "bar", "baz"), + couch_log_test_util:wait_for_config() + end) + end, + ?assertError(config_change_timeout, Run()). + + +get_handler() -> + FoldFun = fun + ({config_listener, {couch_log_sup, _}} = H, not_found) -> + H; + (_, Acc) -> + Acc + end, + lists:foldl(FoldFun, not_found, gen_event:which_handlers(config_event)). + +get_listener() -> + Children = supervisor:which_children(couch_log_sup), + hd([Pid || {config_listener_mon, Pid, _, _} <- Children]). diff --git a/src/couch_log/test/eunit/couch_log_config_test.erl b/src/couch_log/test/eunit/couch_log_config_test.erl new file mode 100644 index 000000000..c4677f37f --- /dev/null +++ b/src/couch_log/test/eunit/couch_log_config_test.erl @@ -0,0 +1,110 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_log_config_test). + + +-include_lib("couch_log/include/couch_log.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +couch_log_config_test_() -> + {setup, + fun couch_log_test_util:start/0, + fun couch_log_test_util:stop/1, + [ + fun check_level/0, + fun check_max_message_size/0, + fun check_bad_level/0, + fun check_bad_max_message_size/0 + ] + }. + + +check_level() -> + % Default level is info + ?assertEqual(info, couch_log_config:get(level)), + ?assertEqual(2, couch_log_config:get(level_int)), + + couch_log_test_util:with_config_listener(fun() -> + config:set("log", "level", "emerg"), + couch_log_test_util:wait_for_config(), + ?assertEqual(emergency, couch_log_config:get(level)), + ?assertEqual(8, couch_log_config:get(level_int)), + + config:set("log", "level", "debug"), + couch_log_test_util:wait_for_config(), + ?assertEqual(debug, couch_log_config:get(level)), + ?assertEqual(1, couch_log_config:get(level_int)), + + config:delete("log", "level"), + couch_log_test_util:wait_for_config(), + ?assertEqual(info, couch_log_config:get(level)), + ?assertEqual(2, couch_log_config:get(level_int)) + end). 
+
+
+check_max_message_size() ->
+    % Default is 16000
+    ?assertEqual(16000, couch_log_config:get(max_message_size)),
+
+    couch_log_test_util:with_config_listener(fun() ->
+        config:set("log", "max_message_size", "1024"),
+        couch_log_test_util:wait_for_config(),
+        ?assertEqual(1024, couch_log_config:get(max_message_size)),
+
+        config:delete("log", "max_message_size"),
+        couch_log_test_util:wait_for_config(),
+        ?assertEqual(16000, couch_log_config:get(max_message_size))
+    end).
+
+
+check_bad_level() ->
+    % Default level is info
+    ?assertEqual(info, couch_log_config:get(level)),
+    ?assertEqual(2, couch_log_config:get(level_int)),
+
+    couch_log_test_util:with_config_listener(fun() ->
+        config:set("log", "level", "debug"),
+        couch_log_test_util:wait_for_config(),
+        ?assertEqual(debug, couch_log_config:get(level)),
+        ?assertEqual(1, couch_log_config:get(level_int)),
+
+        config:set("log", "level", "this is not a valid level name"),
+        couch_log_test_util:wait_for_config(),
+        ?assertEqual(info, couch_log_config:get(level)),
+        ?assertEqual(2, couch_log_config:get(level_int)),
+
+        config:delete("log", "level"),
+        couch_log_test_util:wait_for_config(),
+        ?assertEqual(info, couch_log_config:get(level)),
+        ?assertEqual(2, couch_log_config:get(level_int))
+    end).
+
+
+check_bad_max_message_size() ->
+    % Default max_message_size is 16000
+    ?assertEqual(16000, couch_log_config:get(max_message_size)),
+
+    couch_log_test_util:with_config_listener(fun() ->
+        config:set("log", "max_message_size", "1024"),
+        couch_log_test_util:wait_for_config(),
+        ?assertEqual(1024, couch_log_config:get(max_message_size)),
+
+        config:set("log", "max_message_size", "this is not a valid size"),
+        couch_log_test_util:wait_for_config(),
+        ?assertEqual(16000, couch_log_config:get(max_message_size)),
+
+        config:delete("log", "max_message_size"),
+        couch_log_test_util:wait_for_config(),
+        ?assertEqual(16000, couch_log_config:get(max_message_size))
+    end).
diff --git a/src/couch_log/test/eunit/couch_log_error_logger_h_test.erl b/src/couch_log/test/eunit/couch_log_error_logger_h_test.erl
new file mode 100644
index 000000000..b78598fa4
--- /dev/null
+++ b/src/couch_log/test/eunit/couch_log_error_logger_h_test.erl
@@ -0,0 +1,45 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_log_error_logger_h_test).
+
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(HANDLER, couch_log_error_logger_h).
+
+
+couch_log_error_logger_h_test_() ->
+    {setup,
+        fun couch_log_test_util:start/0,
+        fun couch_log_test_util:stop/1,
+        [
+            fun handler_ignores_unknown_messages/0,
+            fun coverage_test/0
+        ]
+    }.
+
+
+handler_ignores_unknown_messages() ->
+    Handlers1 = gen_event:which_handlers(error_logger),
+    ?assert(lists:member(?HANDLER, Handlers1)),
+    ?assertEqual(ignored, gen_event:call(error_logger, ?HANDLER, foo)),
+
+    error_logger ! this_is_a_message,
+    Handlers2 = gen_event:which_handlers(error_logger),
+    ?assert(lists:member(?HANDLER, Handlers2)).
+ + +coverage_test() -> + Resp = couch_log_error_logger_h:code_change(foo, bazinga, baz), + ?assertEqual({ok, bazinga}, Resp). diff --git a/src/couch_log/test/eunit/couch_log_formatter_test.erl b/src/couch_log/test/eunit/couch_log_formatter_test.erl new file mode 100644 index 000000000..795efcf29 --- /dev/null +++ b/src/couch_log/test/eunit/couch_log_formatter_test.erl @@ -0,0 +1,875 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_log_formatter_test). + + +-include("couch_log.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +truncate_fmt_test() -> + Msg = [0 || _ <- lists:seq(1, 1048576)], + Entry = couch_log_formatter:format(info, self(), "~w", [Msg]), + ?assert(length(Entry#log_entry.msg) =< 16000). + + +truncate_test() -> + Msg = [0 || _ <- lists:seq(1, 1048576)], + Entry = couch_log_formatter:format(info, self(), Msg), + ?assert(length(Entry#log_entry.msg) =< 16000). + + +format_reason_test() -> + MsgFmt = "This is a reason: ~r", + Reason = {foo, [{x, k, 3}, {c, d, 2}]}, + Entry = couch_log_formatter:format(info, self(), MsgFmt, [Reason]), + Formatted = "This is a reason: foo at x:k/3 <= c:d/2", + ?assertEqual(Formatted, lists:flatten(Entry#log_entry.msg)). + + +crashing_formatting_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "** Generic server and some stuff", + [a_gen_server, {foo, bar}, server_state] % not enough args! + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event) + ), + do_matches(do_format(Event), [ + "Encountered error {error,{badmatch" + ]). + + +gen_server_error_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "** Generic server and some stuff", + [a_gen_server, {foo, bar}, server_state, some_reason] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event) + ), + do_matches(do_format(Event), [ + "gen_server a_gen_server terminated", + "with reason: some_reason", + "last msg: {foo,bar}", + "state: server_state", + "extra: \\[\\]" + ]). + + +gen_server_error_with_extra_args_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "** Generic server and some stuff", + [a_gen_server, {foo, bar}, server_state, some_reason, sad, args] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event) + ), + do_matches(do_format(Event), [ + "gen_server a_gen_server terminated", + "with reason: some_reason", + "last msg: {foo,bar}", + "state: server_state", + "extra: \\[sad,args\\]" + ]). + + +gen_fsm_error_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "** State machine did a thing", + [a_gen_fsm, {ohai,there}, state_name, curr_state, barf] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event) + ), + do_matches(do_format(Event), [ + "gen_fsm a_gen_fsm in state state_name", + "with reason: barf", + "last msg: {ohai,there}", + "state: curr_state", + "extra: \\[\\]" + ]). 
+ + +gen_fsm_error_with_extra_args_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "** State machine did a thing", + [a_gen_fsm, {ohai,there}, state_name, curr_state, barf, sad, args] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event) + ), + do_matches(do_format(Event), [ + "gen_fsm a_gen_fsm in state state_name", + "with reason: barf", + "last msg: {ohai,there}", + "state: curr_state", + "extra: \\[sad,args\\]" + ]). + + +gen_event_error_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "** gen_event handler did a thing", + [ + handler_id, + a_gen_event, + {ohai,there}, + curr_state, + barf + ] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event) + ), + do_matches(do_format(Event), [ + "gen_event handler_id installed in a_gen_event", + "reason: barf", + "last msg: {ohai,there}", + "state: curr_state" + ]). + + +emulator_error_test() -> + Event = { + error, + erlang:group_leader(), + { + emulator, + "~s~n", + ["A process died and stuff\n"] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = emulator, + msg = "A process died and stuff" + }, + do_format(Event) + ). + + +normal_error_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "format thing: ~w ~w", + [ + first_arg, + second_arg + ] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid, + msg = "format thing: first_arg second_arg" + }, + do_format(Event) + ). + + +error_report_std_error_test() -> + Pid = self(), + Event = { + error_report, + erlang:group_leader(), + { + Pid, + std_error, + [foo, {bar, baz}] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid, + msg = "foo, bar: baz" + }, + do_format(Event) + ). + + +supervisor_report_test() -> + Pid = self(), + % A standard supervisor report + Event1 = { + error_report, + erlang:group_leader(), + { + Pid, + supervisor_report, + [ + {supervisor, sup_name}, + {offender, [ + {id, sup_child}, + {pid, list_to_pid("<0.1.0>")}, + {mfargs, {some_mod, some_fun, 3}} + ]}, + {reason, a_reason}, + {errorContext, some_context} + ] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event1) + ), + do_matches(do_format(Event1), [ + "Supervisor sup_name", + "had child sup_child started with some_mod:some_fun/3 at <0.1.0> exit", + "with reason a_reason", + "in context some_context" + ]), + % Slightly older using name instead of id + % in the offender blob. 
+    Event2 = {
+        error_report,
+        erlang:group_leader(),
+        {
+            Pid,
+            supervisor_report,
+            [
+                {supervisor, sup_name},
+                {offender, [
+                    {name, sup_child},
+                    {pid, list_to_pid("<0.1.0>")},
+                    {mfargs, {some_mod, some_fun, 3}}
+                ]},
+                {reason, a_reason},
+                {errorContext, some_context}
+            ]
+        }
+    },
+    ?assertMatch(
+        #log_entry{
+            level = error,
+            pid = Pid
+        },
+        do_format(Event2)
+    ),
+    do_matches(do_format(Event2), [
+        "Supervisor sup_name",
+        "had child sup_child started with some_mod:some_fun/3 at <0.1.0> exit",
+        "with reason a_reason",
+        "in context some_context"
+    ]),
+    % A supervisor_bridge
+    Event3 = {
+        error_report,
+        erlang:group_leader(),
+        {
+            Pid,
+            supervisor_report,
+            [
+                {supervisor, sup_name},
+                {offender, [
+                    {mod, bridge_mod},
+                    {pid, list_to_pid("<0.1.0>")}
+                ]},
+                {reason, a_reason},
+                {errorContext, some_context}
+            ]
+        }
+    },
+    ?assertMatch(
+        #log_entry{
+            level = error,
+            pid = Pid
+        },
+        do_format(Event3)
+    ),
+    do_matches(do_format(Event3), [
+        "Supervisor sup_name",
+        "had child at module bridge_mod at <0.1.0> exit",
+        "with reason a_reason",
+        "in context some_context"
+    ]),
+    % Any other supervisor report
+    Event4 = {
+        error_report,
+        erlang:group_leader(),
+        {
+            Pid,
+            supervisor_report,
+            [foo, {a, thing}, bang]
+        }
+    },
+    ?assertMatch(
+        #log_entry{
+            level = error,
+            pid = Pid,
+            msg = "SUPERVISOR REPORT foo, a: thing, bang"
+        },
+        do_format(Event4)
+    ).
+
+
+crash_report_test() ->
+    Pid = self(),
+    % A standard crash report
+    Event1 = {
+        error_report,
+        erlang:group_leader(),
+        {
+            Pid,
+            crash_report,
+            [
+                [
+                    {pid, list_to_pid("<0.2.0>")},
+                    {error_info, {
+                        exit,
+                        undef,
+                        [{mod_name, fun_name, [a, b]}]
+                    }}
+                ],
+                [list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")]
+            ]
+        }
+    },
+    ?assertMatch(
+        #log_entry{
+            level = error,
+            pid = Pid
+        },
+        do_format(Event1)
+    ),
+    do_matches(do_format(Event1), [
+        "Process <0.2.0>",
+        "with 2 neighbors",
+        "exited",
+        "reason: call to undefined function mod_name:fun_name\\(a, b\\)"
+    ]),
+    % A registered process crash report
+    Event2 = {
+        error_report,
+        erlang:group_leader(),
+        {
+            Pid,
+            crash_report,
+            [
+                [
+                    {pid, list_to_pid("<0.2.0>")},
+                    {registered_name, couch_log_server},
+                    {error_info, {
+                        exit,
+                        undef,
+                        [{mod_name, fun_name, [a, b]}]
+                    }}
+                ],
+                [list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")]
+            ]
+        }
+    },
+    do_matches(do_format(Event2), [
+        "Process couch_log_server \\(<0.2.0>\\)"
+    ]),
+    % A non-exit crash report
+    Event3 = {
+        error_report,
+        erlang:group_leader(),
+        {
+            Pid,
+            crash_report,
+            [
+                [
+                    {pid, list_to_pid("<0.2.0>")},
+                    {registered_name, couch_log_server},
+                    {error_info, {
+                        killed,
+                        undef,
+                        [{mod_name, fun_name, [a, b]}]
+                    }}
+                ],
+                [list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")]
+            ]
+        }
+    },
+    do_matches(do_format(Event3), [
+        "crashed"
+    ]),
+    % Extra report info
+    Event4 = {
+        error_report,
+        erlang:group_leader(),
+        {
+            Pid,
+            crash_report,
+            [
+                [
+                    {pid, list_to_pid("<0.2.0>")},
+                    {error_info, {
+                        killed,
+                        undef,
+                        [{mod_name, fun_name, [a, b]}]
+                    }},
+                    {another, entry},
+                    yep
+                ],
+                [list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")]
+            ]
+        }
+    },
+    do_matches(do_format(Event4), [
+        "; another: entry, yep"
+    ]).
+ + +warning_report_test() -> + Pid = self(), + % A warning message + Event1 = { + warning_msg, + erlang:group_leader(), + { + Pid, + "a ~s string ~w", + ["format", 7] + } + }, + ?assertMatch( + #log_entry{ + level = warning, + pid = Pid, + msg = "a format string 7" + }, + do_format(Event1) + ), + % A warning report + Event2 = { + warning_report, + erlang:group_leader(), + { + Pid, + std_warning, + [list, 'of', {things, indeed}] + } + }, + ?assertMatch( + #log_entry{ + level = warning, + pid = Pid, + msg = "list, of, things: indeed" + }, + do_format(Event2) + ). + + +info_report_test() -> + Pid = self(), + % An info message + Event1 = { + info_msg, + erlang:group_leader(), + { + Pid, + "an info ~s string ~w", + ["format", 7] + } + }, + ?assertMatch( + #log_entry{ + level = info, + pid = Pid, + msg = "an info format string 7" + }, + do_format(Event1) + ), + % Application exit info + Event2 = { + info_report, + erlang:group_leader(), + { + Pid, + std_info, + [ + {type, no_idea}, + {application, couch_log}, + {exited, red_sox_are_on} + ] + } + }, + ?assertMatch( + #log_entry{ + level = info, + pid = Pid, + msg = "Application couch_log exited with reason: red_sox_are_on" + }, + do_format(Event2) + ), + % Any other std_info message + Event3 = { + info_report, + erlang:group_leader(), + { + Pid, + std_info, + [ + {type, no_idea}, + {application, couch_log} + ] + } + }, + ?assertMatch( + #log_entry{ + level = info, + pid = Pid, + msg = "type: no_idea, application: couch_log" + }, + do_format(Event3) + ), + % Non-list other report + Event4 = { + info_report, + erlang:group_leader(), + { + Pid, + std_info, + dang + } + }, + ?assertMatch( + #log_entry{ + level = info, + pid = Pid, + msg = "dang" + }, + do_format(Event4) + ). + + +progress_report_test() -> + Pid = self(), + % Application started + Event1 = { + info_report, + erlang:group_leader(), + { + Pid, + progress, + [{started_at, 'nonode@nohost'}, {application, app_name}] + } + }, + ?assertMatch( + #log_entry{ + level = info, + pid = Pid, + msg = "Application app_name started on node nonode@nohost" + }, + do_format(Event1) + ), + % Supervisor started child + Event2 = { + info_report, + erlang:group_leader(), + { + Pid, + progress, + [ + {supervisor, sup_dude}, + {started, [ + {mfargs, {mod_name, fun_name, 1}}, + {pid, list_to_pid("<0.5.0>")} + ]} + ] + } + }, + ?assertMatch( + #log_entry{ + level = debug, + pid = Pid, + msg = "Supervisor sup_dude started mod_name:fun_name/1" + " at pid <0.5.0>" + }, + do_format(Event2) + ), + % Other progress report + Event3 = { + info_report, + erlang:group_leader(), + { + Pid, + progress, + [a, {thing, boop}, here] + } + }, + ?assertMatch( + #log_entry{ + level = info, + pid = Pid, + msg = "PROGRESS REPORT a, thing: boop, here" + }, + do_format(Event3) + ). + + +log_unknown_event_test() -> + Pid = self(), + ?assertMatch( + #log_entry{ + level = warning, + pid = Pid, + msg = "Unexpected error_logger event an_unknown_event" + }, + do_format(an_unknown_event) + ). 
+ + +format_reason_test_() -> + Cases = [ + { + {'function not exported', [{a, b, 2}, {c, d, 1}, {e, f, 2}]}, + "call to unexported function a:b/2 at c:d/1 <= e:f/2" + }, + { + {'function not exported', [{a, b, 2, []}, {c, d, 1}, {e, f, 2}]}, + "call to unexported function a:b/2 at c:d/1 <= e:f/2" + }, + { + {undef, [{a, b, 2, []}, {c, d, 1}, {e, f, 2}]}, + "call to undefined function a:b/2 at c:d/1 <= e:f/2" + }, + { + {bad_return, {{a, b, 2}, {'EXIT', killed}}}, + "bad return value {'EXIT',killed} from a:b/2" + }, + { + {bad_return_value, foo}, + "bad return value foo" + }, + { + {{bad_return_value, foo}, {h, i, 0}}, + "bad return value foo at h:i/0" + }, + { + {{badrecord, {foo, 1, 4}}, [{h, i, 0}, {j, k, [a, b]}]}, + "bad record {foo,1,4} at h:i/0 <= j:k/2" + }, + { + {{case_clause, bingo}, [{j, k, 3}, {z, z, 0}]}, + "no case clause matching bingo at j:k/3 <= z:z/0" + }, + { + {function_clause, [{j, k, [a, 2]}, {y, x, 1}]}, + "no function clause matching j:k(a, 2) at y:x/1" + }, + { + {if_clause, [{j, k, [a, 2]}, {y, x, 1}]}, + "no true branch found while evaluating if expression at j:k/2 <= y:x/1" + }, + { + {{try_clause, bango}, [{j, k, [a, 2]}, {y, x, 1}]}, + "no try clause matching bango at j:k/2 <= y:x/1" + }, + { + {badarith, [{j, k, [a, 2]}, {y, x, 1}]}, + "bad arithmetic expression at j:k/2 <= y:x/1" + }, + { + {{badmatch, bongo}, [{j, k, [a, 2]}, {y, x, 1}]}, + "no match of right hand value bongo at j:k/2 <= y:x/1" + }, + { + {emfile, [{j, k, [a, 2]}, {y, x, 1}]}, + "maximum number of file descriptors exhausted, check ulimit -n; j:k/2 <= y:x/1" + }, + { + {system_limit, [{erlang, open_port, []}, {y, x, 1}]}, + "system limit: maximum number of ports exceeded at y:x/1" + }, + { + {system_limit, [{erlang, spawn, []}, {y, x, 1}]}, + "system limit: maximum number of processes exceeded at y:x/1" + }, + { + {system_limit, [{erlang, spawn_opt, []}, {y, x, 1}]}, + "system limit: maximum number of processes exceeded at y:x/1" + }, + { + {system_limit, [{erlang, list_to_atom, ["foo"]}, {y, x, 1}]}, + "system limit: tried to create an atom larger than 255, or maximum atom count exceeded at y:x/1" + }, + { + {system_limit, [{ets, new, []}, {y, x, 1}]}, + "system limit: maximum number of ETS tables exceeded at y:x/1" + }, + { + {system_limit, [{couch_log, totes_logs, []}, {y, x, 1}]}, + "system limit: couch_log:totes_logs() at y:x/1" + }, + { + {badarg, [{j, k, [a, 2]}, {y, x, 1}]}, + "bad argument in call to j:k(a, 2) at y:x/1" + }, + { + {{badarg, [{j, k, [a, 2]}, {y, x, 1}]}, some_ignored_thing}, + "bad argument in call to j:k(a, 2) at y:x/1" + }, + { + {{badarity, {fun erlang:spawn/1, [a, b]}}, [{y, x, 1}]}, + "function called with wrong arity of 2 instead of 1 at y:x/1" + }, + { + {noproc, [{y, x, 1}]}, + "no such process or port in call to y:x/1" + }, + { + {{badfun, 2}, [{y, x, 1}]}, + "bad function 2 called at y:x/1" + }, + { + {a_reason, [{y, x, 1}]}, + "a_reason at y:x/1" + }, + { + {a_reason, [{y, x, 1, [{line, 4}]}]}, + "a_reason at y:x/1(line:4)" + } + ], + [ + {Msg, fun() -> ?assertEqual( + Msg, + lists:flatten(couch_log_formatter:format_reason(Reason)) + ) end} + || {Reason, Msg} <- Cases + ]. 
+
+
+coverage_test() ->
+    % MFAs that aren't actually MFAs
+    ?assertEqual(["foo"], couch_log_formatter:format_mfa(foo)),
+
+    % Traces with line numbers
+    Trace = [{x, y, [a], [{line, 4}]}],
+    ?assertEqual(
+        "x:y/1(line:4)",
+        lists:flatten(couch_log_formatter:format_trace(Trace))
+    ),
+
+    % Exercising print_silly_list with a string
+    ?assertMatch(
+        #log_entry{
+            level = error,
+            msg = "foobar"
+        },
+        do_format({
+            error_report,
+            erlang:group_leader(),
+            {self(), std_error, "foobar"}
+        })
+    ),
+
+    % Exercising print_silly_list with a non-list term
+    ?assertMatch(
+        #log_entry{
+            level = error,
+            msg = "dang"
+        },
+        do_format({
+            error_report,
+            erlang:group_leader(),
+            {self(), std_error, dang}
+        })
+    ).
+
+
+do_format(Event) ->
+    E = couch_log_formatter:format(Event),
+    E#log_entry{
+        msg = lists:flatten(E#log_entry.msg),
+        msg_id = lists:flatten(E#log_entry.msg_id),
+        time_stamp = lists:flatten(E#log_entry.time_stamp)
+    }.
+
+
+do_matches(_, []) ->
+    ok;
+
+do_matches(#log_entry{msg = Msg} = E, [Pattern | RestPatterns]) ->
+    case re:run(Msg, Pattern) of
+        {match, _} ->
+            ok;
+        nomatch ->
+            Err1 = io_lib:format("'~s' does not match '~s'", [Pattern, Msg]),
+            Err2 = lists:flatten(Err1),
+            ?assertEqual(nomatch, Err2)
+    end,
+    do_matches(E, RestPatterns).
diff --git a/src/couch_log/test/eunit/couch_log_monitor_test.erl b/src/couch_log/test/eunit/couch_log_monitor_test.erl
new file mode 100644
index 000000000..eec008522
--- /dev/null
+++ b/src/couch_log/test/eunit/couch_log_monitor_test.erl
@@ -0,0 +1,67 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_log_monitor_test).
+
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(HANDLER, couch_log_error_logger_h).
+
+
+couch_log_monitor_test_() ->
+    {setup,
+        fun couch_log_test_util:start/0,
+        fun couch_log_test_util:stop/1,
+        [
+            fun monitor_ignores_unknown_messages/0,
+            fun monitor_restarts_handler/0,
+            fun coverage_test/0
+        ]
+    }.
+
+
+monitor_ignores_unknown_messages() ->
+    Pid1 = get_monitor_pid(),
+
+    ?assertEqual(ignored, gen_server:call(Pid1, do_foo_please)),
+
+    gen_server:cast(Pid1, do_bar_please),
+    Pid1 ! do_baz_please,
+    timer:sleep(250),
+    ?assert(is_process_alive(Pid1)).
+
+
+monitor_restarts_handler() ->
+    Pid1 = get_monitor_pid(),
+    error_logger:delete_report_handler(?HANDLER),
+    timer:sleep(250),
+
+    ?assert(not is_process_alive(Pid1)),
+
+    Pid2 = get_monitor_pid(),
+    ?assert(is_process_alive(Pid2)),
+
+    Handlers = gen_event:which_handlers(error_logger),
+    ?assert(lists:member(?HANDLER, Handlers)).
+
+
+coverage_test() ->
+    Resp = couch_log_monitor:code_change(foo, bazinga, baz),
+    ?assertEqual({ok, bazinga}, Resp).
+
+
+get_monitor_pid() ->
+    Children = supervisor:which_children(couch_log_sup),
+    [MonPid] = [Pid || {couch_log_monitor, Pid, _, _} <- Children, is_pid(Pid)],
+    MonPid.
diff --git a/src/couch_log/test/eunit/couch_log_server_test.erl b/src/couch_log/test/eunit/couch_log_server_test.erl new file mode 100644 index 000000000..7af570e90 --- /dev/null +++ b/src/couch_log/test/eunit/couch_log_server_test.erl @@ -0,0 +1,118 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_log_server_test). + + +-include("couch_log.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +couch_log_server_test_() -> + {setup, + fun couch_log_test_util:start/0, + fun couch_log_test_util:stop/1, + [ + fun check_can_reconfigure/0, + fun check_can_restart/0, + fun check_can_cast_log_entry/0, + fun check_logs_ignored_messages/0 + ] + }. + + +check_can_reconfigure() -> + couch_log:error("a message", []), + ?assertEqual(0, couch_log_test_util:last_log_key()), + ?assertEqual(ok, couch_log_server:reconfigure()), + ?assertEqual('$end_of_table', couch_log_test_util:last_log_key()), + + couch_log_test_util:with_config_listener(fun() -> + couch_log:error("another message", []), + ?assertEqual(0, couch_log_test_util:last_log_key()), + config:set("log", "some_key", "some_val"), + couch_log_test_util:wait_for_config(), + ?assertEqual('$end_of_table', couch_log_test_util:last_log_key()) + end). + + +check_can_restart() -> + Pid1 = whereis(couch_log_server), + Ref = erlang:monitor(process, Pid1), + ?assert(is_process_alive(Pid1)), + + supervisor:terminate_child(couch_log_sup, couch_log_server), + supervisor:restart_child(couch_log_sup, couch_log_server), + + receive + {'DOWN', Ref, _, _, _} -> ok + after 1000 -> + erlang:error(timeout_restarting_couch_log_server) + end, + + ?assert(not is_process_alive(Pid1)), + + Pid2 = whereis(couch_log_server), + ?assertNotEqual(Pid2, Pid1), + ?assert(is_process_alive(Pid2)). + + +check_can_cast_log_entry() -> + Entry = #log_entry{ + level = critical, + pid = self(), + msg = "this will be casted", + msg_id = "----", + time_stamp = "2016-07-20-almost-my-birthday" + }, + ok = gen_server:cast(couch_log_server, {log, Entry}), + timer:sleep(500), % totes gross + ?assertEqual(Entry, couch_log_test_util:last_log()). + + +check_logs_ignored_messages() -> + gen_server:call(couch_log_server, a_call), + ?assertMatch( + #log_entry{ + level = error, + pid = couch_log_server, + msg = "couch_log_server ignored a_call" + }, + couch_log_test_util:last_log() + ), + + gen_server:cast(couch_log_server, a_cast), + timer:sleep(500), % yes gross + ?assertMatch( + #log_entry{ + level = error, + pid = couch_log_server, + msg = "couch_log_server ignored a_cast" + }, + couch_log_test_util:last_log() + ), + + couch_log_server ! an_info, + timer:sleep(500), % still gross + ?assertMatch( + #log_entry{ + level = error, + pid = couch_log_server, + msg = "couch_log_server ignored an_info" + }, + couch_log_test_util:last_log() + ). + + +coverage_test() -> + Resp = couch_log_server:code_change(foo, bazinga, baz), + ?assertEqual({ok, bazinga}, Resp). 
diff --git a/src/couch_log/test/eunit/couch_log_test.erl b/src/couch_log/test/eunit/couch_log_test.erl new file mode 100644 index 000000000..c7195f65f --- /dev/null +++ b/src/couch_log/test/eunit/couch_log_test.erl @@ -0,0 +1,84 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_log_test). + + +-include_lib("couch_log/include/couch_log.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +couch_log_test_() -> + {setup, + fun couch_log_test_util:start/0, + fun couch_log_test_util:stop/1, + gen() ++ [fun check_set_level/0] + }. + + +check_set_level() -> + couch_log:set_level(crit), + ?assertEqual("crit", config:get("log", "level")). + + +levels() -> + [ + debug, + info, + notice, + warning, + error, + critical, + alert, + emergency, + none + ]. + + +gen() -> + lists:map(fun(L) -> + Name = "Test log level: " ++ couch_log_util:level_to_string(L), + {Name, fun() -> check_levels(L, levels()) end} + end, levels() -- [none]). + + +check_levels(_, []) -> + ok; + +check_levels(TestLevel, [CfgLevel | RestLevels]) -> + TestInt = couch_log_util:level_to_integer(TestLevel), + CfgInt = couch_log_util:level_to_integer(CfgLevel), + Pid = self(), + Msg = new_msg(), + LastKey = couch_log_test_util:last_log_key(), + couch_log_test_util:with_level(CfgLevel, fun() -> + couch_log:TestLevel(Msg, []), + case TestInt >= CfgInt of + true -> + ?assertMatch( + #log_entry{ + level = TestLevel, + pid = Pid, + msg = Msg + }, + couch_log_test_util:last_log() + ); + false -> + ?assertEqual(LastKey, couch_log_test_util:last_log_key()) + end + end), + check_levels(TestLevel, RestLevels). + + +new_msg() -> + Bin = list_to_binary([couch_rand:uniform(255) || _ <- lists:seq(1, 16)]), + couch_util:to_hex(Bin). diff --git a/src/couch_log/test/eunit/couch_log_test_util.erl b/src/couch_log/test/eunit/couch_log_test_util.erl new file mode 100644 index 000000000..00f3981fc --- /dev/null +++ b/src/couch_log/test/eunit/couch_log_test_util.erl @@ -0,0 +1,168 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_log_test_util). + +-export([ + start/0, + stop/1, + last_log/0, + last_log_key/0, + wait_for_config/0, + with_config_listener/1, + with_level/2, + with_meck/2 +]). + +-include("couch_log.hrl"). + + +start() -> + remove_error_loggers(), + application:set_env(config, ini_files, config_files()), + application:start(config), + ignore_common_loggers(), + application:start(couch_log), + meck:new(couch_stats), + ok = meck:expect(couch_stats, increment_counter, ['_'], ok). 
+
+
+stop(_) ->
+    application:stop(config),
+    application:stop(couch_log),
+    meck:unload(couch_stats).
+
+
+with_level(Name, Fun) ->
+    with_config_listener(fun() ->
+        try
+            LevelStr = couch_log_util:level_to_string(Name),
+            config:set("log", "level", LevelStr, false),
+            wait_for_config(),
+            Fun()
+        after
+            config:delete("log", "level", false)
+        end
+    end).
+
+
+with_config_listener(Fun) ->
+    Listener = self(),
+    try
+        add_listener(Listener),
+        Fun()
+    after
+        rem_listener(Listener)
+    end.
+
+
+wait_for_config() ->
+    receive
+        couch_log_config_change_finished -> ok
+    after 1000 ->
+        erlang:error(config_change_timeout)
+    end.
+
+
+with_meck(Mods, Fun) ->
+    lists:foreach(fun(M) ->
+        case M of
+            {Name, Opts} -> meck:new(Name, Opts);
+            Name -> meck:new(Name)
+        end
+    end, Mods),
+    try
+        Fun()
+    after
+        lists:foreach(fun(M) ->
+            case M of
+                {Name, _} -> meck:unload(Name);
+                Name -> meck:unload(Name)
+            end
+        end, Mods)
+    end.
+
+
+ignore_common_loggers() ->
+    IgnoreSet = [
+        application_controller,
+        config,
+        config_event
+    ],
+    lists:foreach(fun(Proc) ->
+        disable_logs_from(Proc)
+    end, IgnoreSet).
+
+
+disable_logs_from(Pid) when is_pid(Pid) ->
+    Ignored = case application:get_env(couch_log, ignored_pids) of
+        {ok, L} when is_list(L) ->
+            lists:usort([Pid | L]);
+        _E ->
+            [Pid]
+    end,
+    IgnoredAlive = [P || P <- Ignored, is_process_alive(P)],
+    application:set_env(couch_log, ignored_pids, IgnoredAlive);
+
+disable_logs_from(Name) when is_atom(Name) ->
+    case whereis(Name) of
+        P when is_pid(P) ->
+            disable_logs_from(P);
+        undefined ->
+            erlang:error({unknown_pid_name, Name})
+    end.
+
+
+last_log_key() ->
+    ets:last(?COUCH_LOG_TEST_TABLE).
+
+
+last_log() ->
+    [{_, Entry}] = ets:lookup(?COUCH_LOG_TEST_TABLE, last_log_key()),
+    Entry.
+
+
+remove_error_loggers() ->
+    ErrorLoggerPid = whereis(error_logger),
+    if ErrorLoggerPid == undefined -> ok; true ->
+        lists:foreach(fun(Handler) ->
+            error_logger:delete_report_handler(Handler)
+        end, gen_event:which_handlers(ErrorLoggerPid))
+    end.
+
+
+config_files() ->
+    Path = filename:dirname(code:which(?MODULE)),
+    Name = filename:join(Path, "couch_log_test.ini"),
+    ok = file:write_file(Name, "[log]\nwriter = ets\n"),
+    [Name].
+
+
+add_listener(Listener) ->
+    Listeners = case application:get_env(couch_log, config_listeners) of
+        {ok, L} when is_list(L) ->
+            lists:usort([Listener | L]);
+        _ ->
+            [Listener]
+    end,
+    application:set_env(couch_log, config_listeners, Listeners).
+
+
+rem_listener(Listener) ->
+    Listeners = case application:get_env(couch_log, config_listeners) of
+        {ok, L} when is_list(L) ->
+            L -- [Listener];
+        _ ->
+            []
+    end,
+    application:set_env(couch_log, config_listeners, Listeners).
+
diff --git a/src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl b/src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl
new file mode 100644
index 000000000..77d555440
--- /dev/null
+++ b/src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl
@@ -0,0 +1,92 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_log_trunc_io_fmt_test).
+ + +-include_lib("eunit/include/eunit.hrl"). + + +format_test_() -> + lists:map(fun({Fmt, Args, Expect}) -> + Name = io_lib:format("~p", [Expect]), + {lists:flatten(Name), + ?_assertEqual( + Expect, + lists:flatten(couch_log_trunc_io_fmt:format(Fmt, Args, 1024)) + ) + } + end, cases()). + + + +chomp_test() -> + R1 = couch_log_trunc_io_fmt:format("\n", [], 1024, [{chomp, true}]), + ?assertEqual("", lists:flatten(R1)), + R2 = couch_log_trunc_io_fmt:format("~n", [], 1024, [{chomp, true}]), + ?assertEqual("", lists:flatten(R2)). + + +cases() -> + [ + {"", [], ""}, + {"stuff\n\t", [], "stuff\n\t"}, + {"~w", [foo], "foo"}, + {"~p", [bar], "bar"}, + {"~W", [{{{2}}}, 2], "{{...}}"}, + {"~P", [{{{ohai}}}, 1], "{...}"}, + {"~s", [[$s, [$t, [$u, [$f, [$f]]]]]], "stuff"}, + {"~4s", ["stuff"], "stuf"}, + {"~8s", ["stuff"], " stuff"}, + {"~.8s", ["stuff"], "stuff "}, + {"~10.4s", ["stuff"], " stuf"}, + {"~10.6s", ["stuff"], " stuff "}, + {"~10.5s", ["stuff"], " stuff"}, + {"~10.10s", ["stuff"], " stuff"}, + {"~r", [{reason, [{x, k, [c, d]}]}], "reason at x:k/2"}, + {"~e", [1.0], "1.00000e+0"}, + {"~f", [1.0], "1.000000"}, + {"~f", [0.000323], "0.000323"}, + {"~f", [31.4], "31.400000"}, + {"~f", [-2.3], "-2.300000"}, + {"~g", [1.0], "1.00000"}, + {"~b", [-15], "-15"}, + {"~b", [15], "15"}, + {"~B", [15], "15"}, + {"~.16b", [15], "f"}, + {"~.16B", [15], "F"}, + {"~.16b", [-15], "-f"}, + {"~.16B", [-15], "-F"}, + {"~.16x", [15, "16#"], "16#f"}, + {"~.16x", [15, '16#'], "16#f"}, + {"~.16x", [-15, "16#"], "-16#f"}, + {"~.16X", [15, "16#"], "16#F"}, + {"~.16X", [15, '16#'], "16#F"}, + {"~.16X", [-15, "16#"], "-16#F"}, + {"~.16#", [15], "16#F"}, + {"~.16+", [15], "16#f"}, + {"~c", [$z], "z"}, + {"~tc", [$g], "g"}, + {"~~", [], "\~"}, + {"~n", [], "\n"}, + {"~2n", [], "\n\n"}, + {"~3n", [], "\n\n\n"}, + {"~i", [ignored], ""}, + {"~2.w", [1], " 1"}, + {"~*w", [2, 1], " 1"}, + {"~-2.w", [1], "1 "}, + {"~2.0. w", [1], " "}, + {"~2.1. w", [1], " 1"}, + {"~2.0.|w", [1], "||"}, + {"~2.1.|w", [1], "|1"}, + {"~2.1.*w", [$q, 1], "q1"} + ]. diff --git a/src/couch_log/test/eunit/couch_log_util_test.erl b/src/couch_log/test/eunit/couch_log_util_test.erl new file mode 100644 index 000000000..e97911aa9 --- /dev/null +++ b/src/couch_log/test/eunit/couch_log_util_test.erl @@ -0,0 +1,55 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_log_util_test). + + +-include_lib("couch_log/include/couch_log.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +get_message_id_test() -> + ?assertEqual("--------", couch_log_util:get_msg_id()), + erlang:put(nonce, "deadbeef"), + ?assertEqual("deadbeef", couch_log_util:get_msg_id()), + erlang:put(nonce, undefined). + + +level_to_atom_test() -> + lists:foreach(fun(L) -> + ?assert(is_atom(couch_log_util:level_to_atom(L))), + ?assert(is_integer(couch_log_util:level_to_integer(L))), + ?assert(is_list(couch_log_util:level_to_string(L))) + end, levels()). 
+ + +string_p_test() -> + ?assertEqual(false, couch_log_util:string_p([])), + ?assertEqual(false, couch_log_util:string_p([[false]])), + ?assertEqual(true, couch_log_util:string_p([$\n])), + ?assertEqual(true, couch_log_util:string_p([$\r])), + ?assertEqual(true, couch_log_util:string_p([$\t])), + ?assertEqual(true, couch_log_util:string_p([$\v])), + ?assertEqual(true, couch_log_util:string_p([$\b])), + ?assertEqual(true, couch_log_util:string_p([$\f])), + ?assertEqual(true, couch_log_util:string_p([$\e])). + + +levels() -> + [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, + "1", "2", "3", "4", "5", "6", "7", "8", "9", + debug, info, notice, warning, warn, error, err, + critical, crit, alert, emergency, emerg, none, + "debug", "info", "notice", "warning", "warn", "error", "err", + "critical", "crit", "alert", "emergency", "emerg", "none" + ]. diff --git a/src/couch_log/test/eunit/couch_log_writer_ets.erl b/src/couch_log/test/eunit/couch_log_writer_ets.erl new file mode 100644 index 000000000..d5fd327ac --- /dev/null +++ b/src/couch_log/test/eunit/couch_log_writer_ets.erl @@ -0,0 +1,49 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_log_writer_ets). +-behaviour(couch_log_writer). + + +-export([ + init/0, + terminate/2, + write/2 +]). + + +-include("couch_log.hrl"). + + +init() -> + ets:new(?COUCH_LOG_TEST_TABLE, [named_table, public, ordered_set]), + {ok, 0}. + + +terminate(_, _St) -> + ets:delete(?COUCH_LOG_TEST_TABLE), + ok. + + +write(Entry0, St) -> + Entry = Entry0#log_entry{ + msg = lists:flatten(Entry0#log_entry.msg), + time_stamp = lists:flatten(Entry0#log_entry.time_stamp) + }, + Ignored = application:get_env(couch_log, ignored_pids, []), + case lists:member(Entry#log_entry.pid, Ignored) of + true -> + {ok, St}; + false -> + ets:insert(?COUCH_LOG_TEST_TABLE, {St, Entry}), + {ok, St + 1} + end. diff --git a/src/couch_log/test/eunit/couch_log_writer_file_test.erl b/src/couch_log/test/eunit/couch_log_writer_file_test.erl new file mode 100644 index 000000000..ba042610a --- /dev/null +++ b/src/couch_log/test/eunit/couch_log_writer_file_test.erl @@ -0,0 +1,169 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_log_writer_file_test). + + +-include_lib("kernel/include/file.hrl"). +-include_lib("couch_log/include/couch_log.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +-define(WRITER, couch_log_writer_file). 
+ + +couch_log_writer_file_test_() -> + {setup, + fun couch_log_test_util:start/0, + fun couch_log_test_util:stop/1, + [ + fun check_init_terminate/0, + fun() -> + couch_log_test_util:with_meck( + [{filelib, [unstick]}], + fun check_ensure_dir_fail/0 + ) + end, + fun() -> + couch_log_test_util:with_meck( + [{file, [unstick, passthrough]}], + fun check_open_fail/0 + ) + end, + fun() -> + couch_log_test_util:with_meck( + [{file, [unstick, passthrough]}], + fun check_read_file_info_fail/0 + ) + end, + fun check_file_write/0, + fun check_buffered_file_write/0, + fun check_reopen/0 + ] + }. + + +check_init_terminate() -> + {ok, St} = ?WRITER:init(), + ok = ?WRITER:terminate(stop, St). + + +check_ensure_dir_fail() -> + meck:expect(filelib, ensure_dir, 1, {error, eperm}), + ?assertEqual({error, eperm}, ?WRITER:init()), + ?assert(meck:called(filelib, ensure_dir, 1)), + ?assert(meck:validate(filelib)). + + +check_open_fail() -> + meck:expect(file, open, 2, {error, enotfound}), + ?assertEqual({error, enotfound}, ?WRITER:init()), + ?assert(meck:called(file, open, 2)), + ?assert(meck:validate(file)). + + +check_read_file_info_fail() -> + RFI = fun + ("./couch.log") -> {error, enoent}; + (Path) -> meck:passthrough([Path]) + end, + meck:expect(file, read_file_info, RFI), + ?assertEqual({error, enoent}, ?WRITER:init()), + ?assert(meck:called(file, read_file_info, 1)), + ?assert(meck:validate(file)). + + +check_file_write() -> + % Make sure we have an empty log for this test + IsFile = filelib:is_file("./couch.log"), + if not IsFile -> ok; true -> + file:delete("./couch.log") + end, + + Entry = #log_entry{ + level = info, + pid = list_to_pid("<0.1.0>"), + msg = "stuff", + msg_id = "msg_id", + time_stamp = "time_stamp" + }, + {ok, St} = ?WRITER:init(), + {ok, NewSt} = ?WRITER:write(Entry, St), + ok = ?WRITER:terminate(stop, NewSt), + + {ok, Data} = file:read_file("./couch.log"), + Expect = <<"[info] time_stamp nonode@nohost <0.1.0> msg_id stuff\n">>, + ?assertEqual(Expect, Data). + + +check_buffered_file_write() -> + % Make sure we have an empty log for this test + IsFile = filelib:is_file("./couch.log"), + if not IsFile -> ok; true -> + file:delete("./couch.log") + end, + + config:set("log", "write_buffer", "1024"), + config:set("log", "write_delay", "10"), + + try + Entry = #log_entry{ + level = info, + pid = list_to_pid("<0.1.0>"), + msg = "stuff", + msg_id = "msg_id", + time_stamp = "time_stamp" + }, + {ok, St} = ?WRITER:init(), + {ok, NewSt} = ?WRITER:write(Entry, St), + ok = ?WRITER:terminate(stop, NewSt) + after + config:delete("log", "write_buffer"), + config:delete("log", "write_delay") + end, + + {ok, Data} = file:read_file("./couch.log"), + Expect = <<"[info] time_stamp nonode@nohost <0.1.0> msg_id stuff\n">>, + ?assertEqual(Expect, Data). + + +check_reopen() -> + {ok, St1} = clear_clock(?WRITER:init()), + {ok, St2} = clear_clock(couch_log_writer_file:maybe_reopen(St1)), + ?assertEqual(St1, St2), + + case os:type() of + {win32, _} -> + % Windows file handling doesn't work the same + % as Unix where you can move or delete an open + % file so these tests make no sense there. + yay_we_pass; + _ -> + % Delete file + file:delete("./couch.log"), + {ok, St3} = clear_clock(couch_log_writer_file:maybe_reopen(St2)), + ?assert(element(3, St3) /= element(3, St2)), + + % Recreate file + file:delete("./couch.log"), + file:write_file("./couch.log", ""), + {ok, St4} = clear_clock(couch_log_writer_file:maybe_reopen(St3)), + ?assert(element(3, St4) /= element(3, St2)) + end. 
+ + +clear_clock({ok, St}) -> + {ok, clear_clock(St)}; + +clear_clock(St) -> + {st, Path, Fd, INode, _} = St, + {st, Path, Fd, INode, {0, 0, 0}}. diff --git a/src/couch_log/test/eunit/couch_log_writer_stderr_test.erl b/src/couch_log/test/eunit/couch_log_writer_stderr_test.erl new file mode 100644 index 000000000..1e99263dd --- /dev/null +++ b/src/couch_log/test/eunit/couch_log_writer_stderr_test.erl @@ -0,0 +1,58 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_log_writer_stderr_test). + + +-include_lib("couch_log/include/couch_log.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +-define(WRITER, couch_log_writer_stderr). + + +couch_log_writer_stderr_test_() -> + {setup, + fun couch_log_test_util:start/0, + fun couch_log_test_util:stop/1, + [ + fun check_init_terminate/0, + fun() -> + couch_log_test_util:with_meck( + [{io, [unstick]}], + fun check_write/0 + ) + end + ] + }. + + +check_init_terminate() -> + {ok, St} = ?WRITER:init(), + ok = ?WRITER:terminate(stop, St). + + +check_write() -> + meck:expect(io, format, 3, ok), + + Entry = #log_entry{ + level = debug, + pid = list_to_pid("<0.1.0>"), + msg = "stuff", + msg_id = "msg_id", + time_stamp = "time_stamp" + }, + {ok, St} = ?WRITER:init(), + {ok, NewSt} = ?WRITER:write(Entry, St), + ok = ?WRITER:terminate(stop, NewSt), + + ?assert(meck:validate(io)). diff --git a/src/couch_log/test/eunit/couch_log_writer_syslog_test.erl b/src/couch_log/test/eunit/couch_log_writer_syslog_test.erl new file mode 100644 index 000000000..c32b5c6bf --- /dev/null +++ b/src/couch_log/test/eunit/couch_log_writer_syslog_test.erl @@ -0,0 +1,122 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_log_writer_syslog_test). + + +-include_lib("couch_log/include/couch_log.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +-define(WRITER, couch_log_writer_syslog). + + +couch_log_writer_syslog_test_() -> + {setup, + fun couch_log_test_util:start/0, + fun couch_log_test_util:stop/1, + [ + fun check_init_terminate/0, + fun() -> + couch_log_test_util:with_meck( + [{io, [unstick]}], + fun check_stderr_write/0 + ) + end, + fun() -> + couch_log_test_util:with_meck( + [{gen_udp, [unstick]}], + fun check_udp_send/0 + ) + end + ] + }. + + +check_init_terminate() -> + {ok, St} = ?WRITER:init(), + ok = ?WRITER:terminate(stop, St). 
+ + +check_stderr_write() -> + meck:expect(io, format, 3, ok), + + Entry = #log_entry{ + level = debug, + pid = list_to_pid("<0.1.0>"), + msg = "stuff", + msg_id = "msg_id", + time_stamp = "time_stamp" + }, + {ok, St} = ?WRITER:init(), + {ok, NewSt} = ?WRITER:write(Entry, St), + ok = ?WRITER:terminate(stop, NewSt), + + ?assert(meck:called(io, format, 3)), + ?assert(meck:validate(io)). + + +check_udp_send() -> + meck:expect(gen_udp, open, 1, {ok, socket}), + meck:expect(gen_udp, send, 4, ok), + meck:expect(gen_udp, close, fun(socket) -> ok end), + + config:set("log", "syslog_host", "localhost"), + try + Entry = #log_entry{ + level = debug, + pid = list_to_pid("<0.1.0>"), + msg = "stuff", + msg_id = "msg_id", + time_stamp = "time_stamp" + }, + {ok, St} = ?WRITER:init(), + {ok, NewSt} = ?WRITER:write(Entry, St), + ok = ?WRITER:terminate(stop, NewSt) + after + config:delete("log", "syslog_host") + end, + + ?assert(meck:called(gen_udp, open, 1)), + ?assert(meck:called(gen_udp, send, 4)), + ?assert(meck:called(gen_udp, close, 1)), + ?assert(meck:validate(gen_udp)). + + +facility_test() -> + Names = [ + "kern", "user", "mail", "daemon", "auth", "syslog", "lpr", + "news", "uucp", "clock", "authpriv", "ftp", "ntp", "audit", + "alert", "cron", "local0", "local1", "local2", "local3", + "local4", "local5", "local6", "local7" + ], + lists:foldl(fun(Name, Id) -> + IdStr = lists:flatten(io_lib:format("~w", [Id])), + ?assertEqual(Id bsl 3, couch_log_writer_syslog:get_facility(Name)), + ?assertEqual(Id bsl 3, couch_log_writer_syslog:get_facility(IdStr)), + Id + 1 + end, 0, Names), + ?assertEqual(23 bsl 3, couch_log_writer_syslog:get_facility("foo")), + ?assertEqual(23 bsl 3, couch_log_writer_syslog:get_facility("-1")), + ?assertEqual(23 bsl 3, couch_log_writer_syslog:get_facility("24")). + + +level_test() -> + Levels = [ + emergency, alert, critical, error, + warning, notice, info, debug + ], + lists:foldl(fun(Name, Id) -> + ?assertEqual(Id, couch_log_writer_syslog:get_level(Name)), + Id + 1 + end, 0, Levels), + ?assertEqual(3, couch_log_writer_syslog:get_level(foo)). diff --git a/src/couch_log/test/eunit/couch_log_writer_test.erl b/src/couch_log/test/eunit/couch_log_writer_test.erl new file mode 100644 index 000000000..d0bb347fe --- /dev/null +++ b/src/couch_log/test/eunit/couch_log_writer_test.erl @@ -0,0 +1,54 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_log_writer_test). + + +-include_lib("couch_log/include/couch_log.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +couch_log_writer_test_() -> + {setup, + fun couch_log_test_util:start/0, + fun couch_log_test_util:stop/1, + [ + fun check_writer_change/0 + ] + }. 
+ + +check_writer_change() -> + % Change to file and back + couch_log_test_util:with_config_listener(fun() -> + config:set("log", "writer", "file"), + couch_log_test_util:wait_for_config(), + ?assertEqual(undefined, ets:info(?COUCH_LOG_TEST_TABLE)), + ?assert(is_pid(whereis(couch_log_server))), + + config:set("log", "writer", "couch_log_writer_ets"), + couch_log_test_util:wait_for_config(), + ?assertEqual(0, ets:info(?COUCH_LOG_TEST_TABLE, size)) + end), + + % Using a bad setting doesn't break things + couch_log_test_util:with_config_listener(fun() -> + config:set("log", "writer", "hopefully not an atom or module"), + couch_log_test_util:wait_for_config(), + ?assertEqual(undefined, ets:info(?COUCH_LOG_TEST_TABLE)), + ?assert(is_pid(whereis(couch_log_server))), + + config:set("log", "writer", "couch_log_writer_ets"), + couch_log_test_util:wait_for_config(), + ?assertEqual(0, ets:info(?COUCH_LOG_TEST_TABLE, size)) + end). + diff --git a/src/couch_mrview/test/couch_mrview_all_docs_tests.erl b/src/couch_mrview/test/couch_mrview_all_docs_tests.erl deleted file mode 100644 index bf8eb7e5b..000000000 --- a/src/couch_mrview/test/couch_mrview_all_docs_tests.erl +++ /dev/null @@ -1,140 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_all_docs_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - - - -setup() -> - {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map), - Db. - -teardown(Db) -> - couch_db:close(Db), - couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), - ok. - - -all_docs_test_() -> - { - "_all_docs view tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_query/1, - fun should_query_with_range/1, - fun should_query_with_range_rev/1, - fun should_query_with_limit_and_skip/1, - fun should_query_with_include_docs/1, - fun should_query_empty_views/1 - ] - } - } - }. - - -should_query(Db) -> - Result = run_query(Db, []), - Expect = {ok, [ - {meta, [{total, 11}, {offset, 0}]}, - mk_row(<<"1">>, <<"1-08d53a5760b95fce6df2e2c5b008be39">>), - mk_row(<<"10">>, <<"1-a05b6ea2bc0243949f103d5b4f15f71e">>), - mk_row(<<"2">>, <<"1-b57c77a9e6f7574ca6469f0d6dcd78bb">>), - mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>), - mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>), - mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>), - mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>), - mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>), - mk_row(<<"8">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>), - mk_row(<<"9">>, <<"1-558c8487d9aee25399a91b5d31d90fe2">>), - mk_row(<<"_design/bar">>, <<"1-a44e1dd1994a7717bf89c894ebd1f081">>) - ]}, - ?_assertEqual(Expect, Result). 
- -should_query_with_range(Db) -> - Result = run_query(Db, [{start_key, <<"3">>}, {end_key, <<"5">>}]), - Expect = {ok, [ - {meta, [{total, 11}, {offset, 3}]}, - mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>), - mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>), - mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>) - ]}, - ?_assertEqual(Expect, Result). - -should_query_with_range_rev(Db) -> - Result = run_query(Db, [ - {direction, rev}, - {start_key, <<"5">>}, {end_key, <<"3">>}, - {inclusive_end, true} - ]), - Expect = {ok, [ - {meta, [{total, 11}, {offset, 5}]}, - mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>), - mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>), - mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>) - ]}, - ?_assertEqual(Expect, Result). - -should_query_with_limit_and_skip(Db) -> - Result = run_query(Db, [ - {start_key, <<"2">>}, - {limit, 3}, - {skip, 3} - ]), - Expect = {ok, [ - {meta, [{total, 11}, {offset, 5}]}, - mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>), - mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>), - mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>) - ]}, - ?_assertEqual(Expect, Result). - -should_query_with_include_docs(Db) -> - Result = run_query(Db, [ - {start_key, <<"8">>}, - {end_key, <<"8">>}, - {include_docs, true} - ]), - Doc = {[ - {<<"_id">>,<<"8">>}, - {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>}, - {<<"val">>, 8} - ]}, - Val = {[{rev, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>}]}, - Expect = {ok, [ - {meta, [{total, 11}, {offset, 8}]}, - {row, [{id, <<"8">>}, {key, <<"8">>}, {value, Val}, {doc, Doc}]} - ]}, - ?_assertEqual(Expect, Result). - -should_query_empty_views(Db) -> - Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>), - Expect = {ok, [ - {meta, [{total, 0}, {offset, 0}]} - ]}, - ?_assertEqual(Expect, Result). - - -mk_row(Id, Rev) -> - {row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}. - -run_query(Db, Opts) -> - couch_mrview:query_all_docs(Db, Opts). diff --git a/src/couch_mrview/test/couch_mrview_changes_since_tests.erl b/src/couch_mrview/test/couch_mrview_changes_since_tests.erl deleted file mode 100644 index d670e109b..000000000 --- a/src/couch_mrview/test/couch_mrview_changes_since_tests.erl +++ /dev/null @@ -1,210 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_changes_since_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - -teardown(Db) -> - couch_db:close(Db), - couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), - ok. 
- -changes_since_basic_test_() -> - { - "changes_since tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun() -> - Type = {changes, seq_indexed}, - {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), Type), - Db - end, - fun teardown/1, - [ - fun test_basic/1, - fun test_basic_since/1, - fun test_basic_count/1, - fun test_basic_count_since/1, - fun test_compact/1 - ] - } - } - }. - -changes_since_range_test_() -> - { - "changes_since_range tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun() -> - Type = {changes, keyseq_indexed}, - {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), Type), - Db - end, - fun teardown/1, - [ - fun test_range/1, - fun test_range_since/1 - ] - } - } - }. - -changes_since_range_count_test_() -> - { - "changes_since_range_count tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun() -> - Type = {changes, seq_indexed_keyseq_indexed}, - {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), Type), - Db - end, - fun teardown/1, - [ - fun test_range_count/1, - fun test_range_count_since/1, - fun test_remove_key/1 - ] - } - } - }. - -test_basic(Db) -> - Result = run_query(Db, 0, []), - Expect = {ok, [ - {{2, <<"1">>, <<"1">>}, 1}, - {{3, <<"10">>, <<"10">>}, 10}, - {{4, <<"2">>, <<"2">>}, 2}, - {{5, <<"3">>, <<"3">>}, 3}, - {{6, <<"4">>, <<"4">>}, 4}, - {{7, <<"5">>, <<"5">>}, 5}, - {{8, <<"6">>, <<"6">>}, 6}, - {{9, <<"7">>, <<"7">>}, 7}, - {{10, <<"8">>, <<"8">>}, 8}, - {{11, <<"9">>, <<"9">>}, 9} - ]}, - ?_assertEqual(Result, Expect). - - -test_range(Db) -> - Range = [{start_key, <<"3">>}, {end_key, <<"5">>}], - Result = run_query(Db, 0, Range), - Expect = {ok, [ - {{5, <<"3">>, <<"3">>}, 3}, - {{6, <<"4">>, <<"4">>}, 4}, - {{7, <<"5">>, <<"5">>}, 5} - ]}, - ?_assertEqual(Result, Expect). - -test_basic_since(Db) -> - Result = run_query(Db, 5, []), - Expect = {ok, [ - {{6, <<"4">>, <<"4">>}, 4}, - {{7, <<"5">>, <<"5">>}, 5}, - {{8, <<"6">>, <<"6">>}, 6}, - {{9, <<"7">>, <<"7">>}, 7}, - {{10, <<"8">>, <<"8">>}, 8}, - {{11, <<"9">>, <<"9">>}, 9} - ]}, - ?_assertEqual(Result, Expect). - -test_range_since(Db) -> - Range = [{start_key, <<"3">>}, {end_key, <<"5">>}], - Result = run_query(Db, 5, Range), - Expect = {ok, [ - {{6, <<"4">>, <<"4">>}, 4}, - {{7, <<"5">>, <<"5">>}, 5} - ]}, - ?_assertEqual(Result, Expect). - -test_basic_count(Db) -> - Result = run_count_query(Db, 0, []), - ?_assertEqual(Result, 10). - -test_range_count(Db) -> - Range = [{start_key, <<"3">>}, {end_key, <<"5">>}], - Result = run_count_query(Db, 0, Range), - ?_assertEqual(Result, 3). - -test_basic_count_since(Db) -> - Result = run_count_query(Db, 5, []), - ?_assertEqual(Result, 6). - -test_range_count_since(Db) -> - Range = [{start_key, <<"3">>}, {end_key, <<"5">>}], - Result = run_count_query(Db, 5, Range), - ?_assertEqual(Result, 2). - -test_compact(Db) -> - Result = couch_mrview:compact(Db, <<"_design/bar">>), - Count = run_count_query(Db, 0, []), - [?_assertEqual(Result, ok), ?_assertEqual(Count, 10)]. 
- -test_remove_key(Db) -> - %% add new doc - Doc = couch_mrview_test_util:doc(11), - {ok, Rev} = couch_db:update_doc(Db, Doc, []), - RevStr = couch_doc:rev_to_str(Rev), - {ok, _} = couch_db:ensure_full_commit(Db), - {ok, Db1} = couch_db:reopen(Db), - Result = run_count_query(Db1, 0, []), - %% check new view key - Range = [{start_key, <<"11">>}, {end_key, <<"11">>}], - Result1 = run_query(Db1, 0, Range), - Expect = {ok, [ - {{12, <<"11">>, <<"11">>}, 11} - ]}, - - %% delete doc - Doc2 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"11">>}, - {<<"_rev">>, RevStr}, - {<<"_deleted">>, true} - ]}), - {ok, _} = couch_db:update_doc(Db1, Doc2, []), - {ok, Db2} = couch_db:reopen(Db1), - Result2 = run_count_query(Db2, 0, []), - %% check new view key - Result3 = run_query(Db2, 0, Range), - Expect2 = {ok, [ - {{13, <<"11">>, <<"11">>}, removed} - ]}, - [ - ?_assertEqual(Result, 11), - ?_assertEqual(Result1, Expect), - ?_assertEqual(Result2, 11), - ?_assertEqual(Result3, Expect2) - ]. - -run_query(Db, Since, Opts) -> - Fun = fun(KV, Acc) -> {ok, [KV | Acc]} end, - {ok, R} = couch_mrview:view_changes_since(Db, <<"_design/bar">>, <<"baz">>, - Since, Fun, Opts, []), - {ok, lists:reverse(R)}. - -run_count_query(Db, Since, Opts) -> - couch_mrview:count_view_changes_since(Db, <<"_design/bar">>, <<"baz">>, - Since, Opts). diff --git a/src/couch_mrview/test/couch_mrview_collation_tests.erl b/src/couch_mrview/test/couch_mrview_collation_tests.erl deleted file mode 100644 index 5c8cb54b1..000000000 --- a/src/couch_mrview/test/couch_mrview_collation_tests.erl +++ /dev/null @@ -1,207 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_collation_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). --define(VALUES, [ - null, - false, - true, - - 1, - 2, - 3.0, - 4, - - <<"a">>, - <<"A">>, - <<"aa">>, - <<"b">>, - <<"B">>, - <<"ba">>, - <<"bb">>, - - % U+200B is a zero-width space, which will be ignored by ICU but will cause - % the raw collator to treat these as three distinct keys - <<"c">>, - unicode:characters_to_binary([$c, 16#200B]), - unicode:characters_to_binary([$c, 16#200B, 16#200B]), - - [<<"a">>], - [<<"b">>], - [<<"b">>, <<"c">>], - [<<"b">>, <<"c">>, <<"a">>], - [<<"b">>, <<"d">>], - [<<"b">>, <<"d">>, <<"e">>], - - {[{<<"a">>, 1}]}, - {[{<<"a">>, 2}]}, - {[{<<"b">>, 1}]}, - {[{<<"b">>, 2}]}, - {[{<<"b">>, 2}, {<<"a">>, 1}]}, - {[{<<"b">>, 2}, {<<"c">>, 2}]} -]). - - -setup() -> - {ok, Db1} = couch_mrview_test_util:new_db(?tempdb(), map), - Docs = [couch_mrview_test_util:ddoc(red) | make_docs()], - {ok, Db2} = couch_mrview_test_util:save_docs(Db1, Docs), - Db2. - -teardown(Db) -> - couch_db:close(Db), - couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), - ok. 
- - -collation_test_() -> - { - "Collation tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_collate_fwd/1, - fun should_collate_rev/1, - fun should_collate_range_/1, - fun should_collate_with_inclusive_end_fwd/1, - fun should_collate_with_inclusive_end_rev/1, - fun should_collate_without_inclusive_end_fwd/1, - fun should_collate_without_inclusive_end_rev/1, - fun should_collate_with_endkey_docid/1, - fun should_use_collator_for_reduce_grouping/1 - ] - } - } - }. - - -should_collate_fwd(Db) -> - {ok, Results} = run_query(Db, []), - Expect = [{meta, [{total, length(?VALUES)}, {offset, 0}]}] ++ rows(), - ?_assertEquiv(Expect, Results). - -should_collate_rev(Db) -> - {ok, Results} = run_query(Db, [{direction, rev}]), - Expect = [{meta, [{total, length(?VALUES)}, {offset, 0}]}] ++ lists:reverse(rows()), - ?_assertEquiv(Expect, Results). - -should_collate_range_(Db) -> - Index = lists:zip(lists:seq(0, length(?VALUES)-1), ?VALUES), - lists:map(fun(V) -> - {ok, Results} = run_query(Db, [{start_key, V}, {end_key, V}]), - Expect = [ - {meta, [{total, length(?VALUES)}, find_offset(Index, V)]} | - find_matching_rows(Index, V) - ], - ?_assertEquiv(Expect, Results) - end, ?VALUES). - -find_offset(Index, Value) -> - [{Offset, _} | _] = lists:dropwhile(fun({_, V}) -> - couch_ejson_compare:less(Value, V) =/= 0 - end, Index), - {offset, Offset}. - -find_matching_rows(Index, Value) -> - Matches = lists:filter(fun({_, V}) -> - couch_ejson_compare:less(Value, V) =:= 0 - end, Index), - lists:map(fun({Id, V}) -> - {row, [{id, list_to_binary(integer_to_list(Id))}, {key, V}, {value, 0}]} - end, Matches). - -should_collate_with_inclusive_end_fwd(Db) -> - Opts = [{end_key, <<"b">>}, {inclusive_end, true}], - {ok, Rows0} = run_query(Db, Opts), - LastRow = lists:last(Rows0), - Expect = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]}, - ?_assertEqual(Expect, LastRow). - -should_collate_with_inclusive_end_rev(Db) -> - Opts = [{end_key, <<"b">>}, {inclusive_end, true}, {direction, rev}], - {ok, Rows} = run_query(Db, Opts), - LastRow = lists:last(Rows), - Expect = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]}, - ?_assertEqual(Expect, LastRow). - -should_collate_without_inclusive_end_fwd(Db) -> - Opts = [{end_key, <<"b">>}, {inclusive_end, false}], - {ok, Rows0} = run_query(Db, Opts), - LastRow = lists:last(Rows0), - Expect = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]}, - ?_assertEqual(Expect, LastRow). - -should_collate_without_inclusive_end_rev(Db) -> - Opts = [{end_key, <<"b">>}, {inclusive_end, false}, {direction, rev}], - {ok, Rows} = run_query(Db, Opts), - LastRow = lists:last(Rows), - Expect = {row, [{id,<<"11">>}, {key,<<"B">>}, {value,0}]}, - ?_assertEqual(Expect, LastRow). - -should_collate_with_endkey_docid(Db) -> - ?_test(begin - {ok, Rows0} = run_query(Db, [ - {end_key, <<"b">>}, {end_key_docid, <<"10">>}, - {inclusive_end, false} - ]), - Result0 = lists:last(Rows0), - Expect0 = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]}, - ?assertEqual(Expect0, Result0), - - {ok, Rows1} = run_query(Db, [ - {end_key, <<"b">>}, {end_key_docid, <<"11">>}, - {inclusive_end, false} - ]), - Result1 = lists:last(Rows1), - Expect1 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]}, - ?assertEqual(Expect1, Result1) - end). 
- -should_use_collator_for_reduce_grouping(Db) -> - UniqueKeys = lists:usort(fun(A, B) -> - not couch_ejson_compare:less_json(B, A) - end, ?VALUES), - {ok, [{meta,_} | Rows]} = reduce_query(Db, [{group_level, exact}]), - ?_assertEqual(length(UniqueKeys), length(Rows)). - -make_docs() -> - {Docs, _} = lists:foldl(fun(V, {Docs0, Count}) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, list_to_binary(integer_to_list(Count))}, - {<<"foo">>, V} - ]}), - {[Doc | Docs0], Count+1} - end, {[], 0}, ?VALUES), - Docs. - -rows() -> - {Rows, _} = lists:foldl(fun(V, {Rows0, Count}) -> - Id = list_to_binary(integer_to_list(Count)), - Row = {row, [{id, Id}, {key, V}, {value, 0}]}, - {[Row | Rows0], Count+1} - end, {[], 0}, ?VALUES), - lists:reverse(Rows). - -run_query(Db, Opts) -> - couch_mrview:query_view(Db, <<"_design/bar">>, <<"zing">>, Opts). - -reduce_query(Db, Opts) -> - couch_mrview:query_view(Db, <<"_design/red">>, <<"zing">>, Opts). diff --git a/src/couch_mrview/test/couch_mrview_compact_tests.erl b/src/couch_mrview/test/couch_mrview_compact_tests.erl deleted file mode 100644 index 7664becdc..000000000 --- a/src/couch_mrview/test/couch_mrview_compact_tests.erl +++ /dev/null @@ -1,115 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_compact_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - - -setup() -> - {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map, 1000), - ok = meck:new(couch_mrview_compactor, [passthrough]), - Db. - -teardown(Db) -> - meck:unload(), - couch_db:close(Db), - couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), - ok. - - -compaction_test_() -> - { - "Compaction tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_swap/1, - fun should_remove/1 - ] - } - } - }. - - -should_swap(Db) -> - ?_test(begin - couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>), - {ok, QPid} = start_query(Db), - {ok, MonRef} = couch_mrview:compact(Db, <<"_design/bar">>, [monitor]), - receive - {'DOWN', MonRef, process, _, _} -> ok - after ?TIMEOUT -> - erlang:error( - {assertion_failed, - [{module, ?MODULE}, {line, ?LINE}, - {reason, "compaction failed"}]}) - end, - QPid ! {self(), continue}, - receive - {QPid, Count} -> - ?assertEqual(1000, Count) - after ?TIMEOUT -> - erlang:error( - {assertion_failed, - [{module, ?MODULE}, {line, ?LINE}, - {reason, "query failed"}]}) - end - end). 
- - -should_remove(Db) -> - ?_test(begin - DDoc = <<"_design/bar">>, - {ok, _Results} = couch_mrview:query_view(Db, DDoc, <<"baz">>), - {ok, IndexPid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc), - ok = couch_index:compact(IndexPid, []), - {ok, CompactorPid} = couch_index:get_compactor_pid(IndexPid), - {ok, CompactingPid} = couch_index_compactor:get_compacting_pid(CompactorPid), - MonRef = erlang:monitor(process, CompactingPid), - exit(CompactingPid, crash), - receive - {'DOWN', MonRef, process, _, crash} -> - meck:wait(couch_mrview_compactor, remove_compacted, '_', 100), - ?assertEqual(1, meck:num_calls( - couch_mrview_compactor, remove_compacted, '_', IndexPid)), - ?assert(is_process_alive(IndexPid)), - ?assert(is_process_alive(CompactorPid)) - after ?TIMEOUT -> - erlang:error( - {assertion_failed, [ - {module, ?MODULE}, {line, ?LINE}, - {reason, "compaction didn't exit :/"}]}) - end - end). - - -start_query(Db) -> - Self = self(), - Pid = spawn(fun() -> - CB = fun - (_, wait) -> receive {Self, continue} -> {ok, 0} end; - ({row, _}, Count) -> {ok, Count+1}; - (_, Count) -> {ok, Count} - end, - {ok, Result} = - couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, [], CB, wait), - Self ! {self(), Result} - end), - {ok, Pid}. diff --git a/src/couch_mrview/test/couch_mrview_ddoc_updated_tests.erl b/src/couch_mrview/test/couch_mrview_ddoc_updated_tests.erl deleted file mode 100644 index 4310157eb..000000000 --- a/src/couch_mrview/test/couch_mrview_ddoc_updated_tests.erl +++ /dev/null @@ -1,145 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_ddoc_updated_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - - -setup() -> - Name = ?tempdb(), - couch_server:delete(Name, [?ADMIN_CTX]), - {ok, Db} = couch_db:create(Name, [?ADMIN_CTX]), - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/bar">>}, - {<<"views">>, {[ - {<<"baz">>, {[ - {<<"map">>, << - "function(doc) {\n" - " emit(doc.val, doc.val);\n" - "}" - >>} - ]}} - ]}} - ]}), - [Doc1 | Docs999] = couch_mrview_test_util:make_docs(map, 100), - {ok, _} = couch_db:update_docs(Db, [DDoc, Doc1], []), - {ok, Db2} = couch_db:reopen(Db), - - % run a query with 1 doc to initialize couch_index process - CB = fun - ({row, _}, Count) -> {ok, Count+1}; - (_, Count) -> {ok, Count} - end, - {ok, _} = - couch_mrview:query_view(Db2, <<"_design/bar">>, <<"baz">>, [], CB, 0), - - meck:new(couch_index_updater, [passthrough]), - meck:expect(couch_index_updater, update, fun(Idx, Mod, IdxSt) -> - timer:sleep(5000), - meck:passthrough([Idx, Mod, IdxSt]) - end), - - % add more docs - {ok, _} = couch_db:update_docs(Db2, Docs999, []), - {ok, Db3} = couch_db:reopen(Db2), - Db3. - -teardown(Db) -> - meck:unload(couch_index_updater), - couch_db:close(Db), - couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), - ok. 
- - -ddoc_update_test_() -> - { - "Check ddoc update actions", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun check_indexing_stops_on_ddoc_change/1 - ] - } - } - }. - - -check_indexing_stops_on_ddoc_change(Db) -> - ?_test(begin - DDocID = <<"_design/bar">>, - - IndexesBefore = get_indexes_by_ddoc(DDocID, 1), - ?assertEqual(1, length(IndexesBefore)), - AliveBefore = lists:filter(fun erlang:is_process_alive/1, IndexesBefore), - ?assertEqual(1, length(AliveBefore)), - - {ok, DDoc} = couch_db:open_doc(Db, DDocID, [ejson_body, ?ADMIN_CTX]), - DDocJson2 = couch_doc:from_json_obj({[ - {<<"_id">>, DDocID}, - {<<"_deleted">>, true}, - {<<"_rev">>, couch_doc:rev_to_str(DDoc#doc.revs)} - ]}), - - % spawn a process for query - Self = self(), - QPid = spawn(fun() -> - {ok, Result} = couch_mrview:query_view( - Db, <<"_design/bar">>, <<"baz">>, []), - Self ! {self(), Result} - end), - - % while indexing for the query is in progress, delete DDoc - {ok, _} = couch_db:update_doc(Db, DDocJson2, []), - receive - {QPid, Msg} -> - ?assertEqual(Msg, ddoc_updated) - after ?TIMEOUT -> - erlang:error( - {assertion_failed, [{module, ?MODULE}, {line, ?LINE}, - {reason, "test failed"}]}) - end, - - %% assert that previously running indexes are gone - IndexesAfter = get_indexes_by_ddoc(DDocID, 0), - ?assertEqual(0, length(IndexesAfter)), - AliveAfter = lists:filter(fun erlang:is_process_alive/1, IndexesBefore), - ?assertEqual(0, length(AliveAfter)) - end). - - -get_indexes_by_ddoc(DDocID, N) -> - Indexes = test_util:wait(fun() -> - Indxs = ets:match_object( - couchdb_indexes_by_db, {'$1', {DDocID, '$2'}}), - case length(Indxs) == N of - true -> - Indxs; - false -> - wait - end - end), - lists:foldl(fun({DbName, {_DDocID, Sig}}, Acc) -> - case ets:lookup(couchdb_indexes_by_sig, {DbName, Sig}) of - [{_, Pid}] -> [Pid|Acc]; - _ -> Acc - end - end, [], Indexes). - - diff --git a/src/couch_mrview/test/couch_mrview_ddoc_validation_tests.erl b/src/couch_mrview/test/couch_mrview_ddoc_validation_tests.erl deleted file mode 100644 index ce2be8904..000000000 --- a/src/couch_mrview/test/couch_mrview_ddoc_validation_tests.erl +++ /dev/null @@ -1,422 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_ddoc_validation_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(LIB, {[{<<"mylib">>, {[{<<"lib1">>, <<"x=42">>}]}}]}). - -setup() -> - {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map), - Db. - -teardown(Db) -> - couch_db:close(Db), - couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), - ok. 
- -ddoc_validation_test_() -> - { - "ddoc validation tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_reject_invalid_js_map/1, - fun should_reject_invalid_js_reduce/1, - fun should_reject_invalid_builtin_reduce/1, - fun should_reject_non_object_options/1, - fun should_reject_non_object_filters/1, - fun should_accept_obj_in_filters/1, - fun should_reject_non_object_lists/1, - fun should_accept_obj_in_lists/1, - fun should_reject_non_object_shows/1, - fun should_accept_obj_in_shows/1, - fun should_reject_non_object_updates/1, - fun should_accept_obj_in_updates/1, - fun should_reject_non_object_views/1, - fun should_reject_non_string_language/1, - fun should_reject_non_string_validate_doc_update/1, - fun should_accept_string_rewrites/1, - fun should_reject_bad_rewrites/1, - fun should_accept_option/1, - fun should_accept_any_option/1, - fun should_accept_filter/1, - fun should_reject_non_string_or_obj_filter_function/1, - fun should_accept_list/1, - fun should_reject_non_string_or_obj_list_function/1, - fun should_accept_show/1, - fun should_reject_non_string_or_obj_show_function/1, - fun should_accept_update/1, - fun should_reject_non_string_or_obj_update_function/1, - fun should_accept_view/1, - fun should_accept_view_with_reduce/1, - fun should_accept_view_with_lib/1, - fun should_reject_view_that_is_not_an_object/1, - fun should_reject_view_without_map_function/1, - fun should_reject_view_with_non_string_map_function/1, - fun should_reject_view_with_non_string_reduce_function/1, - fun should_accept_any_in_lib/1, - fun should_accept_map_object_for_queries/1, - fun should_reject_map_non_objects_for_queries/1 - ] - } - } - }. - -should_reject_invalid_js_map(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_invalid_js_map">>}, - {<<"views">>, {[ - {<<"foo">>, {[ - {<<"map">>, <<"function(doc) }{">>} - ]}} - ]}} - ]}), - ?_assertThrow( - {bad_request, compilation_error, _}, - couch_db:update_doc(Db, Doc, [])). - -should_reject_invalid_js_reduce(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_invalid_js_reduce">>}, - {<<"views">>, {[ - {<<"foo">>, {[ - {<<"map">>, <<"function(doc) { emit(null); }">>}, - {<<"reduce">>, <<"function(k, v, r) }{}">>} - ]}} - ]}} - ]}), - ?_assertThrow( - {bad_request, compilation_error, _}, - couch_db:update_doc(Db, Doc, [])). - -should_reject_invalid_builtin_reduce(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_invalid_builtin_reduce">>}, - {<<"views">>, {[ - {<<"foo">>, {[ - {<<"map">>, <<"function(doc) { emit(null); }">>}, - {<<"reduce">>, <<"_foobar">>} - ]}} - ]}} - ]}), - ?_assertThrow( - {bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_reject_non_object_options(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_object_options">>}, - {<<"options">>, <<"invalid">>} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_reject_non_object_filters(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_object_filters">>}, - {<<"filters">>, <<"invalid">>} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). 
- -should_accept_obj_in_filters(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_obj_in_filters">>}, - {<<"filters">>, ?LIB} - ]}), - ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])). - -should_reject_non_object_lists(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_object_lists">>}, - {<<"lists">>, <<"invalid">>} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_reject_non_object_shows(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_object_shows">>}, - {<<"shows">>, <<"invalid">>} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_accept_obj_in_shows(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_obj_in_shows">>}, - {<<"shows">>, ?LIB} - ]}), - ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])). - -should_reject_non_object_updates(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_object_updates">>}, - {<<"updates">>, <<"invalid">>} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_accept_obj_in_updates(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_obj_in_updates">>}, - {<<"updates">>, ?LIB} - ]}), - ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])). - -should_reject_non_object_views(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_object_views">>}, - {<<"views">>, <<"invalid">>} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_reject_non_string_language(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_string_language">>}, - {<<"language">>, 1} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_reject_non_string_validate_doc_update(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_string_vdu">>}, - {<<"validate_doc_update">>, 1} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_accept_string_rewrites(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_array_rewrites">>}, - {<<"rewrites">>, <<"function(req){}">>} - ]}), - ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). - -should_reject_bad_rewrites(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_array_rewrites">>}, - {<<"rewrites">>, 42} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_accept_option(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_options">>}, - {<<"options">>, {[ {<<"option1">>, <<"function(doc,req){}">>} ]}} - ]}), - ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). - -should_accept_any_option(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_any_option">>}, - {<<"options">>, {[ {<<"option1">>, true} ]}} - ]}), - ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). - -should_accept_filter(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_filters">>}, - {<<"filters">>, {[ {<<"filter1">>, <<"function(doc,req){}">>} ]}} - ]}), - ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). 
- -should_reject_non_string_or_obj_filter_function(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_string_or_obj_filter_function">>}, - {<<"filters">>, {[ {<<"filter1">>, 1} ]}} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_accept_list(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_lists">>}, - {<<"lists">>, {[ {<<"list1">>, <<"function(doc,req){}">>} ]}} - ]}), - ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). - -should_reject_non_string_or_obj_list_function(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_string_or_obj_list_function">>}, - {<<"lists">>, {[ {<<"list1">>, 1} ]}} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_accept_obj_in_lists(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_obj_in_lists">>}, - {<<"lists">>, ?LIB} - ]}), - ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])). - - -should_accept_show(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_shows">>}, - {<<"shows">>, {[ {<<"show1">>, <<"function(doc,req){}">>} ]}} - ]}), - ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). - -should_reject_non_string_or_obj_show_function(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_string_or_obj_show_function">>}, - {<<"shows">>, {[ {<<"show1">>, 1} ]}} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_accept_update(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_updates">>}, - {<<"updates">>, {[ {<<"update1">>, <<"function(doc,req){}">>} ]}} - ]}), - ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). - -should_reject_non_string_or_obj_update_function(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_string_or_obj_update_function">>}, - {<<"updates">>, {[ {<<"update1">>, 1} ]}} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_accept_view(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_view">>}, - {<<"views">>, {[ - {<<"view1">>, {[{<<"map">>, <<"function(d){}">>}]}} - ]}} - ]}), - ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). - -should_accept_view_with_reduce(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_view_with_reduce">>}, - {<<"views">>, {[ - {<<"view1">>, {[ - {<<"map">>, <<"function(d){}">>}, - {<<"reduce">>,<<"function(d){}">>} - ]}} - ]}} - ]}), - ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). - -should_accept_view_with_lib(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_view_with_lib">>}, - {<<"views">>, {[ - {<<"view1">>, {[ - {<<"map">>, <<"function(d){}">>} - ]}}, - {<<"lib">>, {[ - {<<"lib1">>, <<"x=42">>} - ]}} - ]}} - ]}), - ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). - -should_reject_view_that_is_not_an_object(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_non_object_view">>}, - {<<"views">>, {[{<<"view1">>, <<"thisisbad">>}]}} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). 
- -should_reject_view_without_map_function(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_view_without_map">>}, - {<<"views">>, {[ - {<<"view1">>, {[]}} - ]}} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - - -should_reject_view_with_non_string_map_function(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_view_with_nonstr_map">>}, - {<<"views">>, {[ - {<<"view1">>, {[ - {<<"map">>,{[]}} - ]}} - ]}} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_reject_view_with_non_string_reduce_function(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_view_with_nonstr_reduce">>}, - {<<"views">>, {[ - {<<"view1">>, {[ - {<<"map">>,<<"function(d){}">>}, - {<<"reduce">>,1} - ]}} - ]}} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). - -should_accept_any_in_lib(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_any_in_lib">>}, - {<<"views">>, {[ - {<<"view1">>, {[ - {<<"map">>, <<"function(d){}">>} - ]}}, - {<<"lib">>, {[{<<"lib1">>, {[]}}]}} - ]}} - ]}), - ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). - - -should_accept_map_object_for_queries(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_accept_map_objects_for_queries">>}, - {<<"language">>, <<"query">>}, - {<<"views">>, {[ - {<<"view1">>, {[ - {<<"map">>, {[ - {<<"x">>, <<"y">>} - ]}} - ]}} - ]}} - ]}), - ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). - - -should_reject_map_non_objects_for_queries(Db) -> - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/should_reject_map_non_objects__with_nonstr_reduce">>}, - {<<"language">>, <<"query">>}, - {<<"views">>, {[ - {<<"view1">>, {[ - {<<"map">>, <<"function(d){}">>} - ]}} - ]}} - ]}), - ?_assertThrow({bad_request, invalid_design_doc, _}, - couch_db:update_doc(Db, Doc, [])). diff --git a/src/couch_mrview/test/couch_mrview_design_docs_tests.erl b/src/couch_mrview/test/couch_mrview_design_docs_tests.erl deleted file mode 100644 index aedd42865..000000000 --- a/src/couch_mrview/test/couch_mrview_design_docs_tests.erl +++ /dev/null @@ -1,136 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_design_docs_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - - - -setup() -> - {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), design), - Db. - -teardown(Db) -> - couch_db:close(Db), - couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), - ok. 
- - -design_docs_test_() -> - { - "_design_docs view tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_query/1, - fun should_query_with_range/1, - fun should_query_with_range_rev/1, - fun should_query_with_limit_and_skip/1, - fun should_query_with_include_docs/1 - ] - } - } - }. - - -should_query(Db) -> - Result = run_query(Db, []), - Expect = {ok, [ - {meta, [{total, 10}, {offset, 10}]}, - mk_row(<<"_design/bar01">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar02">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar06">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar07">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar08">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar09">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar10">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>) - ]}, - ?_assertEqual(Expect, Result). - -should_query_with_range(Db) -> - Result = run_query(Db, [ - {start_key, <<"_design/bar03">>}, - {end_key, <<"_design/bar05">>} - ]), - Expect = {ok, [ - {meta, [{total, 10}, {offset, 12}]}, - mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>) - ]}, - ?_assertEqual(Expect, Result). - -should_query_with_range_rev(Db) -> - Result = run_query(Db, [ - {direction, rev}, - {start_key, <<"_design/bar05">>}, {end_key, <<"_design/bar03">>}, - {inclusive_end, true} - ]), - Expect = {ok, [ - {meta, [{total, 10}, {offset, 5}]}, - mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>) - ]}, - ?_assertEqual(Expect, Result). - -should_query_with_limit_and_skip(Db) -> - Result = run_query(Db, [ - {start_key, <<"_design/bar02">>}, - {limit, 3}, - {skip, 3} - ]), - Expect = {ok, [ - {meta, [{total, 10}, {offset, 14}]}, - mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar06">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), - mk_row(<<"_design/bar07">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>) - ]}, - ?_assertEqual(Expect, Result). - -should_query_with_include_docs(Db) -> - Result = run_query(Db, [ - {start_key, <<"_design/bar08">>}, - {end_key, <<"_design/bar08">>}, - {include_docs, true} - ]), - Doc = {[ - {<<"_id">>,<<"_design/bar08">>}, - {<<"_rev">>,<<"1-0b24e44a44af45e51e562fd124ce3007">>}, - {<<"views">>,{[]}} - ]}, - Val = {[{rev, <<"1-0b24e44a44af45e51e562fd124ce3007">>}]}, - Expect = {ok, [ - {meta, [{total, 10}, {offset, 17}]}, - {row, [{id, <<"_design/bar08">>}, {key, <<"_design/bar08">>}, - {value, Val}, {doc, Doc}]} - ]}, - ?_assertEqual(Expect, Result). - - -mk_row(Id, Rev) -> - {row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}. - -run_query(Db, Opts0) -> - Opts = [{extra, [{namespace, <<"_design">>}]} | Opts0], - couch_mrview:query_all_docs(Db, Opts). 
diff --git a/src/couch_mrview/test/couch_mrview_http_tests.erl b/src/couch_mrview/test/couch_mrview_http_tests.erl deleted file mode 100644 index bd11c7ad8..000000000 --- a/src/couch_mrview/test/couch_mrview_http_tests.erl +++ /dev/null @@ -1,28 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_http_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch_mrview/include/couch_mrview.hrl"). - - -mrview_http_test_() -> - [ - ?_assertEqual(#mrargs{group_level=undefined, group=true}, - couch_mrview_http:parse_params([{"group", "true"}], - undefined, #mrargs{})), - - ?_assertEqual(#mrargs{group_level=1, group=undefined}, - couch_mrview_http:parse_params([{"group_level", "1"}], - undefined, #mrargs{})) - ]. diff --git a/src/couch_mrview/test/couch_mrview_index_changes_tests.erl b/src/couch_mrview/test/couch_mrview_index_changes_tests.erl deleted file mode 100644 index f0be1b9b1..000000000 --- a/src/couch_mrview/test/couch_mrview_index_changes_tests.erl +++ /dev/null @@ -1,224 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_index_changes_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - - -setup() -> - {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map), - Db. - -teardown(Db) -> - couch_db:close(Db), - couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), - ok. - -changes_index_test() -> - { - "changes index tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun test_normal_changes/1, - fun test_stream_once/1, - fun test_stream_once_since/1, - fun test_stream_once_timeout/1, - fun test_stream_once_heartbeat/1, - fun test_stream/1, - fun test_indexer/1 - ] - } - } - }. - - -test_normal_changes(Db) -> - Result = run_query(Db, []), - Expect = {ok, 11, [ - {{2, 1, <<"1">>}, 1}, - {{3, 10, <<"10">>}, 10}, - {{4, 2, <<"2">>}, 2}, - {{5, 3, <<"3">>}, 3}, - {{6, 4, <<"4">>}, 4}, - {{7, 5, <<"5">>}, 5}, - {{8, 6, <<"6">>}, 6}, - {{9, 7, <<"7">>}, 7}, - {{10, 8, <<"8">>}, 8}, - {{11, 9, <<"9">>}, 9} - ]}, - ?_assertEqual(Result, Expect). 
- -test_stream_once(Db) -> - Result = run_query(Db, [{stream, once}]), - Expect = {ok, 11, [ - {{2, 1, <<"1">>}, 1}, - {{3, 10, <<"10">>}, 10}, - {{4, 2, <<"2">>}, 2}, - {{5, 3, <<"3">>}, 3}, - {{6, 4, <<"4">>}, 4}, - {{7, 5, <<"5">>}, 5}, - {{8, 6, <<"6">>}, 6}, - {{9, 7, <<"7">>}, 7}, - {{10, 8, <<"8">>}, 8}, - {{11, 9, <<"9">>}, 9} - ]}, - ?_assertEqual(Result, Expect). - - -test_stream_once_since(Db) -> - Self = self(), - spawn(fun() -> - Result = run_query(Db, [{since, 11}, - {stream, once}]), - Self ! {result, Result} - end), - - spawn(fun() -> - timer:sleep(1000), - {ok, Db1} = save_doc(Db, 11), - couch_mrview:refresh(Db1, <<"_design/bar">>) - end), - - Expect = {ok,12,[{{12,11,<<"11">>},11}]}, - - receive - {result, Result} -> - ?_assertEqual(Result, Expect) - after 5000 -> - io:format("never got the change", []) - end. - - -test_stream_once_timeout(Db) -> - Self = self(), - spawn(fun() -> - Result = run_query(Db, [{since, 12}, - {stream, once}, - {timeout, 3000}]), - Self ! {result, Result} - end), - - - - Expect = {ok, 12, []}, - - receive - {result, Result} -> - ?_assertEqual(Result, Expect) - after 5000 -> - io:format("never got the change", []) - end. - -test_stream_once_heartbeat(Db) -> - Self = self(), - spawn(fun() -> - Result = run_query(Db, [{since, 12}, - {stream, once}, - {heartbeat, 1000}]), - Self ! {result, Result} - end), - - spawn(fun() -> - timer:sleep(3000), - {ok, Db1} = save_doc(Db, 12), - couch_mrview:refresh(Db1, <<"_design/bar">>) - end), - - Expect = {ok,13,[heartbeat, - heartbeat, - heartbeat, - {{13,12,<<"12">>},12}]}, - - - - receive - {result, Result} -> - ?_assertEqual(Result, Expect) - after 5000 -> - io:format("never got the change", []) - end. - - -test_stream(Db) -> - Self = self(), - spawn(fun() -> - Result = run_query(Db, [{since, 13}, - stream, - {timeout, 3000}]), - Self ! {result, Result} - end), - - spawn(fun() -> - timer:sleep(1000), - {ok, Db1} = save_doc(Db, 13), - couch_mrview:refresh(Db1, <<"_design/bar">>), - {ok, Db2} = save_doc(Db1, 14), - couch_mrview:refresh(Db2, <<"_design/bar">>) - end), - - Expect = {ok, 15,[{{14,13,<<"13">>},13}, - {{15,14,<<"14">>},14}]}, - - receive - {result, Result} -> - ?_assertEqual(Result, Expect) - after 5000 -> - io:format("never got the change", []) - end. - - -test_indexer(Db) -> - Result = run_query(Db, [{since, 14}, refresh]), - Expect = {ok, 15, [{{15,14,<<"14">>},14}]}, - - {ok, Db1} = save_doc(Db, 15), - timer:sleep(1500), - Result1 = run_query(Db1, [{since, 14}], false), - Expect1 = {ok, 16, [{{15,14,<<"14">>},14}, - {{16,15,<<"15">>},15}]}, - ?_assert(Result == Expect andalso Result1 == Expect1). - - -save_doc(Db, Id) -> - Doc = couch_mrview_test_util:doc(Id), - {ok, _Rev} = couch_db:update_doc(Db, Doc, []), - {ok, _} = couch_db:ensure_full_commit(Db), - couch_db:reopen(Db). - -run_query(Db, Opts) -> - run_query(Db, Opts, true). - -run_query(Db, Opts, Refresh) -> - Fun = fun - (stop, {LastSeq, Acc}) -> - {ok, LastSeq, Acc}; - (heartbeat, Acc) -> - {ok, [heartbeat | Acc]}; - (Event, Acc) -> - {ok, [Event | Acc]} - end, - case Refresh of - true -> - couch_mrview:refresh(Db, <<"_design/bar">>); - false -> - ok - end, - {ok, LastSeq, R} = couch_mrview_changes:handle_changes(Db, <<"_design/bar">>, - <<"baz">>, Fun, [], Opts), - {ok, LastSeq, lists:reverse(R)}. 
diff --git a/src/couch_mrview/test/couch_mrview_index_info_tests.erl b/src/couch_mrview/test/couch_mrview_index_info_tests.erl
deleted file mode 100644
index efa03e7c0..000000000
--- a/src/couch_mrview/test/couch_mrview_index_info_tests.erl
+++ /dev/null
@@ -1,121 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_mrview_index_info_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(TIMEOUT, 1000).
-
-
-setup() ->
-    {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
-    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
-    {ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
-    {Db, Info}.
-
-
-teardown({Db, _}) ->
-    couch_db:close(Db),
-    couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
-    ok.
-
-
-view_info_test_() ->
-    {
-        "Views index tests",
-        {
-            setup,
-            fun test_util:start_couch/0,
-            fun test_util:stop_couch/1,
-            {
-                foreach,
-                fun setup/0,
-                fun teardown/1,
-                [
-                    fun sig_is_binary/1,
-                    fun language_is_js/1,
-                    fun file_size_is_non_neg_int/1,
-                    fun active_size_is_non_neg_int/1,
-                    fun external_size_is_non_neg_int/1,
-                    fun disk_size_is_file_size/1,
-                    fun data_size_is_external_size/1,
-                    fun active_size_less_than_file_size/1,
-                    fun update_seq_is_non_neg_int/1,
-                    fun purge_seq_is_non_neg_int/1,
-                    fun update_opts_is_bin_list/1
-                ]
-            }
-        }
-    }.
-
-
-sig_is_binary({_, Info}) ->
-    ?_assert(is_binary(prop(signature, Info))).
-
-
-language_is_js({_, Info}) ->
-    ?_assertEqual(<<"javascript">>, prop(language, Info)).
-
-
-file_size_is_non_neg_int({_, Info}) ->
-    ?_assert(check_non_neg_int([sizes, file], Info)).
-
-
-active_size_is_non_neg_int({_, Info}) ->
-    ?_assert(check_non_neg_int([sizes, active], Info)).
-
-
-external_size_is_non_neg_int({_, Info}) ->
-    ?_assert(check_non_neg_int([sizes, external], Info)).
-
-
-disk_size_is_file_size({_, Info}) ->
-    ?_assertEqual(prop([sizes, file], Info), prop(disk_size, Info)).
-
-
-data_size_is_external_size({_, Info}) ->
-    ?_assertEqual(prop([sizes, external], Info), prop(data_size, Info)).
-
-
-active_size_less_than_file_size({_, Info}) ->
-    ?_assert(prop([sizes, active], Info) < prop([sizes, file], Info)).
-
-
-update_seq_is_non_neg_int({_, Info}) ->
-    ?_assert(check_non_neg_int(update_seq, Info)).
-
-
-purge_seq_is_non_neg_int({_, Info}) ->
-    ?_assert(check_non_neg_int(purge_seq, Info)).
-
-
-update_opts_is_bin_list({_, Info}) ->
-    Opts = prop(update_options, Info),
-    ?_assert(is_list(Opts) andalso
-            (Opts == [] orelse lists:all(fun is_binary/1, Opts))).
-
-
-check_non_neg_int(Key, Info) ->
-    Size = prop(Key, Info),
-    is_integer(Size) andalso Size >= 0.
-
-
-prop(Key, {Props}) when is_list(Props) ->
-    prop(Key, Props);
-prop([Key], Info) ->
-    prop(Key, Info);
-prop([Key | Rest], Info) ->
-    prop(Rest, prop(Key, Info));
-prop(Key, Info) when is_atom(Key), is_list(Info) ->
-    couch_util:get_value(Key, Info).
diff --git a/src/couch_mrview/test/couch_mrview_local_docs_tests.erl b/src/couch_mrview/test/couch_mrview_local_docs_tests.erl deleted file mode 100644 index b0d25469a..000000000 --- a/src/couch_mrview/test/couch_mrview_local_docs_tests.erl +++ /dev/null @@ -1,148 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_local_docs_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - - - -setup() -> - {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), local), - Db. - -teardown(Db) -> - couch_db:close(Db), - couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), - ok. - - -all_docs_test_() -> - { - "_local_docs view tests", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_query/1, - fun should_query_with_range/1, - fun should_query_with_range_rev/1, - fun should_query_with_limit_and_skip/1, - fun should_query_with_include_docs/1, - fun should_query_with_update_seq/1 - ] - } - } - }. - - -should_query(Db) -> - Result = run_query(Db, []), - Expect = {ok, [ - {meta, [{total, null}, {offset, null}]}, - mk_row(1), - mk_row(10), - mk_row(2), - mk_row(3), - mk_row(4), - mk_row(5), - mk_row(6), - mk_row(7), - mk_row(8), - mk_row(9) - ]}, - ?_assertEqual(Expect, Result). - -should_query_with_range(Db) -> - Result = run_query(Db, [ - {start_key, <<"_local/3">>}, - {end_key, <<"_local/5">>} - ]), - Expect = {ok, [ - {meta, [{total, null}, {offset, null}]}, - mk_row(3), - mk_row(4), - mk_row(5) - ]}, - ?_assertEqual(Expect, Result). - -should_query_with_range_rev(Db) -> - Result = run_query(Db, [ - {direction, rev}, - {start_key, <<"_local/5">>}, {end_key, <<"_local/3">>}, - {inclusive_end, true} - ]), - Expect = {ok, [ - {meta, [{total, null}, {offset, null}]}, - mk_row(5), - mk_row(4), - mk_row(3) - ]}, - ?_assertEqual(Expect, Result). - -should_query_with_limit_and_skip(Db) -> - Result = run_query(Db, [ - {start_key, <<"_local/2">>}, - {limit, 3}, - {skip, 3} - ]), - Expect = {ok, [ - {meta, [{total, null}, {offset, null}]}, - mk_row(5), - mk_row(6), - mk_row(7) - ]}, - ?_assertEqual(Expect, Result). - -should_query_with_include_docs(Db) -> - Result = run_query(Db, [ - {start_key, <<"_local/8">>}, - {end_key, <<"_local/8">>}, - {include_docs, true} - ]), - {row, Doc0} = mk_row(8), - Doc = Doc0 ++ [{doc, {[ - {<<"_id">>, <<"_local/8">>}, - {<<"_rev">>, <<"0-1">>}, - {<<"val">>, 8} - ]}}], - Expect = {ok, [ - {meta, [{total, null}, {offset, null}]}, - {row, Doc} - ]}, - ?_assertEqual(Expect, Result). - -should_query_with_update_seq(Db) -> - Result = run_query(Db, [ - {start_key, <<"_local/2">>}, - {limit, 1}, - {update_seq, true} - ]), - Expect = {ok, [ - {meta, [{total, null}, {offset, null}, {update_seq, null}]}, - mk_row(2) - ]}, - ?_assertEqual(Expect, Result). - -mk_row(IntId) -> - Id = list_to_binary(io_lib:format("_local/~b", [IntId])), - {row, [{id, Id}, {key, Id}, {value, {[{rev, <<"0-1">>}]}}]}. 
- -run_query(Db, Opts0) -> - Opts = [{extra, [{namespace, <<"_local">>}]} | Opts0], - couch_mrview:query_all_docs(Db, Opts). diff --git a/src/couch_mrview/test/couch_mrview_map_views_tests.erl b/src/couch_mrview/test/couch_mrview_map_views_tests.erl deleted file mode 100644 index 805dc6c74..000000000 --- a/src/couch_mrview/test/couch_mrview_map_views_tests.erl +++ /dev/null @@ -1,144 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_map_views_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - - -setup() -> - {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map), - Db. - -teardown(Db) -> - couch_db:close(Db), - couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), - ok. - - -map_views_test_() -> - { - "Map views", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_map/1, - fun should_map_with_range/1, - fun should_map_with_limit_and_skip/1, - fun should_map_with_include_docs/1, - fun should_map_empty_views/1, - fun should_give_ext_size_seq_indexed_test/1 - ] - } - } - }. - - -should_map(Db) -> - Result = run_query(Db, []), - Expect = {ok, [ - {meta, [{total, 10}, {offset, 0}]}, - {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}, - {row, [{id, <<"6">>}, {key, 6}, {value, 6}]}, - {row, [{id, <<"7">>}, {key, 7}, {value, 7}]}, - {row, [{id, <<"8">>}, {key, 8}, {value, 8}]}, - {row, [{id, <<"9">>}, {key, 9}, {value, 9}]}, - {row, [{id, <<"10">>}, {key, 10}, {value, 10}]} - ]}, - ?_assertEqual(Expect, Result). - -should_map_with_range(Db) -> - Result = run_query(Db, [ - {direction, rev}, - {start_key, 5}, {end_key, 3}, - {inclusive_end, true} - ]), - Expect = {ok, [ - {meta, [{total, 10}, {offset, 5}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]} - ]}, - ?_assertEqual(Expect, Result). - -should_map_with_limit_and_skip(Db) -> - Result = run_query(Db, [ - {start_key, 2}, - {limit, 3}, - {skip, 3} - ]), - Expect = {ok, [ - {meta, [{total, 10}, {offset, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}, - {row, [{id, <<"6">>}, {key, 6}, {value, 6}]}, - {row, [{id, <<"7">>}, {key, 7}, {value, 7}]} - ]}, - ?_assertEqual(Expect, Result). - -should_map_with_include_docs(Db) -> - Result = run_query(Db, [ - {start_key, 8}, - {end_key, 8}, - {include_docs, true} - ]), - Doc = {[ - {<<"_id">>,<<"8">>}, - {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>}, - {<<"val">>,8} - ]}, - Expect = {ok, [ - {meta, [{total, 10}, {offset, 7}]}, - {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]} - ]}, - ?_assertEqual(Expect, Result). 
- -should_map_empty_views(Db) -> - Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>), - Expect = {ok, [ - {meta, [{total, 0}, {offset, 0}]} - ]}, - ?_assertEqual(Expect, Result). - -should_give_ext_size_seq_indexed_test(Db) -> - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, <<"_design/seqdoc">>}, - {<<"options">>, {[{<<"seq_indexed">>, true}]}}, - {<<"views">>, {[ - {<<"view1">>, {[ - {<<"map">>, <<"function(doc){emit(doc._id, doc._id);}">>} - ]}} - ]} - } - ]}), - {ok, _} = couch_db:update_doc(Db, DDoc, []), - {ok, Db1} = couch_db:open_int(couch_db:name(Db), []), - {ok, DDoc1} = couch_db:open_doc(Db1, <<"_design/seqdoc">>, [ejson_body]), - couch_mrview:query_view(Db1, DDoc1, <<"view1">>, [{update, true}]), - {ok, Info} = couch_mrview:get_info(Db1, DDoc), - Size = couch_util:get_nested_json_value({Info}, [sizes, external]), - ok = couch_db:close(Db1), - ?_assert(is_number(Size)). - - -run_query(Db, Opts) -> - couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts). diff --git a/src/couch_mrview/test/couch_mrview_purge_docs_fabric_tests.erl b/src/couch_mrview/test/couch_mrview_purge_docs_fabric_tests.erl deleted file mode 100644 index 213acac0b..000000000 --- a/src/couch_mrview/test/couch_mrview_purge_docs_fabric_tests.erl +++ /dev/null @@ -1,276 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_purge_docs_fabric_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("mem3/include/mem3.hrl"). --include_lib("couch_mrview/include/couch_mrview.hrl"). - --define(TIMEOUT, 1000). - - -setup() -> - DbName = ?tempdb(), - ok = fabric:create_db(DbName, [?ADMIN_CTX, {q, 1}]), - meck:new(couch_mrview_index, [passthrough]), - meck:expect(couch_mrview_index, ensure_local_purge_docs, fun(A, B) -> - meck:passthrough([A, B]) - end), - DbName. - - -teardown(DbName) -> - meck:unload(), - ok = fabric:delete_db(DbName, [?ADMIN_CTX]). - - -view_purge_fabric_test_() -> - { - "Map views", - { - setup, - fun() -> test_util:start_couch([fabric, mem3]) end, - fun test_util:stop_couch/1, - { - foreach, - fun setup/0, - fun teardown/1, - [ - fun test_purge_verify_index/1, - fun test_purge_hook_before_compaction/1 - ] - } - } - }. 
- - -test_purge_verify_index(DbName) -> - ?_test(begin - Docs1 = couch_mrview_test_util:make_docs(normal, 5), - {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]), - {ok, _} = fabric:update_doc( - DbName, - couch_mrview_test_util:ddoc(map), - [?ADMIN_CTX] - ), - - Result1 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}), - Expect1 = {ok, [ - {meta, [{total, 5}, {offset, 0}]}, - {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect1, Result1), - - {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName), - ?assertEqual(0, couch_util:get_value(<<"purge_seq">>, Props1)), - ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)], - [ShardDbName | _Rest ] = ShardNames, - ?assertEqual(true, couch_mrview_index:verify_index_exists( - ShardDbName, Props1)), - - purge_docs(DbName, [<<"1">>]), - - Result2 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}), - Expect2 = {ok, [ - {meta, [{total, 4}, {offset, 0}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect2, Result2), - - {ok, #doc{body = {Props2}}} = get_local_purge_doc(DbName), - ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)), - ?assertEqual(true, couch_mrview_index:verify_index_exists( - ShardDbName, Props2)) - end). - - -test_purge_hook_before_compaction(DbName) -> - ?_test(begin - Docs1 = couch_mrview_test_util:make_docs(normal, 5), - {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]), - {ok, _} = fabric:update_doc( - DbName, - couch_mrview_test_util:ddoc(map), - [?ADMIN_CTX] - ), - - Result1 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}), - Expect1 = {ok, [ - {meta, [{total, 5}, {offset, 0}]}, - {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect1, Result1), - - purge_docs(DbName, [<<"1">>]), - - Result2 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}), - Expect2 = {ok, [ - {meta, [{total, 4}, {offset, 0}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect2, Result2), - - {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName), - ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props1)), - - [ShardName | _] = local_shards(DbName), - couch_util:with_db(ShardName, fun(Db) -> - {ok, _} = couch_db:start_compact(Db) - end), - wait_compaction(ShardName, ?LINE), - - ?assertEqual(ok, meck:wait(1, couch_mrview_index, - ensure_local_purge_docs, '_', 5000) - ), - - % Make sure compaction didn't change the update seq - {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName), - ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props1)), - - purge_docs(DbName, [<<"2">>]), - - couch_util:with_db(ShardName, fun(Db) -> - {ok, _} = couch_db:start_compact(Db) - end), - wait_compaction(ShardName, ?LINE), - - ?assertEqual(ok, meck:wait(2, couch_mrview_index, - ensure_local_purge_docs, '_', 5000) - ), - - % Make 
sure compaction after a purge didn't overwrite - % the local purge doc for the index - {ok, #doc{body = {Props2}}} = get_local_purge_doc(DbName), - ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)), - - % Force another update to ensure that we update - % the local doc appropriate after compaction - Result3 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}), - Expect3 = {ok, [ - {meta, [{total, 3}, {offset, 0}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect3, Result3), - - {ok, #doc{body = {Props3}}} = get_local_purge_doc(DbName), - ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props3)), - - % Check that if the local doc doesn't exist that one - % is created for the index on compaction - delete_local_purge_doc(DbName), - ?assertMatch({not_found, _}, get_local_purge_doc(DbName)), - - couch_util:with_db(ShardName, fun(Db) -> - {ok, _} = couch_db:start_compact(Db) - end), - wait_compaction(ShardName, ?LINE), - - ?assertEqual(ok, meck:wait(3, couch_mrview_index, - ensure_local_purge_docs, '_', 5000) - ), - - {ok, #doc{body = {Props4}}} = get_local_purge_doc(DbName), - ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props4)) - end). - - -get_local_purge_doc(DbName) -> - {ok, DDoc} = fabric:open_doc(DbName, <<"_design/bar">>, []), - {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc), - Sig = IdxState#mrst.sig, - HexSig = list_to_binary(couch_index_util:hexsig(Sig)), - DocId = couch_mrview_util:get_local_purge_doc_id(HexSig), - [ShardName | _] = local_shards(DbName), - couch_util:with_db(ShardName, fun(Db) -> - couch_db:open_doc(Db, DocId, []) - end). - - -delete_local_purge_doc(DbName) -> - {ok, DDoc} = fabric:open_doc(DbName, <<"_design/bar">>, []), - {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc), - Sig = IdxState#mrst.sig, - HexSig = list_to_binary(couch_index_util:hexsig(Sig)), - DocId = couch_mrview_util:get_local_purge_doc_id(HexSig), - NewDoc = #doc{id = DocId, deleted = true}, - [ShardName | _] = local_shards(DbName), - couch_util:with_db(ShardName, fun(Db) -> - {ok, _} = couch_db:update_doc(Db, NewDoc, []) - end). - - -get_rev(#full_doc_info{} = FDI) -> - #doc_info{ - revs = [#rev_info{} = PrevRev | _] - } = couch_doc:to_doc_info(FDI), - PrevRev#rev_info.rev. - - -purge_docs(DbName, DocIds) -> - lists:foreach(fun(DocId) -> - FDI = fabric:get_full_doc_info(DbName, DocId, []), - Rev = get_rev(FDI), - {ok, [{ok, _}]} = fabric:purge_docs(DbName, [{DocId, [Rev]}], []) - end, DocIds). - - -wait_compaction(DbName, Line) -> - WaitFun = fun() -> - case is_compaction_running(DbName) of - true -> wait; - false -> ok - end - end, - case test_util:wait(WaitFun, 10000) of - timeout -> - erlang:error({assertion_failed, [ - {module, ?MODULE}, - {line, Line}, - {reason, "Timeout waiting for database compaction"} - ]}); - _ -> - ok - end. - - -is_compaction_running(DbName) -> - {ok, DbInfo} = couch_util:with_db(DbName, fun(Db) -> - couch_db:get_db_info(Db) - end), - couch_util:get_value(compact_running, DbInfo). - - -local_shards(DbName) -> - try - [ShardName || #shard{name = ShardName} <- mem3:local_shards(DbName)] - catch - error:database_does_not_exist -> - [] - end. 
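
The fabric purge tests above all hinge on the per-shard local purge
checkpoint document that a view index maintains: its body records the
purge_seq the index has processed, which is what the assertions on
Props1..Props4 check. A rough sketch of reading that checkpoint,
assembled only from helpers already used in the tests above (an
illustrative sketch, not the production code path; the function name
read_purge_checkpoint is invented for this note):

    %% Illustrative sketch: fetch the purge_seq recorded in an index's
    %% local purge checkpoint doc, given a shard Db handle and its ddoc.
    read_purge_checkpoint(Db, DDoc) ->
        {ok, Mrst} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc),
        HexSig = list_to_binary(couch_index_util:hexsig(Mrst#mrst.sig)),
        DocId = couch_mrview_util:get_local_purge_doc_id(HexSig),
        case couch_db:open_doc(Db, DocId, []) of
            {ok, #doc{body = {Props}}} ->
                {ok, couch_util:get_value(<<"purge_seq">>, Props)};
            {not_found, _} ->
                not_found
        end.
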
diff --git a/src/couch_mrview/test/couch_mrview_purge_docs_tests.erl b/src/couch_mrview/test/couch_mrview_purge_docs_tests.erl deleted file mode 100644 index 1020607a4..000000000 --- a/src/couch_mrview/test/couch_mrview_purge_docs_tests.erl +++ /dev/null @@ -1,575 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_purge_docs_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_mrview/include/couch_mrview.hrl"). - --define(TIMEOUT, 1000). - - -setup() -> - meck:new(couch_index_updater, [passthrough]), - {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map, 5), - Db. - -teardown(Db) -> - couch_db:close(Db), - couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), - meck:unload(), - ok. - -view_purge_test_() -> - { - "Map views", - { - setup, - fun test_util:start_couch/0, - fun test_util:stop_couch/1, - { - foreach, - fun setup/0, - fun teardown/1, - [ - fun test_purge_single/1, - fun test_purge_partial/1, - fun test_purge_complete/1, - fun test_purge_nochange/1, - fun test_purge_index_reset/1, - fun test_purge_compact_size_check/1, - fun test_purge_single_for_docid_with_list/1, - fun test_purge_complete_for_docid_with_list/1, - fun test_purge_compact_for_stale_purge_cp_without_client/1, - fun test_purge_compact_for_stale_purge_cp_with_client/1 - ] - } - } - }. - - -test_purge_single(Db) -> - ?_test(begin - Result = run_query(Db, []), - Expect = {ok, [ - {meta, [{total, 5}, {offset, 0}]}, - {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect, Result), - - FDI = couch_db:get_full_doc_info(Db, <<"1">>), - Rev = get_rev(FDI), - {ok, [{ok, _PRevs}]} = couch_db:purge_docs( - Db, - [{<<"UUID1">>, <<"1">>, [Rev]}] - ), - {ok, Db2} = couch_db:reopen(Db), - - Result2 = run_query(Db2, []), - Expect2 = {ok, [ - {meta, [{total, 4}, {offset, 0}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect2, Result2) - end). 
- - -test_purge_single_for_docid_with_list(Db) -> - ?_test(begin - Result = run_query(Db, []), - Expect = {ok, [ - {meta, [{total, 5}, {offset, 0}]}, - {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect, Result), - - FDI = couch_db:get_full_doc_info(Db, <<"1">>), - Rev = get_rev(FDI), - {ok, [{ok, _PRevs}]} = couch_db:purge_docs( - Db, - [{<<"UUID1">>, "1", [Rev]}] - ), - {ok, Db2} = couch_db:reopen(Db), - - Result2 = run_query(Db2, []), - Expect2 = {ok, [ - {meta, [{total, 4}, {offset, 0}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect2, Result2) - end). - -test_purge_partial(Db) -> - ?_test(begin - Result = run_query(Db, []), - Expect = {ok, [ - {meta, [{total, 5}, {offset, 0}]}, - {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect, Result), - - FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), Rev1 = get_rev(FDI1), - Update = {[ - {'_id', <<"1">>}, - {'_rev', couch_doc:rev_to_str({1, [crypto:hash(md5, <<"1.2">>)]})}, - {'val', 1.2} - ]}, - {ok, [_Rev2]} = save_docs(Db, [Update], [replicated_changes]), - - PurgeInfos = [{<<"UUID1">>, <<"1">>, [Rev1]}], - - {ok, _} = couch_db:purge_docs(Db, PurgeInfos), - {ok, Db2} = couch_db:reopen(Db), - - Result2 = run_query(Db2, []), - Expect2 = {ok, [ - {meta, [{total, 5}, {offset, 0}]}, - {row, [{id, <<"1">>}, {key, 1.2}, {value, 1.2}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect2, Result2) - end). - - -test_purge_complete(Db) -> - ?_test(begin - Result = run_query(Db, []), - Expect = {ok, [ - {meta, [{total, 5}, {offset, 0}]}, - {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect, Result), - - FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), Rev1 = get_rev(FDI1), - FDI2 = couch_db:get_full_doc_info(Db, <<"2">>), Rev2 = get_rev(FDI2), - FDI5 = couch_db:get_full_doc_info(Db, <<"5">>), Rev5 = get_rev(FDI5), - - PurgeInfos = [ - {<<"UUID1">>, <<"1">>, [Rev1]}, - {<<"UUID2">>, <<"2">>, [Rev2]}, - {<<"UUID5">>, <<"5">>, [Rev5]} - ], - {ok, _} = couch_db:purge_docs(Db, PurgeInfos), - {ok, Db2} = couch_db:reopen(Db), - - Result2 = run_query(Db2, []), - Expect2 = {ok, [ - {meta, [{total, 2}, {offset, 0}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]} - ]}, - ?assertEqual(Expect2, Result2) - end). 
- - -test_purge_complete_for_docid_with_list(Db) -> - ?_test(begin - Result = run_query(Db, []), - Expect = {ok, [ - {meta, [{total, 5}, {offset, 0}]}, - {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect, Result), - - FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), Rev1 = get_rev(FDI1), - FDI2 = couch_db:get_full_doc_info(Db, <<"2">>), Rev2 = get_rev(FDI2), - FDI5 = couch_db:get_full_doc_info(Db, <<"5">>), Rev5 = get_rev(FDI5), - - PurgeInfos = [ - {<<"UUID1">>, "1", [Rev1]}, - {<<"UUID2">>, "2", [Rev2]}, - {<<"UUID5">>, "5", [Rev5]} - ], - {ok, _} = couch_db:purge_docs(Db, PurgeInfos), - {ok, Db2} = couch_db:reopen(Db), - - Result2 = run_query(Db2, []), - Expect2 = {ok, [ - {meta, [{total, 2}, {offset, 0}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]} - ]}, - ?assertEqual(Expect2, Result2) - end). - - -test_purge_nochange(Db) -> - ?_test(begin - Result = run_query(Db, []), - Expect = {ok, [ - {meta, [{total, 5}, {offset, 0}]}, - {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect, Result), - - FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), - Rev1 = get_rev(FDI1), - - PurgeInfos = [ - {<<"UUID1">>, <<"6">>, [Rev1]} - ], - {ok, _} = couch_db:purge_docs(Db, PurgeInfos), - {ok, Db2} = couch_db:reopen(Db), - - Result2 = run_query(Db2, []), - Expect2 = {ok, [ - {meta, [{total, 5}, {offset, 0}]}, - {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect2, Result2) - end). - - -test_purge_index_reset(Db) -> - ?_test(begin - ok = couch_db:set_purge_infos_limit(Db, 2), - {ok, Db1} = couch_db:reopen(Db), - - Result = run_query(Db1, []), - Expect = {ok, [ - {meta, [{total, 5}, {offset, 0}]}, - {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, - {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, - {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, - {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, - {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} - ]}, - ?assertEqual(Expect, Result), - - PurgeInfos = lists:map(fun(I) -> - DocId = list_to_binary(integer_to_list(I)), - FDI = couch_db:get_full_doc_info(Db, DocId), - Rev = get_rev(FDI), - {couch_uuids:random(), DocId, [Rev]} - end, lists:seq(1, 5)), - {ok, _} = couch_db:purge_docs(Db1, PurgeInfos), - - {ok, Db2} = couch_db:reopen(Db1), - - % Forcibly set the purge doc to a newer purge - % sequence to force an index reset. This should - % never happen in real life but the reset - % is required for correctness. 
- {ok, #doc{body = {OldProps}} = LocalDoc} = get_local_purge_doc(Db2), - NewPurgeSeq = {<<"purge_seq">>, 5}, - NewProps = lists:keyreplace(<<"purge_seq">>, 1, OldProps, NewPurgeSeq), - RewindDoc = LocalDoc#doc{body = {NewProps}}, - {ok, _} = couch_db:update_doc(Db2, RewindDoc, []), - - % Compact the database to remove purge infos - {ok, _} = couch_db:start_compact(Db2), - wait_compaction(couch_db:name(Db), "database", ?LINE), - - {ok, Db3} = couch_db:reopen(Db2), - Result2 = run_query(Db3, []), - Expect2 = {ok, [ - {meta, [{total, 0}, {offset, 0}]} - ]}, - ?assertEqual(Expect2, Result2), - - % Assert that we had a reset - meck:wait( - 1, - couch_index_updater, - handle_info, - [{'EXIT', '_', {reset, '_'}}, '_'], - 5000 - ) - end). - - -test_purge_compact_size_check(Db) -> - ?_test(begin - DbName = couch_db:name(Db), - Docs = couch_mrview_test_util:make_docs(normal, 6, 200), - {ok, Db1} = couch_mrview_test_util:save_docs(Db, Docs), - _Result = run_query(Db1, []), - DiskSizeBefore = db_disk_size(DbName), - - PurgedDocsNum = 150, - IdsRevs = lists:foldl(fun(Id, CIdRevs) -> - Id1 = docid(Id), - FDI1 = couch_db:get_full_doc_info(Db1, Id1), - Rev1 = get_rev(FDI1), - UUID1 = uuid(Id), - [{UUID1, Id1, [Rev1]} | CIdRevs] - end, [], lists:seq(1, PurgedDocsNum)), - {ok, _} = couch_db:purge_docs(Db1, IdsRevs), - - {ok, Db2} = couch_db:reopen(Db1), - _Result1 = run_query(Db2, []), - {ok, PurgedIdRevs} = couch_db:fold_purge_infos( - Db2, - 0, - fun fold_fun/2, - [], - [] - ), - ?assertEqual(PurgedDocsNum, length(PurgedIdRevs)), - config:set("couchdb", "file_compression", "snappy", false), - - {ok, Db3} = couch_db:open_int(DbName, []), - {ok, _CompactPid} = couch_db:start_compact(Db3), - wait_compaction(DbName, "database", ?LINE), - ok = couch_db:close(Db3), - DiskSizeAfter = db_disk_size(DbName), - ?assert(DiskSizeBefore > DiskSizeAfter) - end). - - -test_purge_compact_for_stale_purge_cp_without_client(Db) -> - ?_test(begin - DbName = couch_db:name(Db), - % add more documents to database for purge - Docs = couch_mrview_test_util:make_docs(normal, 6, 200), - {ok, Db1} = couch_mrview_test_util:save_docs(Db, Docs), - - % change PurgedDocsLimit to 10 from 1000 to - % avoid timeout of eunit test - PurgedDocsLimit = 10, - couch_db:set_purge_infos_limit(Db1, PurgedDocsLimit), - - % purge 150 documents - PurgedDocsNum = 150, - PurgeInfos = lists:foldl(fun(Id, CIdRevs) -> - Id1 = docid(Id), - FDI1 = couch_db:get_full_doc_info(Db1, Id1), - Rev1 = get_rev(FDI1), - UUID1 = uuid(Id), - [{UUID1, Id1, [Rev1]} | CIdRevs] - end, [], lists:seq(1, PurgedDocsNum)), - {ok, _} = couch_db:purge_docs(Db1, PurgeInfos), - - {ok, Db2} = couch_db:reopen(Db1), - {ok, PurgedIdRevs} = couch_db:fold_purge_infos( - Db2, - 0, - fun fold_fun/2, - [], - [] - ), - ?assertEqual(PurgedDocsNum, length(PurgedIdRevs)), - - % run compaction to trigger pruning of purge tree - {ok, Db3} = couch_db:open_int(DbName, []), - {ok, _CompactPid} = couch_db:start_compact(Db3), - wait_compaction(DbName, "database", ?LINE), - ok = couch_db:close(Db3), - - % check the remaining purge requests in purge tree - {ok, Db4} = couch_db:reopen(Db3), - OldestPSeq = couch_db:get_oldest_purge_seq(Db4), - {ok, PurgedIdRevs2} = couch_db:fold_purge_infos( - Db4, - OldestPSeq - 1, - fun fold_fun/2, - [], - [] - ), - ?assertEqual(PurgedDocsLimit, length(PurgedIdRevs2)) - end). 
- - -test_purge_compact_for_stale_purge_cp_with_client(Db) -> - ?_test(begin - DbName = couch_db:name(Db), - % add more documents to database for purge - Docs = couch_mrview_test_util:make_docs(normal, 6, 200), - {ok, Db1} = couch_mrview_test_util:save_docs(Db, Docs), - - % change PurgedDocsLimit to 10 from 1000 to - % avoid timeout of eunit test - PurgedDocsLimit = 10, - couch_db:set_purge_infos_limit(Db1, PurgedDocsLimit), - _Result = run_query(Db1, []), - - % first purge 30 documents - PurgedDocsNum1 = 30, - IdsRevs = lists:foldl(fun(Id, CIdRevs) -> - Id1 = docid(Id), - FDI1 = couch_db:get_full_doc_info(Db1, Id1), - Rev1 = get_rev(FDI1), - UUID1 = uuid(Id), - [{UUID1, Id1, [Rev1]} | CIdRevs] - end, [], lists:seq(1, PurgedDocsNum1)), - {ok, _} = couch_db:purge_docs(Db1, IdsRevs), - - {ok, Db2} = couch_db:reopen(Db1), - % run query again to reflect purge request to mrview - _Result1 = run_query(Db2, []), - {ok, PurgedIdRevs} = couch_db:fold_purge_infos( - Db2, - 0, - fun fold_fun/2, - [], - [] - ), - ?assertEqual(PurgedDocsNum1, length(PurgedIdRevs)), - - % then purge 120 documents - PurgedDocsNum2 = 150, - IdsRevs2 = lists:foldl(fun(Id, CIdRevs) -> - Id1 = docid(Id), - FDI1 = couch_db:get_full_doc_info(Db1, Id1), - Rev1 = get_rev(FDI1), - UUID1 = uuid(Id), - [{UUID1, Id1, [Rev1]} | CIdRevs] - end, [], lists:seq(PurgedDocsNum1 + 1, PurgedDocsNum2)), - {ok, _} = couch_db:purge_docs(Db2, IdsRevs2), - - % run compaction to trigger pruning of purge tree - % only the first 30 purge requests are pruned - {ok, Db3} = couch_db:open_int(DbName, []), - {ok, _CompactPid} = couch_db:start_compact(Db3), - wait_compaction(DbName, "database", ?LINE), - ok = couch_db:close(Db3), - - % check the remaining purge requests in purge tree - {ok, Db4} = couch_db:reopen(Db3), - OldestPSeq = couch_db:get_oldest_purge_seq(Db4), - {ok, PurgedIdRevs2} = couch_db:fold_purge_infos( - Db4, - OldestPSeq - 1, - fun fold_fun/2, - [], - [] - ), - ?assertEqual(PurgedDocsNum2 - PurgedDocsNum1, length(PurgedIdRevs2)) - end). - - -get_local_purge_doc(Db) -> - {ok, DDoc} = couch_db:open_doc(Db, <<"_design/bar">>, []), - {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc), - Sig = IdxState#mrst.sig, - HexSig = list_to_binary(couch_index_util:hexsig(Sig)), - DocId = couch_mrview_util:get_local_purge_doc_id(HexSig), - couch_db:open_doc(Db, DocId, []). - - -run_query(Db, Opts) -> - couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts). - - -save_docs(Db, JsonDocs, Options) -> - Docs = lists:map(fun(JDoc) -> - couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE(JDoc))) - end, JsonDocs), - Opts = [full_commit | Options], - case lists:member(replicated_changes, Options) of - true -> - {ok, []} = couch_db:update_docs( - Db, Docs, Opts, replicated_changes), - {ok, lists:map(fun(Doc) -> - {Pos, [RevId | _]} = Doc#doc.revs, - {Pos, RevId} - end, Docs)}; - false -> - {ok, Resp} = couch_db:update_docs(Db, Docs, Opts), - {ok, [Rev || {ok, Rev} <- Resp]} - end. - - -get_rev(#full_doc_info{} = FDI) -> - #doc_info{ - revs = [#rev_info{} = PrevRev | _] - } = couch_doc:to_doc_info(FDI), - PrevRev#rev_info.rev. - - -db_disk_size(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, Info} = couch_db:get_db_info(Db), - ok = couch_db:close(Db), - active_size(Info). - - -active_size(Info) -> - couch_util:get_nested_json_value({Info}, [sizes, active]). 
- - -wait_compaction(DbName, Kind, Line) -> - WaitFun = fun() -> - case is_compaction_running(DbName) of - true -> wait; - false -> ok - end - end, - case test_util:wait(WaitFun, 10000) of - timeout -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, Line}, - {reason, "Timeout waiting for " - ++ Kind - ++ " database compaction"}]}); - _ -> - ok - end. - - -is_compaction_running(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, DbInfo} = couch_db:get_db_info(Db), - couch_db:close(Db), - couch_util:get_value(compact_running, DbInfo). - - -fold_fun({_PSeq, _UUID, Id, Revs}, Acc) -> - {ok, [{Id, Revs} | Acc]}. - - -docid(I) -> - list_to_binary(integer_to_list(I)). - - -uuid(I) -> - Str = io_lib:format("UUID~4..0b", [I]), - iolist_to_binary(Str). diff --git a/src/couch_mrview/test/couch_mrview_red_views_tests.erl b/src/couch_mrview/test/couch_mrview_red_views_tests.erl deleted file mode 100644 index b83686113..000000000 --- a/src/couch_mrview/test/couch_mrview_red_views_tests.erl +++ /dev/null @@ -1,95 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_red_views_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - - -setup() -> - {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), red), - Db. - -teardown(Db) -> - couch_db:close(Db), - couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), - ok. - - -reduce_views_test_() -> - { - "Reduce views", - { - setup, - fun test_util:start_couch/0, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_reduce_basic/1, - fun should_reduce_key_range/1, - fun should_reduce_with_group_level/1, - fun should_reduce_with_group_exact/1 - ] - } - } - }. - - -should_reduce_basic(Db) -> - Result = run_query(Db, []), - Expect = {ok, [ - {meta, []}, - {row, [{key, null}, {value, 55}]} - ]}, - ?_assertEqual(Expect, Result). - -should_reduce_key_range(Db) -> - Result = run_query(Db, [{start_key, [0, 2]}, {end_key, [0, 4]}]), - Expect = {ok, [ - {meta, []}, - {row, [{key, null}, {value, 6}]} - ]}, - ?_assertEqual(Expect, Result). - -should_reduce_with_group_level(Db) -> - Result = run_query(Db, [{group_level, 1}]), - Expect = {ok, [ - {meta, []}, - {row, [{key, [0]}, {value, 30}]}, - {row, [{key, [1]}, {value, 25}]} - ]}, - ?_assertEqual(Expect, Result). - -should_reduce_with_group_exact(Db) -> - Result = run_query(Db, [{group_level, exact}]), - Expect = {ok, [ - {meta, []}, - {row, [{key, [0, 2]}, {value, 2}]}, - {row, [{key, [0, 4]}, {value, 4}]}, - {row, [{key, [0, 6]}, {value, 6}]}, - {row, [{key, [0, 8]}, {value, 8}]}, - {row, [{key, [0, 10]}, {value, 10}]}, - {row, [{key, [1, 1]}, {value, 1}]}, - {row, [{key, [1, 3]}, {value, 3}]}, - {row, [{key, [1, 5]}, {value, 5}]}, - {row, [{key, [1, 7]}, {value, 7}]}, - {row, [{key, [1, 9]}, {value, 9}]} - ]}, - ?_assertEqual(Expect, Result). - - -run_query(Db, Opts) -> - couch_mrview:query_view(Db, <<"_design/red">>, <<"baz">>, Opts). 
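
The group_level queries above depend on reduce grouping truncating
array keys to the requested depth before re-reducing: at group_level 1
the keys [0,2], [0,4], ... all collapse to [0], which is why the totals
come back as 30 and 25, while group_level exact groups on the full key.
A minimal sketch of that truncation rule, assuming array keys like the
ones these views emit (illustrative only; the actual grouping happens
inside the view btree fold):

    %% Illustrative: the key a reduce row is grouped under.
    group_key(Key, exact) ->
        Key;
    group_key(Key, Level) when is_list(Key), is_integer(Level) ->
        lists:sublist(Key, Level);
    group_key(Key, _Level) ->
        Key.  % non-array keys group as-is at any level
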
diff --git a/src/couch_mrview/test/couch_mrview_util_tests.erl b/src/couch_mrview/test/couch_mrview_util_tests.erl deleted file mode 100644 index 7046c9bb2..000000000 --- a/src/couch_mrview/test/couch_mrview_util_tests.erl +++ /dev/null @@ -1,39 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_mrview_util_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch_mrview/include/couch_mrview.hrl"). - - - -couch_mrview_util_test_() -> - [ - ?_assertEqual(0, validate_group_level(undefined, undefined)), - ?_assertEqual(exact, validate_group_level(true, undefined)), - ?_assertEqual(0, validate_group_level(false, undefined)), - ?_assertEqual(1, validate_group_level(undefined, 1)), - ?_assertEqual(0, validate_group_level(true, 0)), - ?_assertEqual(0, validate_group_level(undefined, 0)), - ?_assertEqual(1, validate_group_level(true, 1)), - ?_assertEqual(0, validate_group_level(false, 0)), - ?_assertThrow({query_parse_error, - <<"Can't specify group=false and group_level>0 at the same time">>}, - validate_group_level(false,1)) - ]. - -validate_group_level(Group, GroupLevel) -> - Args0 = #mrargs{group=Group, group_level=GroupLevel, view_type=red}, - Args1 = couch_mrview_util:validate_args(Args0), - Args1#mrargs.group_level. - diff --git a/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl new file mode 100644 index 000000000..bf8eb7e5b --- /dev/null +++ b/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl @@ -0,0 +1,140 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_mrview_all_docs_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 1000). + + + +setup() -> + {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map), + Db. + +teardown(Db) -> + couch_db:close(Db), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), + ok. + + +all_docs_test_() -> + { + "_all_docs view tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_query/1, + fun should_query_with_range/1, + fun should_query_with_range_rev/1, + fun should_query_with_limit_and_skip/1, + fun should_query_with_include_docs/1, + fun should_query_empty_views/1 + ] + } + } + }. 
+ + +should_query(Db) -> + Result = run_query(Db, []), + Expect = {ok, [ + {meta, [{total, 11}, {offset, 0}]}, + mk_row(<<"1">>, <<"1-08d53a5760b95fce6df2e2c5b008be39">>), + mk_row(<<"10">>, <<"1-a05b6ea2bc0243949f103d5b4f15f71e">>), + mk_row(<<"2">>, <<"1-b57c77a9e6f7574ca6469f0d6dcd78bb">>), + mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>), + mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>), + mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>), + mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>), + mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>), + mk_row(<<"8">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>), + mk_row(<<"9">>, <<"1-558c8487d9aee25399a91b5d31d90fe2">>), + mk_row(<<"_design/bar">>, <<"1-a44e1dd1994a7717bf89c894ebd1f081">>) + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_range(Db) -> + Result = run_query(Db, [{start_key, <<"3">>}, {end_key, <<"5">>}]), + Expect = {ok, [ + {meta, [{total, 11}, {offset, 3}]}, + mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>), + mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>), + mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>) + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_range_rev(Db) -> + Result = run_query(Db, [ + {direction, rev}, + {start_key, <<"5">>}, {end_key, <<"3">>}, + {inclusive_end, true} + ]), + Expect = {ok, [ + {meta, [{total, 11}, {offset, 5}]}, + mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>), + mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>), + mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>) + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_limit_and_skip(Db) -> + Result = run_query(Db, [ + {start_key, <<"2">>}, + {limit, 3}, + {skip, 3} + ]), + Expect = {ok, [ + {meta, [{total, 11}, {offset, 5}]}, + mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>), + mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>), + mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>) + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_include_docs(Db) -> + Result = run_query(Db, [ + {start_key, <<"8">>}, + {end_key, <<"8">>}, + {include_docs, true} + ]), + Doc = {[ + {<<"_id">>,<<"8">>}, + {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>}, + {<<"val">>, 8} + ]}, + Val = {[{rev, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>}]}, + Expect = {ok, [ + {meta, [{total, 11}, {offset, 8}]}, + {row, [{id, <<"8">>}, {key, <<"8">>}, {value, Val}, {doc, Doc}]} + ]}, + ?_assertEqual(Expect, Result). + +should_query_empty_views(Db) -> + Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>), + Expect = {ok, [ + {meta, [{total, 0}, {offset, 0}]} + ]}, + ?_assertEqual(Expect, Result). + + +mk_row(Id, Rev) -> + {row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}. + +run_query(Db, Opts) -> + couch_mrview:query_all_docs(Db, Opts). diff --git a/src/couch_mrview/test/eunit/couch_mrview_changes_since_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_changes_since_tests.erl new file mode 100644 index 000000000..d670e109b --- /dev/null +++ b/src/couch_mrview/test/eunit/couch_mrview_changes_since_tests.erl @@ -0,0 +1,210 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_mrview_changes_since_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 1000). + +teardown(Db) -> + couch_db:close(Db), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), + ok. + +changes_since_basic_test_() -> + { + "changes_since tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun() -> + Type = {changes, seq_indexed}, + {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), Type), + Db + end, + fun teardown/1, + [ + fun test_basic/1, + fun test_basic_since/1, + fun test_basic_count/1, + fun test_basic_count_since/1, + fun test_compact/1 + ] + } + } + }. + +changes_since_range_test_() -> + { + "changes_since_range tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun() -> + Type = {changes, keyseq_indexed}, + {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), Type), + Db + end, + fun teardown/1, + [ + fun test_range/1, + fun test_range_since/1 + ] + } + } + }. + +changes_since_range_count_test_() -> + { + "changes_since_range_count tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun() -> + Type = {changes, seq_indexed_keyseq_indexed}, + {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), Type), + Db + end, + fun teardown/1, + [ + fun test_range_count/1, + fun test_range_count_since/1, + fun test_remove_key/1 + ] + } + } + }. + +test_basic(Db) -> + Result = run_query(Db, 0, []), + Expect = {ok, [ + {{2, <<"1">>, <<"1">>}, 1}, + {{3, <<"10">>, <<"10">>}, 10}, + {{4, <<"2">>, <<"2">>}, 2}, + {{5, <<"3">>, <<"3">>}, 3}, + {{6, <<"4">>, <<"4">>}, 4}, + {{7, <<"5">>, <<"5">>}, 5}, + {{8, <<"6">>, <<"6">>}, 6}, + {{9, <<"7">>, <<"7">>}, 7}, + {{10, <<"8">>, <<"8">>}, 8}, + {{11, <<"9">>, <<"9">>}, 9} + ]}, + ?_assertEqual(Result, Expect). + + +test_range(Db) -> + Range = [{start_key, <<"3">>}, {end_key, <<"5">>}], + Result = run_query(Db, 0, Range), + Expect = {ok, [ + {{5, <<"3">>, <<"3">>}, 3}, + {{6, <<"4">>, <<"4">>}, 4}, + {{7, <<"5">>, <<"5">>}, 5} + ]}, + ?_assertEqual(Result, Expect). + +test_basic_since(Db) -> + Result = run_query(Db, 5, []), + Expect = {ok, [ + {{6, <<"4">>, <<"4">>}, 4}, + {{7, <<"5">>, <<"5">>}, 5}, + {{8, <<"6">>, <<"6">>}, 6}, + {{9, <<"7">>, <<"7">>}, 7}, + {{10, <<"8">>, <<"8">>}, 8}, + {{11, <<"9">>, <<"9">>}, 9} + ]}, + ?_assertEqual(Result, Expect). + +test_range_since(Db) -> + Range = [{start_key, <<"3">>}, {end_key, <<"5">>}], + Result = run_query(Db, 5, Range), + Expect = {ok, [ + {{6, <<"4">>, <<"4">>}, 4}, + {{7, <<"5">>, <<"5">>}, 5} + ]}, + ?_assertEqual(Result, Expect). + +test_basic_count(Db) -> + Result = run_count_query(Db, 0, []), + ?_assertEqual(Result, 10). + +test_range_count(Db) -> + Range = [{start_key, <<"3">>}, {end_key, <<"5">>}], + Result = run_count_query(Db, 0, Range), + ?_assertEqual(Result, 3). + +test_basic_count_since(Db) -> + Result = run_count_query(Db, 5, []), + ?_assertEqual(Result, 6). 
+ +test_range_count_since(Db) -> + Range = [{start_key, <<"3">>}, {end_key, <<"5">>}], + Result = run_count_query(Db, 5, Range), + ?_assertEqual(Result, 2). + +test_compact(Db) -> + Result = couch_mrview:compact(Db, <<"_design/bar">>), + Count = run_count_query(Db, 0, []), + [?_assertEqual(Result, ok), ?_assertEqual(Count, 10)]. + +test_remove_key(Db) -> + %% add new doc + Doc = couch_mrview_test_util:doc(11), + {ok, Rev} = couch_db:update_doc(Db, Doc, []), + RevStr = couch_doc:rev_to_str(Rev), + {ok, _} = couch_db:ensure_full_commit(Db), + {ok, Db1} = couch_db:reopen(Db), + Result = run_count_query(Db1, 0, []), + %% check new view key + Range = [{start_key, <<"11">>}, {end_key, <<"11">>}], + Result1 = run_query(Db1, 0, Range), + Expect = {ok, [ + {{12, <<"11">>, <<"11">>}, 11} + ]}, + + %% delete doc + Doc2 = couch_doc:from_json_obj({[ + {<<"_id">>, <<"11">>}, + {<<"_rev">>, RevStr}, + {<<"_deleted">>, true} + ]}), + {ok, _} = couch_db:update_doc(Db1, Doc2, []), + {ok, Db2} = couch_db:reopen(Db1), + Result2 = run_count_query(Db2, 0, []), + %% check new view key + Result3 = run_query(Db2, 0, Range), + Expect2 = {ok, [ + {{13, <<"11">>, <<"11">>}, removed} + ]}, + [ + ?_assertEqual(Result, 11), + ?_assertEqual(Result1, Expect), + ?_assertEqual(Result2, 11), + ?_assertEqual(Result3, Expect2) + ]. + +run_query(Db, Since, Opts) -> + Fun = fun(KV, Acc) -> {ok, [KV | Acc]} end, + {ok, R} = couch_mrview:view_changes_since(Db, <<"_design/bar">>, <<"baz">>, + Since, Fun, Opts, []), + {ok, lists:reverse(R)}. + +run_count_query(Db, Since, Opts) -> + couch_mrview:count_view_changes_since(Db, <<"_design/bar">>, <<"baz">>, + Since, Opts). diff --git a/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl new file mode 100644 index 000000000..5c8cb54b1 --- /dev/null +++ b/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl @@ -0,0 +1,207 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_mrview_collation_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 1000). +-define(VALUES, [ + null, + false, + true, + + 1, + 2, + 3.0, + 4, + + <<"a">>, + <<"A">>, + <<"aa">>, + <<"b">>, + <<"B">>, + <<"ba">>, + <<"bb">>, + + % U+200B is a zero-width space, which will be ignored by ICU but will cause + % the raw collator to treat these as three distinct keys + <<"c">>, + unicode:characters_to_binary([$c, 16#200B]), + unicode:characters_to_binary([$c, 16#200B, 16#200B]), + + [<<"a">>], + [<<"b">>], + [<<"b">>, <<"c">>], + [<<"b">>, <<"c">>, <<"a">>], + [<<"b">>, <<"d">>], + [<<"b">>, <<"d">>, <<"e">>], + + {[{<<"a">>, 1}]}, + {[{<<"a">>, 2}]}, + {[{<<"b">>, 1}]}, + {[{<<"b">>, 2}]}, + {[{<<"b">>, 2}, {<<"a">>, 1}]}, + {[{<<"b">>, 2}, {<<"c">>, 2}]} +]). + + +setup() -> + {ok, Db1} = couch_mrview_test_util:new_db(?tempdb(), map), + Docs = [couch_mrview_test_util:ddoc(red) | make_docs()], + {ok, Db2} = couch_mrview_test_util:save_docs(Db1, Docs), + Db2. 
+ +teardown(Db) -> + couch_db:close(Db), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), + ok. + + +collation_test_() -> + { + "Collation tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_collate_fwd/1, + fun should_collate_rev/1, + fun should_collate_range_/1, + fun should_collate_with_inclusive_end_fwd/1, + fun should_collate_with_inclusive_end_rev/1, + fun should_collate_without_inclusive_end_fwd/1, + fun should_collate_without_inclusive_end_rev/1, + fun should_collate_with_endkey_docid/1, + fun should_use_collator_for_reduce_grouping/1 + ] + } + } + }. + + +should_collate_fwd(Db) -> + {ok, Results} = run_query(Db, []), + Expect = [{meta, [{total, length(?VALUES)}, {offset, 0}]}] ++ rows(), + ?_assertEquiv(Expect, Results). + +should_collate_rev(Db) -> + {ok, Results} = run_query(Db, [{direction, rev}]), + Expect = [{meta, [{total, length(?VALUES)}, {offset, 0}]}] ++ lists:reverse(rows()), + ?_assertEquiv(Expect, Results). + +should_collate_range_(Db) -> + Index = lists:zip(lists:seq(0, length(?VALUES)-1), ?VALUES), + lists:map(fun(V) -> + {ok, Results} = run_query(Db, [{start_key, V}, {end_key, V}]), + Expect = [ + {meta, [{total, length(?VALUES)}, find_offset(Index, V)]} | + find_matching_rows(Index, V) + ], + ?_assertEquiv(Expect, Results) + end, ?VALUES). + +find_offset(Index, Value) -> + [{Offset, _} | _] = lists:dropwhile(fun({_, V}) -> + couch_ejson_compare:less(Value, V) =/= 0 + end, Index), + {offset, Offset}. + +find_matching_rows(Index, Value) -> + Matches = lists:filter(fun({_, V}) -> + couch_ejson_compare:less(Value, V) =:= 0 + end, Index), + lists:map(fun({Id, V}) -> + {row, [{id, list_to_binary(integer_to_list(Id))}, {key, V}, {value, 0}]} + end, Matches). + +should_collate_with_inclusive_end_fwd(Db) -> + Opts = [{end_key, <<"b">>}, {inclusive_end, true}], + {ok, Rows0} = run_query(Db, Opts), + LastRow = lists:last(Rows0), + Expect = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]}, + ?_assertEqual(Expect, LastRow). + +should_collate_with_inclusive_end_rev(Db) -> + Opts = [{end_key, <<"b">>}, {inclusive_end, true}, {direction, rev}], + {ok, Rows} = run_query(Db, Opts), + LastRow = lists:last(Rows), + Expect = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]}, + ?_assertEqual(Expect, LastRow). + +should_collate_without_inclusive_end_fwd(Db) -> + Opts = [{end_key, <<"b">>}, {inclusive_end, false}], + {ok, Rows0} = run_query(Db, Opts), + LastRow = lists:last(Rows0), + Expect = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]}, + ?_assertEqual(Expect, LastRow). + +should_collate_without_inclusive_end_rev(Db) -> + Opts = [{end_key, <<"b">>}, {inclusive_end, false}, {direction, rev}], + {ok, Rows} = run_query(Db, Opts), + LastRow = lists:last(Rows), + Expect = {row, [{id,<<"11">>}, {key,<<"B">>}, {value,0}]}, + ?_assertEqual(Expect, LastRow). + +should_collate_with_endkey_docid(Db) -> + ?_test(begin + {ok, Rows0} = run_query(Db, [ + {end_key, <<"b">>}, {end_key_docid, <<"10">>}, + {inclusive_end, false} + ]), + Result0 = lists:last(Rows0), + Expect0 = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]}, + ?assertEqual(Expect0, Result0), + + {ok, Rows1} = run_query(Db, [ + {end_key, <<"b">>}, {end_key_docid, <<"11">>}, + {inclusive_end, false} + ]), + Result1 = lists:last(Rows1), + Expect1 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]}, + ?assertEqual(Expect1, Result1) + end). 
+ +should_use_collator_for_reduce_grouping(Db) -> + UniqueKeys = lists:usort(fun(A, B) -> + not couch_ejson_compare:less_json(B, A) + end, ?VALUES), + {ok, [{meta,_} | Rows]} = reduce_query(Db, [{group_level, exact}]), + ?_assertEqual(length(UniqueKeys), length(Rows)). + +make_docs() -> + {Docs, _} = lists:foldl(fun(V, {Docs0, Count}) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, list_to_binary(integer_to_list(Count))}, + {<<"foo">>, V} + ]}), + {[Doc | Docs0], Count+1} + end, {[], 0}, ?VALUES), + Docs. + +rows() -> + {Rows, _} = lists:foldl(fun(V, {Rows0, Count}) -> + Id = list_to_binary(integer_to_list(Count)), + Row = {row, [{id, Id}, {key, V}, {value, 0}]}, + {[Row | Rows0], Count+1} + end, {[], 0}, ?VALUES), + lists:reverse(Rows). + +run_query(Db, Opts) -> + couch_mrview:query_view(Db, <<"_design/bar">>, <<"zing">>, Opts). + +reduce_query(Db, Opts) -> + couch_mrview:query_view(Db, <<"_design/red">>, <<"zing">>, Opts). diff --git a/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl new file mode 100644 index 000000000..7664becdc --- /dev/null +++ b/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl @@ -0,0 +1,115 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_mrview_compact_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 1000). + + +setup() -> + {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map, 1000), + ok = meck:new(couch_mrview_compactor, [passthrough]), + Db. + +teardown(Db) -> + meck:unload(), + couch_db:close(Db), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), + ok. + + +compaction_test_() -> + { + "Compaction tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_swap/1, + fun should_remove/1 + ] + } + } + }. + + +should_swap(Db) -> + ?_test(begin + couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>), + {ok, QPid} = start_query(Db), + {ok, MonRef} = couch_mrview:compact(Db, <<"_design/bar">>, [monitor]), + receive + {'DOWN', MonRef, process, _, _} -> ok + after ?TIMEOUT -> + erlang:error( + {assertion_failed, + [{module, ?MODULE}, {line, ?LINE}, + {reason, "compaction failed"}]}) + end, + QPid ! {self(), continue}, + receive + {QPid, Count} -> + ?assertEqual(1000, Count) + after ?TIMEOUT -> + erlang:error( + {assertion_failed, + [{module, ?MODULE}, {line, ?LINE}, + {reason, "query failed"}]}) + end + end). 
+ + +should_remove(Db) -> + ?_test(begin + DDoc = <<"_design/bar">>, + {ok, _Results} = couch_mrview:query_view(Db, DDoc, <<"baz">>), + {ok, IndexPid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc), + ok = couch_index:compact(IndexPid, []), + {ok, CompactorPid} = couch_index:get_compactor_pid(IndexPid), + {ok, CompactingPid} = couch_index_compactor:get_compacting_pid(CompactorPid), + MonRef = erlang:monitor(process, CompactingPid), + exit(CompactingPid, crash), + receive + {'DOWN', MonRef, process, _, crash} -> + meck:wait(couch_mrview_compactor, remove_compacted, '_', 100), + ?assertEqual(1, meck:num_calls( + couch_mrview_compactor, remove_compacted, '_', IndexPid)), + ?assert(is_process_alive(IndexPid)), + ?assert(is_process_alive(CompactorPid)) + after ?TIMEOUT -> + erlang:error( + {assertion_failed, [ + {module, ?MODULE}, {line, ?LINE}, + {reason, "compaction didn't exit :/"}]}) + end + end). + + +start_query(Db) -> + Self = self(), + Pid = spawn(fun() -> + CB = fun + (_, wait) -> receive {Self, continue} -> {ok, 0} end; + ({row, _}, Count) -> {ok, Count+1}; + (_, Count) -> {ok, Count} + end, + {ok, Result} = + couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, [], CB, wait), + Self ! {self(), Result} + end), + {ok, Pid}. diff --git a/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl new file mode 100644 index 000000000..4310157eb --- /dev/null +++ b/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl @@ -0,0 +1,145 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_mrview_ddoc_updated_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 1000). + + +setup() -> + Name = ?tempdb(), + couch_server:delete(Name, [?ADMIN_CTX]), + {ok, Db} = couch_db:create(Name, [?ADMIN_CTX]), + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/bar">>}, + {<<"views">>, {[ + {<<"baz">>, {[ + {<<"map">>, << + "function(doc) {\n" + " emit(doc.val, doc.val);\n" + "}" + >>} + ]}} + ]}} + ]}), + [Doc1 | Docs999] = couch_mrview_test_util:make_docs(map, 100), + {ok, _} = couch_db:update_docs(Db, [DDoc, Doc1], []), + {ok, Db2} = couch_db:reopen(Db), + + % run a query with 1 doc to initialize couch_index process + CB = fun + ({row, _}, Count) -> {ok, Count+1}; + (_, Count) -> {ok, Count} + end, + {ok, _} = + couch_mrview:query_view(Db2, <<"_design/bar">>, <<"baz">>, [], CB, 0), + + meck:new(couch_index_updater, [passthrough]), + meck:expect(couch_index_updater, update, fun(Idx, Mod, IdxSt) -> + timer:sleep(5000), + meck:passthrough([Idx, Mod, IdxSt]) + end), + + % add more docs + {ok, _} = couch_db:update_docs(Db2, Docs999, []), + {ok, Db3} = couch_db:reopen(Db2), + Db3. + +teardown(Db) -> + meck:unload(couch_index_updater), + couch_db:close(Db), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), + ok. 
+ + +ddoc_update_test_() -> + { + "Check ddoc update actions", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun check_indexing_stops_on_ddoc_change/1 + ] + } + } + }. + + +check_indexing_stops_on_ddoc_change(Db) -> + ?_test(begin + DDocID = <<"_design/bar">>, + + IndexesBefore = get_indexes_by_ddoc(DDocID, 1), + ?assertEqual(1, length(IndexesBefore)), + AliveBefore = lists:filter(fun erlang:is_process_alive/1, IndexesBefore), + ?assertEqual(1, length(AliveBefore)), + + {ok, DDoc} = couch_db:open_doc(Db, DDocID, [ejson_body, ?ADMIN_CTX]), + DDocJson2 = couch_doc:from_json_obj({[ + {<<"_id">>, DDocID}, + {<<"_deleted">>, true}, + {<<"_rev">>, couch_doc:rev_to_str(DDoc#doc.revs)} + ]}), + + % spawn a process for query + Self = self(), + QPid = spawn(fun() -> + {ok, Result} = couch_mrview:query_view( + Db, <<"_design/bar">>, <<"baz">>, []), + Self ! {self(), Result} + end), + + % while indexing for the query is in progress, delete DDoc + {ok, _} = couch_db:update_doc(Db, DDocJson2, []), + receive + {QPid, Msg} -> + ?assertEqual(Msg, ddoc_updated) + after ?TIMEOUT -> + erlang:error( + {assertion_failed, [{module, ?MODULE}, {line, ?LINE}, + {reason, "test failed"}]}) + end, + + %% assert that previously running indexes are gone + IndexesAfter = get_indexes_by_ddoc(DDocID, 0), + ?assertEqual(0, length(IndexesAfter)), + AliveAfter = lists:filter(fun erlang:is_process_alive/1, IndexesBefore), + ?assertEqual(0, length(AliveAfter)) + end). + + +get_indexes_by_ddoc(DDocID, N) -> + Indexes = test_util:wait(fun() -> + Indxs = ets:match_object( + couchdb_indexes_by_db, {'$1', {DDocID, '$2'}}), + case length(Indxs) == N of + true -> + Indxs; + false -> + wait + end + end), + lists:foldl(fun({DbName, {_DDocID, Sig}}, Acc) -> + case ets:lookup(couchdb_indexes_by_sig, {DbName, Sig}) of + [{_, Pid}] -> [Pid|Acc]; + _ -> Acc + end + end, [], Indexes). + + diff --git a/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl new file mode 100644 index 000000000..ce2be8904 --- /dev/null +++ b/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl @@ -0,0 +1,422 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_mrview_ddoc_validation_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(LIB, {[{<<"mylib">>, {[{<<"lib1">>, <<"x=42">>}]}}]}). + +setup() -> + {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map), + Db. + +teardown(Db) -> + couch_db:close(Db), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), + ok. 
+ +ddoc_validation_test_() -> + { + "ddoc validation tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_reject_invalid_js_map/1, + fun should_reject_invalid_js_reduce/1, + fun should_reject_invalid_builtin_reduce/1, + fun should_reject_non_object_options/1, + fun should_reject_non_object_filters/1, + fun should_accept_obj_in_filters/1, + fun should_reject_non_object_lists/1, + fun should_accept_obj_in_lists/1, + fun should_reject_non_object_shows/1, + fun should_accept_obj_in_shows/1, + fun should_reject_non_object_updates/1, + fun should_accept_obj_in_updates/1, + fun should_reject_non_object_views/1, + fun should_reject_non_string_language/1, + fun should_reject_non_string_validate_doc_update/1, + fun should_accept_string_rewrites/1, + fun should_reject_bad_rewrites/1, + fun should_accept_option/1, + fun should_accept_any_option/1, + fun should_accept_filter/1, + fun should_reject_non_string_or_obj_filter_function/1, + fun should_accept_list/1, + fun should_reject_non_string_or_obj_list_function/1, + fun should_accept_show/1, + fun should_reject_non_string_or_obj_show_function/1, + fun should_accept_update/1, + fun should_reject_non_string_or_obj_update_function/1, + fun should_accept_view/1, + fun should_accept_view_with_reduce/1, + fun should_accept_view_with_lib/1, + fun should_reject_view_that_is_not_an_object/1, + fun should_reject_view_without_map_function/1, + fun should_reject_view_with_non_string_map_function/1, + fun should_reject_view_with_non_string_reduce_function/1, + fun should_accept_any_in_lib/1, + fun should_accept_map_object_for_queries/1, + fun should_reject_map_non_objects_for_queries/1 + ] + } + } + }. + +should_reject_invalid_js_map(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_invalid_js_map">>}, + {<<"views">>, {[ + {<<"foo">>, {[ + {<<"map">>, <<"function(doc) }{">>} + ]}} + ]}} + ]}), + ?_assertThrow( + {bad_request, compilation_error, _}, + couch_db:update_doc(Db, Doc, [])). + +should_reject_invalid_js_reduce(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_invalid_js_reduce">>}, + {<<"views">>, {[ + {<<"foo">>, {[ + {<<"map">>, <<"function(doc) { emit(null); }">>}, + {<<"reduce">>, <<"function(k, v, r) }{}">>} + ]}} + ]}} + ]}), + ?_assertThrow( + {bad_request, compilation_error, _}, + couch_db:update_doc(Db, Doc, [])). + +should_reject_invalid_builtin_reduce(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_invalid_builtin_reduce">>}, + {<<"views">>, {[ + {<<"foo">>, {[ + {<<"map">>, <<"function(doc) { emit(null); }">>}, + {<<"reduce">>, <<"_foobar">>} + ]}} + ]}} + ]}), + ?_assertThrow( + {bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_reject_non_object_options(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_object_options">>}, + {<<"options">>, <<"invalid">>} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_reject_non_object_filters(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_object_filters">>}, + {<<"filters">>, <<"invalid">>} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). 
+ +should_accept_obj_in_filters(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_obj_in_filters">>}, + {<<"filters">>, ?LIB} + ]}), + ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])). + +should_reject_non_object_lists(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_object_lists">>}, + {<<"lists">>, <<"invalid">>} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_reject_non_object_shows(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_object_shows">>}, + {<<"shows">>, <<"invalid">>} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_accept_obj_in_shows(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_obj_in_shows">>}, + {<<"shows">>, ?LIB} + ]}), + ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])). + +should_reject_non_object_updates(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_object_updates">>}, + {<<"updates">>, <<"invalid">>} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_accept_obj_in_updates(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_obj_in_updates">>}, + {<<"updates">>, ?LIB} + ]}), + ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])). + +should_reject_non_object_views(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_object_views">>}, + {<<"views">>, <<"invalid">>} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_reject_non_string_language(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_string_language">>}, + {<<"language">>, 1} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_reject_non_string_validate_doc_update(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_string_vdu">>}, + {<<"validate_doc_update">>, 1} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_accept_string_rewrites(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_array_rewrites">>}, + {<<"rewrites">>, <<"function(req){}">>} + ]}), + ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). + +should_reject_bad_rewrites(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_array_rewrites">>}, + {<<"rewrites">>, 42} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_accept_option(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_options">>}, + {<<"options">>, {[ {<<"option1">>, <<"function(doc,req){}">>} ]}} + ]}), + ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). + +should_accept_any_option(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_any_option">>}, + {<<"options">>, {[ {<<"option1">>, true} ]}} + ]}), + ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). + +should_accept_filter(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_filters">>}, + {<<"filters">>, {[ {<<"filter1">>, <<"function(doc,req){}">>} ]}} + ]}), + ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). 
+ +should_reject_non_string_or_obj_filter_function(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_string_or_obj_filter_function">>}, + {<<"filters">>, {[ {<<"filter1">>, 1} ]}} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_accept_list(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_lists">>}, + {<<"lists">>, {[ {<<"list1">>, <<"function(doc,req){}">>} ]}} + ]}), + ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). + +should_reject_non_string_or_obj_list_function(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_string_or_obj_list_function">>}, + {<<"lists">>, {[ {<<"list1">>, 1} ]}} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_accept_obj_in_lists(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_obj_in_lists">>}, + {<<"lists">>, ?LIB} + ]}), + ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])). + + +should_accept_show(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_shows">>}, + {<<"shows">>, {[ {<<"show1">>, <<"function(doc,req){}">>} ]}} + ]}), + ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). + +should_reject_non_string_or_obj_show_function(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_string_or_obj_show_function">>}, + {<<"shows">>, {[ {<<"show1">>, 1} ]}} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_accept_update(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_updates">>}, + {<<"updates">>, {[ {<<"update1">>, <<"function(doc,req){}">>} ]}} + ]}), + ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). + +should_reject_non_string_or_obj_update_function(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_string_or_obj_update_function">>}, + {<<"updates">>, {[ {<<"update1">>, 1} ]}} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_accept_view(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_view">>}, + {<<"views">>, {[ + {<<"view1">>, {[{<<"map">>, <<"function(d){}">>}]}} + ]}} + ]}), + ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). + +should_accept_view_with_reduce(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_view_with_reduce">>}, + {<<"views">>, {[ + {<<"view1">>, {[ + {<<"map">>, <<"function(d){}">>}, + {<<"reduce">>,<<"function(d){}">>} + ]}} + ]}} + ]}), + ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). + +should_accept_view_with_lib(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_view_with_lib">>}, + {<<"views">>, {[ + {<<"view1">>, {[ + {<<"map">>, <<"function(d){}">>} + ]}}, + {<<"lib">>, {[ + {<<"lib1">>, <<"x=42">>} + ]}} + ]}} + ]}), + ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). + +should_reject_view_that_is_not_an_object(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_non_object_view">>}, + {<<"views">>, {[{<<"view1">>, <<"thisisbad">>}]}} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). 
+ +should_reject_view_without_map_function(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_view_without_map">>}, + {<<"views">>, {[ + {<<"view1">>, {[]}} + ]}} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + + +should_reject_view_with_non_string_map_function(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_view_with_nonstr_map">>}, + {<<"views">>, {[ + {<<"view1">>, {[ + {<<"map">>,{[]}} + ]}} + ]}} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_reject_view_with_non_string_reduce_function(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_view_with_nonstr_reduce">>}, + {<<"views">>, {[ + {<<"view1">>, {[ + {<<"map">>,<<"function(d){}">>}, + {<<"reduce">>,1} + ]}} + ]}} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). + +should_accept_any_in_lib(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_any_in_lib">>}, + {<<"views">>, {[ + {<<"view1">>, {[ + {<<"map">>, <<"function(d){}">>} + ]}}, + {<<"lib">>, {[{<<"lib1">>, {[]}}]}} + ]}} + ]}), + ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). + + +should_accept_map_object_for_queries(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_accept_map_objects_for_queries">>}, + {<<"language">>, <<"query">>}, + {<<"views">>, {[ + {<<"view1">>, {[ + {<<"map">>, {[ + {<<"x">>, <<"y">>} + ]}} + ]}} + ]}} + ]}), + ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])). + + +should_reject_map_non_objects_for_queries(Db) -> + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/should_reject_map_non_objects__with_nonstr_reduce">>}, + {<<"language">>, <<"query">>}, + {<<"views">>, {[ + {<<"view1">>, {[ + {<<"map">>, <<"function(d){}">>} + ]}} + ]}} + ]}), + ?_assertThrow({bad_request, invalid_design_doc, _}, + couch_db:update_doc(Db, Doc, [])). diff --git a/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl new file mode 100644 index 000000000..aedd42865 --- /dev/null +++ b/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl @@ -0,0 +1,136 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_mrview_design_docs_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 1000). + + + +setup() -> + {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), design), + Db. + +teardown(Db) -> + couch_db:close(Db), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), + ok. 
+ + +design_docs_test_() -> + { + "_design_docs view tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_query/1, + fun should_query_with_range/1, + fun should_query_with_range_rev/1, + fun should_query_with_limit_and_skip/1, + fun should_query_with_include_docs/1 + ] + } + } + }. + + +should_query(Db) -> + Result = run_query(Db, []), + Expect = {ok, [ + {meta, [{total, 10}, {offset, 10}]}, + mk_row(<<"_design/bar01">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar02">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar06">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar07">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar08">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar09">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar10">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>) + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_range(Db) -> + Result = run_query(Db, [ + {start_key, <<"_design/bar03">>}, + {end_key, <<"_design/bar05">>} + ]), + Expect = {ok, [ + {meta, [{total, 10}, {offset, 12}]}, + mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>) + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_range_rev(Db) -> + Result = run_query(Db, [ + {direction, rev}, + {start_key, <<"_design/bar05">>}, {end_key, <<"_design/bar03">>}, + {inclusive_end, true} + ]), + Expect = {ok, [ + {meta, [{total, 10}, {offset, 5}]}, + mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>) + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_limit_and_skip(Db) -> + Result = run_query(Db, [ + {start_key, <<"_design/bar02">>}, + {limit, 3}, + {skip, 3} + ]), + Expect = {ok, [ + {meta, [{total, 10}, {offset, 14}]}, + mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar06">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>), + mk_row(<<"_design/bar07">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>) + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_include_docs(Db) -> + Result = run_query(Db, [ + {start_key, <<"_design/bar08">>}, + {end_key, <<"_design/bar08">>}, + {include_docs, true} + ]), + Doc = {[ + {<<"_id">>,<<"_design/bar08">>}, + {<<"_rev">>,<<"1-0b24e44a44af45e51e562fd124ce3007">>}, + {<<"views">>,{[]}} + ]}, + Val = {[{rev, <<"1-0b24e44a44af45e51e562fd124ce3007">>}]}, + Expect = {ok, [ + {meta, [{total, 10}, {offset, 17}]}, + {row, [{id, <<"_design/bar08">>}, {key, <<"_design/bar08">>}, + {value, Val}, {doc, Doc}]} + ]}, + ?_assertEqual(Expect, Result). + + +mk_row(Id, Rev) -> + {row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}. + +run_query(Db, Opts0) -> + Opts = [{extra, [{namespace, <<"_design">>}]} | Opts0], + couch_mrview:query_all_docs(Db, Opts). 
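The `run_query/2` helper above scopes `_all_docs` to design documents through
the `namespace` option. As a usage sketch of the same API outside the test
harness (`list_design_docs/1` is a hypothetical name; an already-open `Db`
handle is assumed):

    %% Hypothetical helper, not part of this patch: list only design
    %% documents via the namespace option exercised by the tests above.
    list_design_docs(Db) ->
        Opts = [{extra, [{namespace, <<"_design">>}]}],
        {ok, Rows} = couch_mrview:query_all_docs(Db, Opts),
        Rows.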
diff --git a/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl
new file mode 100644
index 000000000..bd11c7ad8
--- /dev/null
+++ b/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl
@@ -0,0 +1,28 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_mrview_http_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+
+mrview_http_test_() ->
+    [
+        ?_assertEqual(#mrargs{group_level=undefined, group=true},
+            couch_mrview_http:parse_params([{"group", "true"}],
+                undefined, #mrargs{})),
+
+        ?_assertEqual(#mrargs{group_level=1, group=undefined},
+            couch_mrview_http:parse_params([{"group_level", "1"}],
+                undefined, #mrargs{}))
+    ].
diff --git a/src/couch_mrview/test/eunit/couch_mrview_index_changes_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_index_changes_tests.erl
new file mode 100644
index 000000000..f0be1b9b1
--- /dev/null
+++ b/src/couch_mrview/test/eunit/couch_mrview_index_changes_tests.erl
@@ -0,0 +1,224 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_mrview_index_changes_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+setup() ->
+    {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
+    Db.
+
+teardown(Db) ->
+    couch_db:close(Db),
+    couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
+    ok.
+
+changes_index_test_() ->
+    {
+        "changes index tests",
+        {
+            setup,
+            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun test_normal_changes/1,
+                    fun test_stream_once/1,
+                    fun test_stream_once_since/1,
+                    fun test_stream_once_timeout/1,
+                    fun test_stream_once_heartbeat/1,
+                    fun test_stream/1,
+                    fun test_indexer/1
+                ]
+            }
+        }
+    }.
+
+
+test_normal_changes(Db) ->
+    Result = run_query(Db, []),
+    Expect = {ok, 11, [
+        {{2, 1, <<"1">>}, 1},
+        {{3, 10, <<"10">>}, 10},
+        {{4, 2, <<"2">>}, 2},
+        {{5, 3, <<"3">>}, 3},
+        {{6, 4, <<"4">>}, 4},
+        {{7, 5, <<"5">>}, 5},
+        {{8, 6, <<"6">>}, 6},
+        {{9, 7, <<"7">>}, 7},
+        {{10, 8, <<"8">>}, 8},
+        {{11, 9, <<"9">>}, 9}
+    ]},
+    ?_assertEqual(Result, Expect).
+ +test_stream_once(Db) -> + Result = run_query(Db, [{stream, once}]), + Expect = {ok, 11, [ + {{2, 1, <<"1">>}, 1}, + {{3, 10, <<"10">>}, 10}, + {{4, 2, <<"2">>}, 2}, + {{5, 3, <<"3">>}, 3}, + {{6, 4, <<"4">>}, 4}, + {{7, 5, <<"5">>}, 5}, + {{8, 6, <<"6">>}, 6}, + {{9, 7, <<"7">>}, 7}, + {{10, 8, <<"8">>}, 8}, + {{11, 9, <<"9">>}, 9} + ]}, + ?_assertEqual(Result, Expect). + + +test_stream_once_since(Db) -> + Self = self(), + spawn(fun() -> + Result = run_query(Db, [{since, 11}, + {stream, once}]), + Self ! {result, Result} + end), + + spawn(fun() -> + timer:sleep(1000), + {ok, Db1} = save_doc(Db, 11), + couch_mrview:refresh(Db1, <<"_design/bar">>) + end), + + Expect = {ok,12,[{{12,11,<<"11">>},11}]}, + + receive + {result, Result} -> + ?_assertEqual(Result, Expect) + after 5000 -> + io:format("never got the change", []) + end. + + +test_stream_once_timeout(Db) -> + Self = self(), + spawn(fun() -> + Result = run_query(Db, [{since, 12}, + {stream, once}, + {timeout, 3000}]), + Self ! {result, Result} + end), + + + + Expect = {ok, 12, []}, + + receive + {result, Result} -> + ?_assertEqual(Result, Expect) + after 5000 -> + io:format("never got the change", []) + end. + +test_stream_once_heartbeat(Db) -> + Self = self(), + spawn(fun() -> + Result = run_query(Db, [{since, 12}, + {stream, once}, + {heartbeat, 1000}]), + Self ! {result, Result} + end), + + spawn(fun() -> + timer:sleep(3000), + {ok, Db1} = save_doc(Db, 12), + couch_mrview:refresh(Db1, <<"_design/bar">>) + end), + + Expect = {ok,13,[heartbeat, + heartbeat, + heartbeat, + {{13,12,<<"12">>},12}]}, + + + + receive + {result, Result} -> + ?_assertEqual(Result, Expect) + after 5000 -> + io:format("never got the change", []) + end. + + +test_stream(Db) -> + Self = self(), + spawn(fun() -> + Result = run_query(Db, [{since, 13}, + stream, + {timeout, 3000}]), + Self ! {result, Result} + end), + + spawn(fun() -> + timer:sleep(1000), + {ok, Db1} = save_doc(Db, 13), + couch_mrview:refresh(Db1, <<"_design/bar">>), + {ok, Db2} = save_doc(Db1, 14), + couch_mrview:refresh(Db2, <<"_design/bar">>) + end), + + Expect = {ok, 15,[{{14,13,<<"13">>},13}, + {{15,14,<<"14">>},14}]}, + + receive + {result, Result} -> + ?_assertEqual(Result, Expect) + after 5000 -> + io:format("never got the change", []) + end. + + +test_indexer(Db) -> + Result = run_query(Db, [{since, 14}, refresh]), + Expect = {ok, 15, [{{15,14,<<"14">>},14}]}, + + {ok, Db1} = save_doc(Db, 15), + timer:sleep(1500), + Result1 = run_query(Db1, [{since, 14}], false), + Expect1 = {ok, 16, [{{15,14,<<"14">>},14}, + {{16,15,<<"15">>},15}]}, + ?_assert(Result == Expect andalso Result1 == Expect1). + + +save_doc(Db, Id) -> + Doc = couch_mrview_test_util:doc(Id), + {ok, _Rev} = couch_db:update_doc(Db, Doc, []), + {ok, _} = couch_db:ensure_full_commit(Db), + couch_db:reopen(Db). + +run_query(Db, Opts) -> + run_query(Db, Opts, true). + +run_query(Db, Opts, Refresh) -> + Fun = fun + (stop, {LastSeq, Acc}) -> + {ok, LastSeq, Acc}; + (heartbeat, Acc) -> + {ok, [heartbeat | Acc]}; + (Event, Acc) -> + {ok, [Event | Acc]} + end, + case Refresh of + true -> + couch_mrview:refresh(Db, <<"_design/bar">>); + false -> + ok + end, + {ok, LastSeq, R} = couch_mrview_changes:handle_changes(Db, <<"_design/bar">>, + <<"baz">>, Fun, [], Opts), + {ok, LastSeq, lists:reverse(R)}. 
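The `run_query/3` helper above illustrates the fold contract that
`couch_mrview_changes:handle_changes/6` appears to follow: the callback sees
each change event (plus `heartbeat` markers), and a final `stop` call carrying
`{LastSeq, Acc}`. A minimal collector built on that same contract, as a sketch
only (`collect_view_changes/3` is a hypothetical name):

    %% Hypothetical helper mirroring run_query/3 above; DDocId is a
    %% design doc id such as <<"_design/bar">>.
    collect_view_changes(Db, DDocId, Since) ->
        Fun = fun
            (stop, {LastSeq, Acc}) -> {ok, LastSeq, Acc};
            (Event, Acc) -> {ok, [Event | Acc]}
        end,
        {ok, LastSeq, Events} = couch_mrview_changes:handle_changes(
            Db, DDocId, <<"baz">>, Fun, [], [{since, Since}]),
        {ok, LastSeq, lists:reverse(Events)}.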
diff --git a/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl
new file mode 100644
index 000000000..efa03e7c0
--- /dev/null
+++ b/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl
@@ -0,0 +1,121 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_mrview_index_info_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(TIMEOUT, 1000).
+
+
+setup() ->
+    {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
+    couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
+    {ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
+    {Db, Info}.
+
+
+teardown({Db, _}) ->
+    couch_db:close(Db),
+    couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
+    ok.
+
+
+view_info_test_() ->
+    {
+        "Views index tests",
+        {
+            setup,
+            fun test_util:start_couch/0,
+            fun test_util:stop_couch/1,
+            {
+                foreach,
+                fun setup/0,
+                fun teardown/1,
+                [
+                    fun sig_is_binary/1,
+                    fun language_is_js/1,
+                    fun file_size_is_non_neg_int/1,
+                    fun active_size_is_non_neg_int/1,
+                    fun external_size_is_non_neg_int/1,
+                    fun disk_size_is_file_size/1,
+                    fun data_size_is_external_size/1,
+                    fun active_size_less_than_file_size/1,
+                    fun update_seq_is_non_neg_int/1,
+                    fun purge_seq_is_non_neg_int/1,
+                    fun update_opts_is_bin_list/1
+                ]
+            }
+        }
+    }.
+
+
+sig_is_binary({_, Info}) ->
+    ?_assert(is_binary(prop(signature, Info))).
+
+
+language_is_js({_, Info}) ->
+    ?_assertEqual(<<"javascript">>, prop(language, Info)).
+
+
+file_size_is_non_neg_int({_, Info}) ->
+    ?_assert(check_non_neg_int([sizes, file], Info)).
+
+
+active_size_is_non_neg_int({_, Info}) ->
+    ?_assert(check_non_neg_int([sizes, active], Info)).
+
+
+external_size_is_non_neg_int({_, Info}) ->
+    ?_assert(check_non_neg_int([sizes, external], Info)).
+
+
+disk_size_is_file_size({_, Info}) ->
+    ?_assertEqual(prop([sizes, file], Info), prop(disk_size, Info)).
+
+
+data_size_is_external_size({_, Info}) ->
+    ?_assertEqual(prop([sizes, external], Info), prop(data_size, Info)).
+
+
+active_size_less_than_file_size({_, Info}) ->
+    ?_assert(prop([sizes, active], Info) < prop([sizes, file], Info)).
+
+
+update_seq_is_non_neg_int({_, Info}) ->
+    ?_assert(check_non_neg_int(update_seq, Info)).
+
+
+purge_seq_is_non_neg_int({_, Info}) ->
+    ?_assert(check_non_neg_int(purge_seq, Info)).
+
+
+update_opts_is_bin_list({_, Info}) ->
+    Opts = prop(update_options, Info),
+    ?_assert(is_list(Opts) andalso
+            (Opts == [] orelse lists:all(fun is_binary/1, Opts))).
+
+
+check_non_neg_int(Key, Info) ->
+    Size = prop(Key, Info),
+    is_integer(Size) andalso Size >= 0.
+
+
+prop(Key, {Props}) when is_list(Props) ->
+    prop(Key, Props);
+prop([Key], Info) ->
+    prop(Key, Info);
+prop([Key | Rest], Info) ->
+    prop(Rest, prop(Key, Info));
+prop(Key, Info) when is_atom(Key), is_list(Info) ->
+    couch_util:get_value(Key, Info).
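The `prop/2` helper above walks nested `{Props}` objects by key path, which
keeps the size assertions compact. The same pattern written out directly
against the result of `couch_mrview:get_info/2` (a sketch; `index_file_size/1`
is a hypothetical name):

    %% Hypothetical helper: read the on-disk file size of the view index,
    %% navigating the nested sizes object the way prop([sizes, file], Info)
    %% does above.
    index_file_size(Db) ->
        {ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
        {Sizes} = couch_util:get_value(sizes, Info),
        couch_util:get_value(file, Sizes).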
diff --git a/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl new file mode 100644 index 000000000..b0d25469a --- /dev/null +++ b/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl @@ -0,0 +1,148 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_mrview_local_docs_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 1000). + + + +setup() -> + {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), local), + Db. + +teardown(Db) -> + couch_db:close(Db), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), + ok. + + +all_docs_test_() -> + { + "_local_docs view tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_query/1, + fun should_query_with_range/1, + fun should_query_with_range_rev/1, + fun should_query_with_limit_and_skip/1, + fun should_query_with_include_docs/1, + fun should_query_with_update_seq/1 + ] + } + } + }. + + +should_query(Db) -> + Result = run_query(Db, []), + Expect = {ok, [ + {meta, [{total, null}, {offset, null}]}, + mk_row(1), + mk_row(10), + mk_row(2), + mk_row(3), + mk_row(4), + mk_row(5), + mk_row(6), + mk_row(7), + mk_row(8), + mk_row(9) + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_range(Db) -> + Result = run_query(Db, [ + {start_key, <<"_local/3">>}, + {end_key, <<"_local/5">>} + ]), + Expect = {ok, [ + {meta, [{total, null}, {offset, null}]}, + mk_row(3), + mk_row(4), + mk_row(5) + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_range_rev(Db) -> + Result = run_query(Db, [ + {direction, rev}, + {start_key, <<"_local/5">>}, {end_key, <<"_local/3">>}, + {inclusive_end, true} + ]), + Expect = {ok, [ + {meta, [{total, null}, {offset, null}]}, + mk_row(5), + mk_row(4), + mk_row(3) + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_limit_and_skip(Db) -> + Result = run_query(Db, [ + {start_key, <<"_local/2">>}, + {limit, 3}, + {skip, 3} + ]), + Expect = {ok, [ + {meta, [{total, null}, {offset, null}]}, + mk_row(5), + mk_row(6), + mk_row(7) + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_include_docs(Db) -> + Result = run_query(Db, [ + {start_key, <<"_local/8">>}, + {end_key, <<"_local/8">>}, + {include_docs, true} + ]), + {row, Doc0} = mk_row(8), + Doc = Doc0 ++ [{doc, {[ + {<<"_id">>, <<"_local/8">>}, + {<<"_rev">>, <<"0-1">>}, + {<<"val">>, 8} + ]}}], + Expect = {ok, [ + {meta, [{total, null}, {offset, null}]}, + {row, Doc} + ]}, + ?_assertEqual(Expect, Result). + +should_query_with_update_seq(Db) -> + Result = run_query(Db, [ + {start_key, <<"_local/2">>}, + {limit, 1}, + {update_seq, true} + ]), + Expect = {ok, [ + {meta, [{total, null}, {offset, null}, {update_seq, null}]}, + mk_row(2) + ]}, + ?_assertEqual(Expect, Result). + +mk_row(IntId) -> + Id = list_to_binary(io_lib:format("_local/~b", [IntId])), + {row, [{id, Id}, {key, Id}, {value, {[{rev, <<"0-1">>}]}}]}. 
+ +run_query(Db, Opts0) -> + Opts = [{extra, [{namespace, <<"_local">>}]} | Opts0], + couch_mrview:query_all_docs(Db, Opts). diff --git a/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl new file mode 100644 index 000000000..805dc6c74 --- /dev/null +++ b/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl @@ -0,0 +1,144 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_mrview_map_views_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 1000). + + +setup() -> + {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map), + Db. + +teardown(Db) -> + couch_db:close(Db), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), + ok. + + +map_views_test_() -> + { + "Map views", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_map/1, + fun should_map_with_range/1, + fun should_map_with_limit_and_skip/1, + fun should_map_with_include_docs/1, + fun should_map_empty_views/1, + fun should_give_ext_size_seq_indexed_test/1 + ] + } + } + }. + + +should_map(Db) -> + Result = run_query(Db, []), + Expect = {ok, [ + {meta, [{total, 10}, {offset, 0}]}, + {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, + {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}, + {row, [{id, <<"6">>}, {key, 6}, {value, 6}]}, + {row, [{id, <<"7">>}, {key, 7}, {value, 7}]}, + {row, [{id, <<"8">>}, {key, 8}, {value, 8}]}, + {row, [{id, <<"9">>}, {key, 9}, {value, 9}]}, + {row, [{id, <<"10">>}, {key, 10}, {value, 10}]} + ]}, + ?_assertEqual(Expect, Result). + +should_map_with_range(Db) -> + Result = run_query(Db, [ + {direction, rev}, + {start_key, 5}, {end_key, 3}, + {inclusive_end, true} + ]), + Expect = {ok, [ + {meta, [{total, 10}, {offset, 5}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]} + ]}, + ?_assertEqual(Expect, Result). + +should_map_with_limit_and_skip(Db) -> + Result = run_query(Db, [ + {start_key, 2}, + {limit, 3}, + {skip, 3} + ]), + Expect = {ok, [ + {meta, [{total, 10}, {offset, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}, + {row, [{id, <<"6">>}, {key, 6}, {value, 6}]}, + {row, [{id, <<"7">>}, {key, 7}, {value, 7}]} + ]}, + ?_assertEqual(Expect, Result). + +should_map_with_include_docs(Db) -> + Result = run_query(Db, [ + {start_key, 8}, + {end_key, 8}, + {include_docs, true} + ]), + Doc = {[ + {<<"_id">>,<<"8">>}, + {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>}, + {<<"val">>,8} + ]}, + Expect = {ok, [ + {meta, [{total, 10}, {offset, 7}]}, + {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]} + ]}, + ?_assertEqual(Expect, Result). 
+ +should_map_empty_views(Db) -> + Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>), + Expect = {ok, [ + {meta, [{total, 0}, {offset, 0}]} + ]}, + ?_assertEqual(Expect, Result). + +should_give_ext_size_seq_indexed_test(Db) -> + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/seqdoc">>}, + {<<"options">>, {[{<<"seq_indexed">>, true}]}}, + {<<"views">>, {[ + {<<"view1">>, {[ + {<<"map">>, <<"function(doc){emit(doc._id, doc._id);}">>} + ]}} + ]} + } + ]}), + {ok, _} = couch_db:update_doc(Db, DDoc, []), + {ok, Db1} = couch_db:open_int(couch_db:name(Db), []), + {ok, DDoc1} = couch_db:open_doc(Db1, <<"_design/seqdoc">>, [ejson_body]), + couch_mrview:query_view(Db1, DDoc1, <<"view1">>, [{update, true}]), + {ok, Info} = couch_mrview:get_info(Db1, DDoc), + Size = couch_util:get_nested_json_value({Info}, [sizes, external]), + ok = couch_db:close(Db1), + ?_assert(is_number(Size)). + + +run_query(Db, Opts) -> + couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts). diff --git a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl new file mode 100644 index 000000000..213acac0b --- /dev/null +++ b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl @@ -0,0 +1,276 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_mrview_purge_docs_fabric_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("mem3/include/mem3.hrl"). +-include_lib("couch_mrview/include/couch_mrview.hrl"). + +-define(TIMEOUT, 1000). + + +setup() -> + DbName = ?tempdb(), + ok = fabric:create_db(DbName, [?ADMIN_CTX, {q, 1}]), + meck:new(couch_mrview_index, [passthrough]), + meck:expect(couch_mrview_index, ensure_local_purge_docs, fun(A, B) -> + meck:passthrough([A, B]) + end), + DbName. + + +teardown(DbName) -> + meck:unload(), + ok = fabric:delete_db(DbName, [?ADMIN_CTX]). + + +view_purge_fabric_test_() -> + { + "Map views", + { + setup, + fun() -> test_util:start_couch([fabric, mem3]) end, + fun test_util:stop_couch/1, + { + foreach, + fun setup/0, + fun teardown/1, + [ + fun test_purge_verify_index/1, + fun test_purge_hook_before_compaction/1 + ] + } + } + }. 
+
+
+test_purge_verify_index(DbName) ->
+    ?_test(begin
+        Docs1 = couch_mrview_test_util:make_docs(normal, 5),
+        {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]),
+        {ok, _} = fabric:update_doc(
+            DbName,
+            couch_mrview_test_util:ddoc(map),
+            [?ADMIN_CTX]
+        ),
+
+        Result1 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
+        Expect1 = {ok, [
+            {meta, [{total, 5}, {offset, 0}]},
+            {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+            {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+            {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+            {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+            {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+        ]},
+        ?assertEqual(Expect1, Result1),
+
+        {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
+        ?assertEqual(0, couch_util:get_value(<<"purge_seq">>, Props1)),
+        ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
+        [ShardDbName | _Rest ] = ShardNames,
+        ?assertEqual(true, couch_mrview_index:verify_index_exists(
+            ShardDbName, Props1)),
+
+        purge_docs(DbName, [<<"1">>]),
+
+        Result2 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
+        Expect2 = {ok, [
+            {meta, [{total, 4}, {offset, 0}]},
+            {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+            {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+            {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+            {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+        ]},
+        ?assertEqual(Expect2, Result2),
+
+        {ok, #doc{body = {Props2}}} = get_local_purge_doc(DbName),
+        ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)),
+        ?assertEqual(true, couch_mrview_index:verify_index_exists(
+            ShardDbName, Props2))
+    end).
+
+
+test_purge_hook_before_compaction(DbName) ->
+    ?_test(begin
+        Docs1 = couch_mrview_test_util:make_docs(normal, 5),
+        {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]),
+        {ok, _} = fabric:update_doc(
+            DbName,
+            couch_mrview_test_util:ddoc(map),
+            [?ADMIN_CTX]
+        ),
+
+        Result1 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
+        Expect1 = {ok, [
+            {meta, [{total, 5}, {offset, 0}]},
+            {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+            {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+            {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+            {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+            {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+        ]},
+        ?assertEqual(Expect1, Result1),
+
+        purge_docs(DbName, [<<"1">>]),
+
+        Result2 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
+        Expect2 = {ok, [
+            {meta, [{total, 4}, {offset, 0}]},
+            {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+            {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+            {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+            {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+        ]},
+        ?assertEqual(Expect2, Result2),
+
+        {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
+        ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props1)),
+
+        [ShardName | _] = local_shards(DbName),
+        couch_util:with_db(ShardName, fun(Db) ->
+            {ok, _} = couch_db:start_compact(Db)
+        end),
+        wait_compaction(ShardName, ?LINE),
+
+        ?assertEqual(ok, meck:wait(1, couch_mrview_index,
+            ensure_local_purge_docs, '_', 5000)
+        ),
+
+        % Make sure compaction didn't change the purge seq
+        {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
+        ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props1)),
+
+        purge_docs(DbName, [<<"2">>]),
+
+        couch_util:with_db(ShardName, fun(Db) ->
+            {ok, _} = couch_db:start_compact(Db)
+        end),
+        wait_compaction(ShardName, ?LINE),
+
+        ?assertEqual(ok, meck:wait(2, couch_mrview_index,
+            ensure_local_purge_docs, '_', 5000)
+        ),
+
+        % Make sure compaction after a purge didn't overwrite
+        % the local purge doc for the index
+        {ok, #doc{body = {Props2}}} = get_local_purge_doc(DbName),
+        ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)),
+
+        % Force another update to ensure that we update
+        % the local doc appropriately after compaction
+        Result3 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
+        Expect3 = {ok, [
+            {meta, [{total, 3}, {offset, 0}]},
+            {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+            {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+            {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+        ]},
+        ?assertEqual(Expect3, Result3),
+
+        {ok, #doc{body = {Props3}}} = get_local_purge_doc(DbName),
+        ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props3)),
+
+        % Check that if the local doc doesn't exist, one
+        % is created for the index on compaction
+        delete_local_purge_doc(DbName),
+        ?assertMatch({not_found, _}, get_local_purge_doc(DbName)),
+
+        couch_util:with_db(ShardName, fun(Db) ->
+            {ok, _} = couch_db:start_compact(Db)
+        end),
+        wait_compaction(ShardName, ?LINE),
+
+        ?assertEqual(ok, meck:wait(3, couch_mrview_index,
+            ensure_local_purge_docs, '_', 5000)
+        ),
+
+        {ok, #doc{body = {Props4}}} = get_local_purge_doc(DbName),
+        ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props4))
+    end).
+
+
+get_local_purge_doc(DbName) ->
+    {ok, DDoc} = fabric:open_doc(DbName, <<"_design/bar">>, []),
+    {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
+    Sig = IdxState#mrst.sig,
+    HexSig = list_to_binary(couch_index_util:hexsig(Sig)),
+    DocId = couch_mrview_util:get_local_purge_doc_id(HexSig),
+    [ShardName | _] = local_shards(DbName),
+    couch_util:with_db(ShardName, fun(Db) ->
+        couch_db:open_doc(Db, DocId, [])
+    end).
+
+
+delete_local_purge_doc(DbName) ->
+    {ok, DDoc} = fabric:open_doc(DbName, <<"_design/bar">>, []),
+    {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
+    Sig = IdxState#mrst.sig,
+    HexSig = list_to_binary(couch_index_util:hexsig(Sig)),
+    DocId = couch_mrview_util:get_local_purge_doc_id(HexSig),
+    NewDoc = #doc{id = DocId, deleted = true},
+    [ShardName | _] = local_shards(DbName),
+    couch_util:with_db(ShardName, fun(Db) ->
+        {ok, _} = couch_db:update_doc(Db, NewDoc, [])
+    end).
+
+
+get_rev(#full_doc_info{} = FDI) ->
+    #doc_info{
+        revs = [#rev_info{} = PrevRev | _]
+    } = couch_doc:to_doc_info(FDI),
+    PrevRev#rev_info.rev.
+
+
+purge_docs(DbName, DocIds) ->
+    lists:foreach(fun(DocId) ->
+        FDI = fabric:get_full_doc_info(DbName, DocId, []),
+        Rev = get_rev(FDI),
+        {ok, [{ok, _}]} = fabric:purge_docs(DbName, [{DocId, [Rev]}], [])
+    end, DocIds).
+
+
+wait_compaction(DbName, Line) ->
+    WaitFun = fun() ->
+        case is_compaction_running(DbName) of
+            true -> wait;
+            false -> ok
+        end
+    end,
+    case test_util:wait(WaitFun, 10000) of
+        timeout ->
+            erlang:error({assertion_failed, [
+                {module, ?MODULE},
+                {line, Line},
+                {reason, "Timeout waiting for database compaction"}
+            ]});
+        _ ->
+            ok
+    end.
+
+
+is_compaction_running(DbName) ->
+    {ok, DbInfo} = couch_util:with_db(DbName, fun(Db) ->
+        couch_db:get_db_info(Db)
+    end),
+    couch_util:get_value(compact_running, DbInfo).
+
+
+local_shards(DbName) ->
+    try
+        [ShardName || #shard{name = ShardName} <- mem3:local_shards(DbName)]
+    catch
+        error:database_does_not_exist ->
+            []
+    end.
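`get_local_purge_doc/1` and `delete_local_purge_doc/1` above derive the
checkpoint doc id the same way: hex-encode the view signature and hand it to
`couch_mrview_util:get_local_purge_doc_id/1`. Factored out as a sketch (the
function name is hypothetical; the calls are the ones used by the helpers
above):

    %% Hypothetical helper: compute the _local purge checkpoint doc id
    %% for the view index defined by DDoc.
    purge_checkpoint_doc_id(DbName, DDoc) ->
        {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
        HexSig = list_to_binary(couch_index_util:hexsig(IdxState#mrst.sig)),
        couch_mrview_util:get_local_purge_doc_id(HexSig).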
diff --git a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl new file mode 100644 index 000000000..1020607a4 --- /dev/null +++ b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl @@ -0,0 +1,575 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_mrview_purge_docs_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch_mrview/include/couch_mrview.hrl"). + +-define(TIMEOUT, 1000). + + +setup() -> + meck:new(couch_index_updater, [passthrough]), + {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map, 5), + Db. + +teardown(Db) -> + couch_db:close(Db), + couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]), + meck:unload(), + ok. + +view_purge_test_() -> + { + "Map views", + { + setup, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, + { + foreach, + fun setup/0, + fun teardown/1, + [ + fun test_purge_single/1, + fun test_purge_partial/1, + fun test_purge_complete/1, + fun test_purge_nochange/1, + fun test_purge_index_reset/1, + fun test_purge_compact_size_check/1, + fun test_purge_single_for_docid_with_list/1, + fun test_purge_complete_for_docid_with_list/1, + fun test_purge_compact_for_stale_purge_cp_without_client/1, + fun test_purge_compact_for_stale_purge_cp_with_client/1 + ] + } + } + }. + + +test_purge_single(Db) -> + ?_test(begin + Result = run_query(Db, []), + Expect = {ok, [ + {meta, [{total, 5}, {offset, 0}]}, + {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, + {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} + ]}, + ?assertEqual(Expect, Result), + + FDI = couch_db:get_full_doc_info(Db, <<"1">>), + Rev = get_rev(FDI), + {ok, [{ok, _PRevs}]} = couch_db:purge_docs( + Db, + [{<<"UUID1">>, <<"1">>, [Rev]}] + ), + {ok, Db2} = couch_db:reopen(Db), + + Result2 = run_query(Db2, []), + Expect2 = {ok, [ + {meta, [{total, 4}, {offset, 0}]}, + {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} + ]}, + ?assertEqual(Expect2, Result2) + end). 
+ + +test_purge_single_for_docid_with_list(Db) -> + ?_test(begin + Result = run_query(Db, []), + Expect = {ok, [ + {meta, [{total, 5}, {offset, 0}]}, + {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, + {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} + ]}, + ?assertEqual(Expect, Result), + + FDI = couch_db:get_full_doc_info(Db, <<"1">>), + Rev = get_rev(FDI), + {ok, [{ok, _PRevs}]} = couch_db:purge_docs( + Db, + [{<<"UUID1">>, "1", [Rev]}] + ), + {ok, Db2} = couch_db:reopen(Db), + + Result2 = run_query(Db2, []), + Expect2 = {ok, [ + {meta, [{total, 4}, {offset, 0}]}, + {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} + ]}, + ?assertEqual(Expect2, Result2) + end). + +test_purge_partial(Db) -> + ?_test(begin + Result = run_query(Db, []), + Expect = {ok, [ + {meta, [{total, 5}, {offset, 0}]}, + {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, + {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} + ]}, + ?assertEqual(Expect, Result), + + FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), Rev1 = get_rev(FDI1), + Update = {[ + {'_id', <<"1">>}, + {'_rev', couch_doc:rev_to_str({1, [crypto:hash(md5, <<"1.2">>)]})}, + {'val', 1.2} + ]}, + {ok, [_Rev2]} = save_docs(Db, [Update], [replicated_changes]), + + PurgeInfos = [{<<"UUID1">>, <<"1">>, [Rev1]}], + + {ok, _} = couch_db:purge_docs(Db, PurgeInfos), + {ok, Db2} = couch_db:reopen(Db), + + Result2 = run_query(Db2, []), + Expect2 = {ok, [ + {meta, [{total, 5}, {offset, 0}]}, + {row, [{id, <<"1">>}, {key, 1.2}, {value, 1.2}]}, + {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} + ]}, + ?assertEqual(Expect2, Result2) + end). + + +test_purge_complete(Db) -> + ?_test(begin + Result = run_query(Db, []), + Expect = {ok, [ + {meta, [{total, 5}, {offset, 0}]}, + {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, + {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} + ]}, + ?assertEqual(Expect, Result), + + FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), Rev1 = get_rev(FDI1), + FDI2 = couch_db:get_full_doc_info(Db, <<"2">>), Rev2 = get_rev(FDI2), + FDI5 = couch_db:get_full_doc_info(Db, <<"5">>), Rev5 = get_rev(FDI5), + + PurgeInfos = [ + {<<"UUID1">>, <<"1">>, [Rev1]}, + {<<"UUID2">>, <<"2">>, [Rev2]}, + {<<"UUID5">>, <<"5">>, [Rev5]} + ], + {ok, _} = couch_db:purge_docs(Db, PurgeInfos), + {ok, Db2} = couch_db:reopen(Db), + + Result2 = run_query(Db2, []), + Expect2 = {ok, [ + {meta, [{total, 2}, {offset, 0}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]} + ]}, + ?assertEqual(Expect2, Result2) + end). 
+ + +test_purge_complete_for_docid_with_list(Db) -> + ?_test(begin + Result = run_query(Db, []), + Expect = {ok, [ + {meta, [{total, 5}, {offset, 0}]}, + {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, + {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} + ]}, + ?assertEqual(Expect, Result), + + FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), Rev1 = get_rev(FDI1), + FDI2 = couch_db:get_full_doc_info(Db, <<"2">>), Rev2 = get_rev(FDI2), + FDI5 = couch_db:get_full_doc_info(Db, <<"5">>), Rev5 = get_rev(FDI5), + + PurgeInfos = [ + {<<"UUID1">>, "1", [Rev1]}, + {<<"UUID2">>, "2", [Rev2]}, + {<<"UUID5">>, "5", [Rev5]} + ], + {ok, _} = couch_db:purge_docs(Db, PurgeInfos), + {ok, Db2} = couch_db:reopen(Db), + + Result2 = run_query(Db2, []), + Expect2 = {ok, [ + {meta, [{total, 2}, {offset, 0}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]} + ]}, + ?assertEqual(Expect2, Result2) + end). + + +test_purge_nochange(Db) -> + ?_test(begin + Result = run_query(Db, []), + Expect = {ok, [ + {meta, [{total, 5}, {offset, 0}]}, + {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, + {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} + ]}, + ?assertEqual(Expect, Result), + + FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), + Rev1 = get_rev(FDI1), + + PurgeInfos = [ + {<<"UUID1">>, <<"6">>, [Rev1]} + ], + {ok, _} = couch_db:purge_docs(Db, PurgeInfos), + {ok, Db2} = couch_db:reopen(Db), + + Result2 = run_query(Db2, []), + Expect2 = {ok, [ + {meta, [{total, 5}, {offset, 0}]}, + {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, + {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} + ]}, + ?assertEqual(Expect2, Result2) + end). + + +test_purge_index_reset(Db) -> + ?_test(begin + ok = couch_db:set_purge_infos_limit(Db, 2), + {ok, Db1} = couch_db:reopen(Db), + + Result = run_query(Db1, []), + Expect = {ok, [ + {meta, [{total, 5}, {offset, 0}]}, + {row, [{id, <<"1">>}, {key, 1}, {value, 1}]}, + {row, [{id, <<"2">>}, {key, 2}, {value, 2}]}, + {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}, + {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}, + {row, [{id, <<"5">>}, {key, 5}, {value, 5}]} + ]}, + ?assertEqual(Expect, Result), + + PurgeInfos = lists:map(fun(I) -> + DocId = list_to_binary(integer_to_list(I)), + FDI = couch_db:get_full_doc_info(Db, DocId), + Rev = get_rev(FDI), + {couch_uuids:random(), DocId, [Rev]} + end, lists:seq(1, 5)), + {ok, _} = couch_db:purge_docs(Db1, PurgeInfos), + + {ok, Db2} = couch_db:reopen(Db1), + + % Forcibly set the purge doc to a newer purge + % sequence to force an index reset. This should + % never happen in real life but the reset + % is required for correctness. 
+ {ok, #doc{body = {OldProps}} = LocalDoc} = get_local_purge_doc(Db2), + NewPurgeSeq = {<<"purge_seq">>, 5}, + NewProps = lists:keyreplace(<<"purge_seq">>, 1, OldProps, NewPurgeSeq), + RewindDoc = LocalDoc#doc{body = {NewProps}}, + {ok, _} = couch_db:update_doc(Db2, RewindDoc, []), + + % Compact the database to remove purge infos + {ok, _} = couch_db:start_compact(Db2), + wait_compaction(couch_db:name(Db), "database", ?LINE), + + {ok, Db3} = couch_db:reopen(Db2), + Result2 = run_query(Db3, []), + Expect2 = {ok, [ + {meta, [{total, 0}, {offset, 0}]} + ]}, + ?assertEqual(Expect2, Result2), + + % Assert that we had a reset + meck:wait( + 1, + couch_index_updater, + handle_info, + [{'EXIT', '_', {reset, '_'}}, '_'], + 5000 + ) + end). + + +test_purge_compact_size_check(Db) -> + ?_test(begin + DbName = couch_db:name(Db), + Docs = couch_mrview_test_util:make_docs(normal, 6, 200), + {ok, Db1} = couch_mrview_test_util:save_docs(Db, Docs), + _Result = run_query(Db1, []), + DiskSizeBefore = db_disk_size(DbName), + + PurgedDocsNum = 150, + IdsRevs = lists:foldl(fun(Id, CIdRevs) -> + Id1 = docid(Id), + FDI1 = couch_db:get_full_doc_info(Db1, Id1), + Rev1 = get_rev(FDI1), + UUID1 = uuid(Id), + [{UUID1, Id1, [Rev1]} | CIdRevs] + end, [], lists:seq(1, PurgedDocsNum)), + {ok, _} = couch_db:purge_docs(Db1, IdsRevs), + + {ok, Db2} = couch_db:reopen(Db1), + _Result1 = run_query(Db2, []), + {ok, PurgedIdRevs} = couch_db:fold_purge_infos( + Db2, + 0, + fun fold_fun/2, + [], + [] + ), + ?assertEqual(PurgedDocsNum, length(PurgedIdRevs)), + config:set("couchdb", "file_compression", "snappy", false), + + {ok, Db3} = couch_db:open_int(DbName, []), + {ok, _CompactPid} = couch_db:start_compact(Db3), + wait_compaction(DbName, "database", ?LINE), + ok = couch_db:close(Db3), + DiskSizeAfter = db_disk_size(DbName), + ?assert(DiskSizeBefore > DiskSizeAfter) + end). + + +test_purge_compact_for_stale_purge_cp_without_client(Db) -> + ?_test(begin + DbName = couch_db:name(Db), + % add more documents to database for purge + Docs = couch_mrview_test_util:make_docs(normal, 6, 200), + {ok, Db1} = couch_mrview_test_util:save_docs(Db, Docs), + + % change PurgedDocsLimit to 10 from 1000 to + % avoid timeout of eunit test + PurgedDocsLimit = 10, + couch_db:set_purge_infos_limit(Db1, PurgedDocsLimit), + + % purge 150 documents + PurgedDocsNum = 150, + PurgeInfos = lists:foldl(fun(Id, CIdRevs) -> + Id1 = docid(Id), + FDI1 = couch_db:get_full_doc_info(Db1, Id1), + Rev1 = get_rev(FDI1), + UUID1 = uuid(Id), + [{UUID1, Id1, [Rev1]} | CIdRevs] + end, [], lists:seq(1, PurgedDocsNum)), + {ok, _} = couch_db:purge_docs(Db1, PurgeInfos), + + {ok, Db2} = couch_db:reopen(Db1), + {ok, PurgedIdRevs} = couch_db:fold_purge_infos( + Db2, + 0, + fun fold_fun/2, + [], + [] + ), + ?assertEqual(PurgedDocsNum, length(PurgedIdRevs)), + + % run compaction to trigger pruning of purge tree + {ok, Db3} = couch_db:open_int(DbName, []), + {ok, _CompactPid} = couch_db:start_compact(Db3), + wait_compaction(DbName, "database", ?LINE), + ok = couch_db:close(Db3), + + % check the remaining purge requests in purge tree + {ok, Db4} = couch_db:reopen(Db3), + OldestPSeq = couch_db:get_oldest_purge_seq(Db4), + {ok, PurgedIdRevs2} = couch_db:fold_purge_infos( + Db4, + OldestPSeq - 1, + fun fold_fun/2, + [], + [] + ), + ?assertEqual(PurgedDocsLimit, length(PurgedIdRevs2)) + end). 
+ + +test_purge_compact_for_stale_purge_cp_with_client(Db) -> + ?_test(begin + DbName = couch_db:name(Db), + % add more documents to database for purge + Docs = couch_mrview_test_util:make_docs(normal, 6, 200), + {ok, Db1} = couch_mrview_test_util:save_docs(Db, Docs), + + % change PurgedDocsLimit to 10 from 1000 to + % avoid timeout of eunit test + PurgedDocsLimit = 10, + couch_db:set_purge_infos_limit(Db1, PurgedDocsLimit), + _Result = run_query(Db1, []), + + % first purge 30 documents + PurgedDocsNum1 = 30, + IdsRevs = lists:foldl(fun(Id, CIdRevs) -> + Id1 = docid(Id), + FDI1 = couch_db:get_full_doc_info(Db1, Id1), + Rev1 = get_rev(FDI1), + UUID1 = uuid(Id), + [{UUID1, Id1, [Rev1]} | CIdRevs] + end, [], lists:seq(1, PurgedDocsNum1)), + {ok, _} = couch_db:purge_docs(Db1, IdsRevs), + + {ok, Db2} = couch_db:reopen(Db1), + % run query again to reflect purge request to mrview + _Result1 = run_query(Db2, []), + {ok, PurgedIdRevs} = couch_db:fold_purge_infos( + Db2, + 0, + fun fold_fun/2, + [], + [] + ), + ?assertEqual(PurgedDocsNum1, length(PurgedIdRevs)), + + % then purge 120 documents + PurgedDocsNum2 = 150, + IdsRevs2 = lists:foldl(fun(Id, CIdRevs) -> + Id1 = docid(Id), + FDI1 = couch_db:get_full_doc_info(Db1, Id1), + Rev1 = get_rev(FDI1), + UUID1 = uuid(Id), + [{UUID1, Id1, [Rev1]} | CIdRevs] + end, [], lists:seq(PurgedDocsNum1 + 1, PurgedDocsNum2)), + {ok, _} = couch_db:purge_docs(Db2, IdsRevs2), + + % run compaction to trigger pruning of purge tree + % only the first 30 purge requests are pruned + {ok, Db3} = couch_db:open_int(DbName, []), + {ok, _CompactPid} = couch_db:start_compact(Db3), + wait_compaction(DbName, "database", ?LINE), + ok = couch_db:close(Db3), + + % check the remaining purge requests in purge tree + {ok, Db4} = couch_db:reopen(Db3), + OldestPSeq = couch_db:get_oldest_purge_seq(Db4), + {ok, PurgedIdRevs2} = couch_db:fold_purge_infos( + Db4, + OldestPSeq - 1, + fun fold_fun/2, + [], + [] + ), + ?assertEqual(PurgedDocsNum2 - PurgedDocsNum1, length(PurgedIdRevs2)) + end). + + +get_local_purge_doc(Db) -> + {ok, DDoc} = couch_db:open_doc(Db, <<"_design/bar">>, []), + {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc), + Sig = IdxState#mrst.sig, + HexSig = list_to_binary(couch_index_util:hexsig(Sig)), + DocId = couch_mrview_util:get_local_purge_doc_id(HexSig), + couch_db:open_doc(Db, DocId, []). + + +run_query(Db, Opts) -> + couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts). + + +save_docs(Db, JsonDocs, Options) -> + Docs = lists:map(fun(JDoc) -> + couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE(JDoc))) + end, JsonDocs), + Opts = [full_commit | Options], + case lists:member(replicated_changes, Options) of + true -> + {ok, []} = couch_db:update_docs( + Db, Docs, Opts, replicated_changes), + {ok, lists:map(fun(Doc) -> + {Pos, [RevId | _]} = Doc#doc.revs, + {Pos, RevId} + end, Docs)}; + false -> + {ok, Resp} = couch_db:update_docs(Db, Docs, Opts), + {ok, [Rev || {ok, Rev} <- Resp]} + end. + + +get_rev(#full_doc_info{} = FDI) -> + #doc_info{ + revs = [#rev_info{} = PrevRev | _] + } = couch_doc:to_doc_info(FDI), + PrevRev#rev_info.rev. + + +db_disk_size(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + {ok, Info} = couch_db:get_db_info(Db), + ok = couch_db:close(Db), + active_size(Info). + + +active_size(Info) -> + couch_util:get_nested_json_value({Info}, [sizes, active]). 
+
+
+wait_compaction(DbName, Kind, Line) ->
+    WaitFun = fun() ->
+        case is_compaction_running(DbName) of
+            true -> wait;
+            false -> ok
+        end
+    end,
+    case test_util:wait(WaitFun, 10000) of
+        timeout ->
+            erlang:error({assertion_failed,
+                [{module, ?MODULE},
+                {line, Line},
+                {reason, "Timeout waiting for "
+                    ++ Kind
+                    ++ " compaction"}]});
+        _ ->
+            ok
+    end.
+
+
+is_compaction_running(DbName) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, DbInfo} = couch_db:get_db_info(Db),
+    couch_db:close(Db),
+    couch_util:get_value(compact_running, DbInfo).
+
+
+fold_fun({_PSeq, _UUID, Id, Revs}, Acc) ->
+    {ok, [{Id, Revs} | Acc]}.
+
+
+docid(I) ->
+    list_to_binary(integer_to_list(I)).
+
+
+uuid(I) ->
+    Str = io_lib:format("UUID~4..0b", [I]),
+    iolist_to_binary(Str).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl
new file mode 100644
index 000000000..b83686113
--- /dev/null
+++ b/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl
@@ -0,0 +1,95 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_mrview_red_views_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(TIMEOUT, 1000).
+
+
+setup() ->
+    {ok, Db} = couch_mrview_test_util:init_db(?tempdb(), red),
+    Db.
+
+teardown(Db) ->
+    couch_db:close(Db),
+    couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
+    ok.
+
+
+reduce_views_test_() ->
+    {
+        "Reduce views",
+        {
+            setup,
+            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun should_reduce_basic/1,
+                    fun should_reduce_key_range/1,
+                    fun should_reduce_with_group_level/1,
+                    fun should_reduce_with_group_exact/1
+                ]
+            }
+        }
+    }.
+
+
+should_reduce_basic(Db) ->
+    Result = run_query(Db, []),
+    Expect = {ok, [
+        {meta, []},
+        {row, [{key, null}, {value, 55}]}
+    ]},
+    ?_assertEqual(Expect, Result).
+
+should_reduce_key_range(Db) ->
+    Result = run_query(Db, [{start_key, [0, 2]}, {end_key, [0, 4]}]),
+    Expect = {ok, [
+        {meta, []},
+        {row, [{key, null}, {value, 6}]}
+    ]},
+    ?_assertEqual(Expect, Result).
+
+should_reduce_with_group_level(Db) ->
+    Result = run_query(Db, [{group_level, 1}]),
+    Expect = {ok, [
+        {meta, []},
+        {row, [{key, [0]}, {value, 30}]},
+        {row, [{key, [1]}, {value, 25}]}
+    ]},
+    ?_assertEqual(Expect, Result).
+
+should_reduce_with_group_exact(Db) ->
+    Result = run_query(Db, [{group_level, exact}]),
+    Expect = {ok, [
+        {meta, []},
+        {row, [{key, [0, 2]}, {value, 2}]},
+        {row, [{key, [0, 4]}, {value, 4}]},
+        {row, [{key, [0, 6]}, {value, 6}]},
+        {row, [{key, [0, 8]}, {value, 8}]},
+        {row, [{key, [0, 10]}, {value, 10}]},
+        {row, [{key, [1, 1]}, {value, 1}]},
+        {row, [{key, [1, 3]}, {value, 3}]},
+        {row, [{key, [1, 5]}, {value, 5}]},
+        {row, [{key, [1, 7]}, {value, 7}]},
+        {row, [{key, [1, 9]}, {value, 9}]}
+    ]},
+    ?_assertEqual(Expect, Result).
+
+
+run_query(Db, Opts) ->
+    couch_mrview:query_view(Db, <<"_design/red">>, <<"baz">>, Opts).
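The group-level expectations above follow from the compound `[Parity, N]` keys
the `red` fixture emits: `group_level=1` folds rows by the first key element
(sums 30 and 25), while `exact` keeps every distinct key. The corresponding
query call, as a sketch (`sum_by_first_key/1` is a hypothetical name; the
`_design/red` design doc is the one created by
`couch_mrview_test_util:init_db/2` in `setup/0`):

    %% Hypothetical helper: sum values grouped by the first element of the
    %% compound key, matching should_reduce_with_group_level/1 above.
    sum_by_first_key(Db) ->
        couch_mrview:query_view(Db, <<"_design/red">>, <<"baz">>,
            [{group_level, 1}]).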
diff --git a/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl new file mode 100644 index 000000000..7046c9bb2 --- /dev/null +++ b/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl @@ -0,0 +1,39 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_mrview_util_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch_mrview/include/couch_mrview.hrl"). + + + +couch_mrview_util_test_() -> + [ + ?_assertEqual(0, validate_group_level(undefined, undefined)), + ?_assertEqual(exact, validate_group_level(true, undefined)), + ?_assertEqual(0, validate_group_level(false, undefined)), + ?_assertEqual(1, validate_group_level(undefined, 1)), + ?_assertEqual(0, validate_group_level(true, 0)), + ?_assertEqual(0, validate_group_level(undefined, 0)), + ?_assertEqual(1, validate_group_level(true, 1)), + ?_assertEqual(0, validate_group_level(false, 0)), + ?_assertThrow({query_parse_error, + <<"Can't specify group=false and group_level>0 at the same time">>}, + validate_group_level(false,1)) + ]. + +validate_group_level(Group, GroupLevel) -> + Args0 = #mrargs{group=Group, group_level=GroupLevel, view_type=red}, + Args1 = couch_mrview_util:validate_args(Args0), + Args1#mrargs.group_level. + diff --git a/src/couch_peruser/test/couch_peruser_test.erl b/src/couch_peruser/test/couch_peruser_test.erl deleted file mode 100644 index 8501cc36f..000000000 --- a/src/couch_peruser/test/couch_peruser_test.erl +++ /dev/null @@ -1,519 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_peruser_test). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(ADMIN_USERNAME, "admin"). --define(ADMIN_PASSWORD, "secret"). - --define(WAIT_FOR_USER_DELETE_TIMEOUT, 3000). - -setup_all() -> - TestCtx = test_util:start_couch([chttpd]), - ok = application:start(couch_peruser), - Hashed = couch_passwords:hash_admin_password(?ADMIN_PASSWORD), - ok = config:set("admins", ?ADMIN_USERNAME, ?b2l(Hashed), _Persist=false), - TestCtx. - -teardown_all(TestCtx) -> - config:delete("admins", ?ADMIN_USERNAME), - ok = application:stop(couch_peruser), - test_util:stop_couch(TestCtx). 
- -setup() -> - TestAuthDb = ?tempdb(), - do_request(put, get_base_url() ++ "/" ++ ?b2l(TestAuthDb)), - do_request(put, get_cluster_base_url() ++ "/" ++ ?b2l(TestAuthDb)), - set_config("couch_httpd_auth", "authentication_db", ?b2l(TestAuthDb)), - set_config("couch_peruser", "cluster_quiet_period", "1"), - set_config("couch_peruser", "cluster_start_period", "1"), - set_config("couch_peruser", "enable", "true"), - set_config("cluster", "n", "1"), - TestAuthDb. - -teardown(TestAuthDb) -> - set_config("couch_peruser", "enable", "false"), - set_config("couch_peruser", "delete_dbs", "false"), - set_config("couch_httpd_auth", "authentication_db", "_users"), - set_config("couch_peruser", "cluster_quiet_period", "60"), - set_config("couch_peruser", "cluster_start_period", "5"), - set_config("cluster", "n", "3"), - do_request(delete, get_cluster_base_url() ++ "/" ++ ?b2l(TestAuthDb)), - do_request(delete, get_base_url() ++ "/" ++ ?b2l(TestAuthDb)), - lists:foreach(fun(DbName) -> - case binary:part(DbName, 0, 7) of - <<"userdb-">> -> delete_db(DbName); - _ -> ok - end - end, all_dbs()). - -set_config(Section, Key, Value) -> - ok = config:set(Section, Key, Value, _Persist=false). - -delete_config(Section, Key) -> - ok = config:delete(Section, Key, _Persist=false). - -do_request(Method, Url) -> - Headers = [{basic_auth, {?ADMIN_USERNAME, ?ADMIN_PASSWORD}}], - {ok, _, _, _} = test_request:request(Method, Url, Headers). - -do_request(Method, Url, Body) -> - Headers = [ - {basic_auth, {?ADMIN_USERNAME, ?ADMIN_PASSWORD}}, - {"Content-Type", "application/json"}], - {ok, _, _, _} = test_request:request(Method, Url, Headers, Body). - -do_anon_request(Method, Url, Body) -> - Headers = [ - {"Content-Type", "application/json"}], - {ok, _, _, _} = test_request:request(Method, Url, Headers, Body). - -create_db(DbName) -> - {ok, _, _, _} = do_request(put, get_cluster_base_url() ++ "/" ++ ?b2l(DbName)). - -delete_db(DbName) -> - {ok, _, _, _} = do_request(delete, get_cluster_base_url() ++ "/" ++ ?b2l(DbName)). - -create_user(AuthDb, Name) -> - Body = "{\"name\":\"" ++ Name ++ - "\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}", - Url = lists:concat([ - get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name]), - {ok, 201, _, _} = do_request(put, Url, Body). - -create_anon_user(AuthDb, Name) -> - Body = "{\"name\":\"" ++ Name ++ - "\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}", - Url = lists:concat([ - get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name]), - {ok, 201, _, _} = do_anon_request(put, Url, Body). - -delete_user(AuthDb, Name) -> - Url = lists:concat([get_cluster_base_url(), "/", ?b2l(AuthDb), - "/org.couchdb.user:", Name]), - {ok, 200, _, Body} = do_request(get, Url), - {DocProps} = jiffy:decode(Body), - Rev = proplists:get_value(<<"_rev">>, DocProps), - {ok, 200, _, _} = do_request(delete, Url ++ "?rev=" ++ ?b2l(Rev)). - -get_security(DbName) -> - Url = lists:concat([ - get_cluster_base_url(), "/", ?b2l(DbName), "/_security"]), - test_util:wait(fun() -> - {ok, 200, _, Body} = do_request(get, Url), - case jiffy:decode(Body) of - {[]} -> wait; - {SecurityProperties} -> SecurityProperties - end - end). - -set_security(DbName, SecurityProperties) -> - Url = lists:concat([ - get_cluster_base_url(), "/", ?b2l(DbName), "/_security"]), - Body = jiffy:encode({SecurityProperties}), - {ok, 200, _, _} = do_request(put, Url, Body). - -all_dbs() -> - {ok, 200, _, Body} = do_request(get, get_cluster_base_url() ++ "/_all_dbs"), - jiffy:decode(Body). 
- -all_dbs_with_errors() -> - {Result, StatusCode, _Headers, Body} = do_request(get, get_cluster_base_url() ++ "/_all_dbs"), - {Result, StatusCode, _Headers, jiffy:decode(Body)}. - -get_base_url() -> - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)), - "http://" ++ Addr ++ ":" ++ Port. - -get_cluster_base_url() -> - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)), - "http://" ++ Addr ++ ":" ++ Port. - - -should_create_user_db_with_default(TestAuthDb) -> - create_user(TestAuthDb, "foo"), - wait_for_db_create(<<"userdb-666f6f">>), - {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>), - {ClusterInfo} = couch_util:get_value(cluster, DbInfo), - [ - ?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())), - ?_assertEqual(1, couch_util:get_value(q, ClusterInfo)) - ]. - -should_create_user_db_with_custom_prefix(TestAuthDb) -> - set_config("couch_peruser", "database_prefix", "newuserdb-"), - create_user(TestAuthDb, "fooo"), - wait_for_db_create(<<"newuserdb-666f6f6f">>), - delete_config("couch_peruser", "database_prefix"), - ?_assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs())). - -should_create_user_db_with_custom_special_prefix(TestAuthDb) -> - set_config("couch_peruser", "database_prefix", "userdb_$()+--/"), - create_user(TestAuthDb, "fooo"), - wait_for_db_create(<<"userdb_$()+--/666f6f6f">>), - delete_config("couch_peruser", "database_prefix"), - ?_assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs())). - -should_create_anon_user_db_with_default(TestAuthDb) -> - create_anon_user(TestAuthDb, "fooo"), - wait_for_db_create(<<"userdb-666f6f6f">>), - {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>), - {ClusterInfo} = couch_util:get_value(cluster, DbInfo), - [ - ?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())), - ?_assertEqual(1, couch_util:get_value(q, ClusterInfo)) - ]. - -should_create_anon_user_db_with_custom_prefix(TestAuthDb) -> - set_config("couch_peruser", "database_prefix", "newuserdb-"), - create_anon_user(TestAuthDb, "fooo"), - wait_for_db_create(<<"newuserdb-666f6f6f">>), - delete_config("couch_peruser", "database_prefix"), - ?_assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs())). - -should_create_anon_user_db_with_custom_special_prefix(TestAuthDb) -> - set_config("couch_peruser", "database_prefix", "userdb_$()+--/"), - create_anon_user(TestAuthDb, "fooo"), - wait_for_db_create(<<"userdb_$()+--/666f6f6f">>), - delete_config("couch_peruser", "database_prefix"), - ?_assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs())). - -should_create_user_db_with_q4(TestAuthDb) -> - set_config("couch_peruser", "q", "4"), - create_user(TestAuthDb, "foo"), - wait_for_db_create(<<"userdb-666f6f">>), - {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>), - {ClusterInfo} = couch_util:get_value(cluster, DbInfo), - delete_config("couch_peruser", "q"), - [ - ?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())), - ?_assertEqual(4, couch_util:get_value(q, ClusterInfo)) - ]. 
- -should_create_anon_user_db_with_q4(TestAuthDb) -> - set_config("couch_peruser", "q", "4"), - create_anon_user(TestAuthDb, "fooo"), - wait_for_db_create(<<"userdb-666f6f6f">>), - {ok, TargetInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>), - {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), - delete_config("couch_peruser", "q"), - [ - ?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())), - ?_assertEqual(4, couch_util:get_value(q, ClusterInfo)) - ]. - -should_not_delete_user_db(TestAuthDb) -> - User = "foo", - UserDbName = <<"userdb-666f6f">>, - create_user(TestAuthDb, User), - wait_for_db_create(<<"userdb-666f6f">>), - AfterCreate = lists:member(UserDbName, all_dbs()), - delete_user(TestAuthDb, User), - timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT), - AfterDelete = lists:member(UserDbName, all_dbs()), - [?_assert(AfterCreate), ?_assert(AfterDelete)]. - -should_delete_user_db(TestAuthDb) -> - User = "bar", - UserDbName = <<"userdb-626172">>, - set_config("couch_peruser", "delete_dbs", "true"), - create_user(TestAuthDb, User), - wait_for_db_create(UserDbName), - AfterCreate = lists:member(UserDbName, all_dbs()), - delete_user(TestAuthDb, User), - wait_for_db_delete(UserDbName), - AfterDelete = lists:member(UserDbName, all_dbs()), - [?_assert(AfterCreate), ?_assertNot(AfterDelete)]. - -should_delete_user_db_with_custom_prefix(TestAuthDb) -> - User = "bar", - UserDbName = <<"newuserdb-626172">>, - set_config("couch_peruser", "delete_dbs", "true"), - set_config("couch_peruser", "database_prefix", "newuserdb-"), - create_user(TestAuthDb, User), - wait_for_db_create(UserDbName), - AfterCreate = lists:member(UserDbName, all_dbs()), - delete_user(TestAuthDb, User), - wait_for_db_delete(UserDbName), - delete_config("couch_peruser", "database_prefix"), - AfterDelete = lists:member(UserDbName, all_dbs()), - [ - ?_assert(AfterCreate), - ?_assertNot(AfterDelete) - ]. - -should_delete_user_db_with_custom_special_prefix(TestAuthDb) -> - User = "bar", - UserDbName = <<"userdb_$()+--/626172">>, - set_config("couch_peruser", "delete_dbs", "true"), - set_config("couch_peruser", "database_prefix", "userdb_$()+--/"), - create_user(TestAuthDb, User), - wait_for_db_create(UserDbName), - AfterCreate = lists:member(UserDbName, all_dbs()), - delete_user(TestAuthDb, User), - wait_for_db_delete(UserDbName), - delete_config("couch_peruser", "database_prefix"), - AfterDelete = lists:member(UserDbName, all_dbs()), - [ - ?_assert(AfterCreate), - ?_assertNot(AfterDelete) - ]. 
- -should_reflect_config_changes(TestAuthDb) -> - User = "baz", - UserDbName = <<"userdb-62617a">>, - set_config("couch_peruser", "delete_dbs", "true"), - create_user(TestAuthDb, User), - wait_for_db_create(UserDbName), - AfterCreate1 = lists:member(UserDbName, all_dbs()), - delete_user(TestAuthDb, User), - timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT), - wait_for_db_delete(UserDbName), - AfterDelete1 = lists:member(UserDbName, all_dbs()), - create_user(TestAuthDb, User), - wait_for_db_create(UserDbName), - AfterCreate2 = lists:member(UserDbName, all_dbs()), - set_config("couch_peruser", "delete_dbs", "false"), - delete_user(TestAuthDb, User), - timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT), - AfterDelete2 = lists:member(UserDbName, all_dbs()), - create_user(TestAuthDb, User), - wait_for_db_create(UserDbName), - set_config("couch_peruser", "delete_dbs", "true"), - delete_user(TestAuthDb, User), - wait_for_db_delete(UserDbName), - AfterDelete3 = lists:member(UserDbName, all_dbs()), - set_config("couch_peruser", "enable", "false"), - create_user(TestAuthDb, User), - timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT), - AfterCreate3 = lists:member(UserDbName, all_dbs()), - [ - ?_assert(AfterCreate1), - ?_assertNot(AfterDelete1), - ?_assert(AfterCreate2), - ?_assert(AfterDelete2), - ?_assertNot(AfterDelete3), - ?_assertNot(AfterCreate3) - ]. - - -should_add_user_to_db_admins(TestAuthDb) -> - User = "qux", - UserDbName = <<"userdb-717578">>, - create_user(TestAuthDb, User), - wait_for_db_create(UserDbName), - ?_assertEqual( - {[{<<"names">>,[<<"qux">>]}]}, - proplists:get_value(<<"admins">>, get_security(UserDbName))). - -should_add_user_to_db_members(TestAuthDb) -> - User = "qux", - UserDbName = <<"userdb-717578">>, - create_user(TestAuthDb, User), - wait_for_db_create(UserDbName), - ?_assertEqual( - {[{<<"names">>,[<<"qux">>]}]}, - proplists:get_value(<<"members">>, get_security(UserDbName))). - -should_not_remove_existing_db_admins(TestAuthDb) -> - User = "qux", - UserDbName = <<"userdb-717578">>, - SecurityProperties = [ - {<<"admins">>,{[{<<"names">>,[<<"foo">>,<<"bar">>]}]}}, - {<<"members">>,{[{<<"names">>,[<<"baz">>,<<"pow">>]}]}} - ], - create_db(UserDbName), - set_security(UserDbName, SecurityProperties), - create_user(TestAuthDb, User), - wait_for_security_create(<<"admins">>, User, UserDbName), - {AdminProperties} = proplists:get_value(<<"admins">>, - get_security(UserDbName)), - AdminNames = proplists:get_value(<<"names">>, AdminProperties), - [ - ?_assert(lists:member(<<"foo">>, AdminNames)), - ?_assert(lists:member(<<"bar">>, AdminNames)), - ?_assert(lists:member(<<"qux">>, AdminNames)) - ]. - -should_not_remove_existing_db_members(TestAuthDb) -> - User = "qux", - UserDbName = <<"userdb-717578">>, - SecurityProperties = [ - {<<"admins">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}}, - {<<"members">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}} - ], - create_db(UserDbName), - set_security(UserDbName, SecurityProperties), - create_user(TestAuthDb, User), - wait_for_security_create(<<"members">>, User, UserDbName), - {MemberProperties} = proplists:get_value(<<"members">>, - get_security(UserDbName)), - MemberNames = proplists:get_value(<<"names">>, MemberProperties), - [ - ?_assert(lists:member(<<"pow">>, MemberNames)), - ?_assert(lists:member(<<"wow">>, MemberNames)), - ?_assert(lists:member(<<"qux">>, MemberNames)) - ]. 
- -should_remove_user_from_db_admins(TestAuthDb) -> - User = "qux", - UserDbName = <<"userdb-717578">>, - SecurityProperties = [ - {<<"admins">>,{[{<<"names">>,[<<"foo">>,<<"bar">>]}]}}, - {<<"members">>,{[{<<"names">>,[<<"baz">>,<<"pow">>]}]}} - ], - create_db(UserDbName), - set_security(UserDbName, SecurityProperties), - create_user(TestAuthDb, User), - wait_for_security_create(<<"admins">>, User, UserDbName), - {AdminProperties} = proplists:get_value(<<"admins">>, - get_security(UserDbName)), - AdminNames = proplists:get_value(<<"names">>, AdminProperties), - FooBefore = lists:member(<<"foo">>, AdminNames), - BarBefore = lists:member(<<"bar">>, AdminNames), - QuxBefore = lists:member(<<"qux">>, AdminNames), - delete_user(TestAuthDb, User), - wait_for_security_delete(<<"admins">>, User, UserDbName), - {NewAdminProperties} = proplists:get_value(<<"admins">>, - get_security(UserDbName)), - NewAdminNames = proplists:get_value(<<"names">>, NewAdminProperties), - FooAfter = lists:member(<<"foo">>, NewAdminNames), - BarAfter = lists:member(<<"bar">>, NewAdminNames), - QuxAfter = lists:member(<<"qux">>, NewAdminNames), - [ - ?_assert(FooBefore), - ?_assert(BarBefore), - ?_assert(QuxBefore), - ?_assert(FooAfter), - ?_assert(BarAfter), - ?_assertNot(QuxAfter) - ]. - -should_remove_user_from_db_members(TestAuthDb) -> - User = "qux", - UserDbName = <<"userdb-717578">>, - SecurityProperties = [ - {<<"admins">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}}, - {<<"members">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}} - ], - create_db(UserDbName), - set_security(UserDbName, SecurityProperties), - create_user(TestAuthDb, User), - wait_for_security_create(<<"members">>, User, UserDbName), - {MemberProperties} = proplists:get_value(<<"members">>, - get_security(UserDbName)), - MemberNames = proplists:get_value(<<"names">>, MemberProperties), - PowBefore = lists:member(<<"pow">>, MemberNames), - WowBefore = lists:member(<<"wow">>, MemberNames), - QuxBefore = lists:member(<<"qux">>, MemberNames), - delete_user(TestAuthDb, User), - wait_for_security_delete(<<"members">>, User, UserDbName), - {NewMemberProperties} = proplists:get_value(<<"members">>, - get_security(UserDbName)), - NewMemberNames = proplists:get_value(<<"names">>, NewMemberProperties), - PowAfter = lists:member(<<"pow">>, NewMemberNames), - WowAfter = lists:member(<<"wow">>, NewMemberNames), - QuxAfter = lists:member(<<"qux">>, NewMemberNames), - [ - ?_assert(PowBefore), - ?_assert(WowBefore), - ?_assert(QuxBefore), - ?_assert(PowAfter), - ?_assert(WowAfter), - ?_assertNot(QuxAfter) - ]. - - -wait_for_db_create(UserDbName) -> - test_util:wait(fun() -> - case all_dbs_with_errors() of - {error, _, _ , _} -> wait; - {ok, _, _, AllDbs} -> - case lists:member(UserDbName, AllDbs) of - true -> true; - false -> wait - end - end - end). - -wait_for_db_delete(UserDbName) -> - test_util:wait(fun() -> - case all_dbs_with_errors() of - {ok, 500, _ , _} -> wait; - {ok, _, _, AllDbs} -> - case not lists:member(UserDbName, AllDbs) of - true -> true; - false -> wait - end - end - end). - -wait_for_security_create(Type, User0, UserDbName) -> - User = ?l2b(User0), - test_util:wait(fun() -> - {Props} = proplists:get_value(Type, get_security(UserDbName)), - Names = proplists:get_value(<<"names">>, Props), - case lists:member(User, Names) of - true -> true; - false -> wait - end - end). 
- -wait_for_security_delete(Type, User0, UserDbName) -> - User = ?l2b(User0), - test_util:wait(fun() -> - {Props} = proplists:get_value(Type, get_security(UserDbName)), - Names = proplists:get_value(<<"names">>, Props), - case not lists:member(User, Names) of - true -> true; - false -> wait - end - end). - -couch_peruser_test_() -> - { - "couch_peruser test", - { - setup, - fun setup_all/0, fun teardown_all/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_create_anon_user_db_with_default/1, - fun should_create_anon_user_db_with_custom_prefix/1, - fun should_create_anon_user_db_with_custom_special_prefix/1, - fun should_create_user_db_with_default/1, - fun should_create_user_db_with_custom_prefix/1, - fun should_create_user_db_with_custom_special_prefix/1, - fun should_create_user_db_with_q4/1, - fun should_create_anon_user_db_with_q4/1, - fun should_not_delete_user_db/1, - fun should_delete_user_db/1, - fun should_delete_user_db_with_custom_prefix/1, - fun should_delete_user_db_with_custom_special_prefix/1, - fun should_reflect_config_changes/1, - fun should_add_user_to_db_admins/1, - fun should_add_user_to_db_members/1, - fun should_not_remove_existing_db_admins/1, - fun should_not_remove_existing_db_members/1, - fun should_remove_user_from_db_admins/1, - fun should_remove_user_from_db_members/1 - ] - } - } - }. diff --git a/src/couch_peruser/test/eunit/couch_peruser_test.erl b/src/couch_peruser/test/eunit/couch_peruser_test.erl new file mode 100644 index 000000000..8501cc36f --- /dev/null +++ b/src/couch_peruser/test/eunit/couch_peruser_test.erl @@ -0,0 +1,519 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_peruser_test). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(ADMIN_USERNAME, "admin"). +-define(ADMIN_PASSWORD, "secret"). + +-define(WAIT_FOR_USER_DELETE_TIMEOUT, 3000). + +setup_all() -> + TestCtx = test_util:start_couch([chttpd]), + ok = application:start(couch_peruser), + Hashed = couch_passwords:hash_admin_password(?ADMIN_PASSWORD), + ok = config:set("admins", ?ADMIN_USERNAME, ?b2l(Hashed), _Persist=false), + TestCtx. + +teardown_all(TestCtx) -> + config:delete("admins", ?ADMIN_USERNAME), + ok = application:stop(couch_peruser), + test_util:stop_couch(TestCtx). + +setup() -> + TestAuthDb = ?tempdb(), + do_request(put, get_base_url() ++ "/" ++ ?b2l(TestAuthDb)), + do_request(put, get_cluster_base_url() ++ "/" ++ ?b2l(TestAuthDb)), + set_config("couch_httpd_auth", "authentication_db", ?b2l(TestAuthDb)), + set_config("couch_peruser", "cluster_quiet_period", "1"), + set_config("couch_peruser", "cluster_start_period", "1"), + set_config("couch_peruser", "enable", "true"), + set_config("cluster", "n", "1"), + TestAuthDb. 
+ +teardown(TestAuthDb) -> + set_config("couch_peruser", "enable", "false"), + set_config("couch_peruser", "delete_dbs", "false"), + set_config("couch_httpd_auth", "authentication_db", "_users"), + set_config("couch_peruser", "cluster_quiet_period", "60"), + set_config("couch_peruser", "cluster_start_period", "5"), + set_config("cluster", "n", "3"), + do_request(delete, get_cluster_base_url() ++ "/" ++ ?b2l(TestAuthDb)), + do_request(delete, get_base_url() ++ "/" ++ ?b2l(TestAuthDb)), + lists:foreach(fun(DbName) -> + case binary:part(DbName, 0, 7) of + <<"userdb-">> -> delete_db(DbName); + _ -> ok + end + end, all_dbs()). + +set_config(Section, Key, Value) -> + ok = config:set(Section, Key, Value, _Persist=false). + +delete_config(Section, Key) -> + ok = config:delete(Section, Key, _Persist=false). + +do_request(Method, Url) -> + Headers = [{basic_auth, {?ADMIN_USERNAME, ?ADMIN_PASSWORD}}], + {ok, _, _, _} = test_request:request(Method, Url, Headers). + +do_request(Method, Url, Body) -> + Headers = [ + {basic_auth, {?ADMIN_USERNAME, ?ADMIN_PASSWORD}}, + {"Content-Type", "application/json"}], + {ok, _, _, _} = test_request:request(Method, Url, Headers, Body). + +do_anon_request(Method, Url, Body) -> + Headers = [ + {"Content-Type", "application/json"}], + {ok, _, _, _} = test_request:request(Method, Url, Headers, Body). + +create_db(DbName) -> + {ok, _, _, _} = do_request(put, get_cluster_base_url() ++ "/" ++ ?b2l(DbName)). + +delete_db(DbName) -> + {ok, _, _, _} = do_request(delete, get_cluster_base_url() ++ "/" ++ ?b2l(DbName)). + +create_user(AuthDb, Name) -> + Body = "{\"name\":\"" ++ Name ++ + "\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}", + Url = lists:concat([ + get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name]), + {ok, 201, _, _} = do_request(put, Url, Body). + +create_anon_user(AuthDb, Name) -> + Body = "{\"name\":\"" ++ Name ++ + "\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}", + Url = lists:concat([ + get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name]), + {ok, 201, _, _} = do_anon_request(put, Url, Body). + +delete_user(AuthDb, Name) -> + Url = lists:concat([get_cluster_base_url(), "/", ?b2l(AuthDb), + "/org.couchdb.user:", Name]), + {ok, 200, _, Body} = do_request(get, Url), + {DocProps} = jiffy:decode(Body), + Rev = proplists:get_value(<<"_rev">>, DocProps), + {ok, 200, _, _} = do_request(delete, Url ++ "?rev=" ++ ?b2l(Rev)). + +get_security(DbName) -> + Url = lists:concat([ + get_cluster_base_url(), "/", ?b2l(DbName), "/_security"]), + test_util:wait(fun() -> + {ok, 200, _, Body} = do_request(get, Url), + case jiffy:decode(Body) of + {[]} -> wait; + {SecurityProperties} -> SecurityProperties + end + end). + +set_security(DbName, SecurityProperties) -> + Url = lists:concat([ + get_cluster_base_url(), "/", ?b2l(DbName), "/_security"]), + Body = jiffy:encode({SecurityProperties}), + {ok, 200, _, _} = do_request(put, Url, Body). + +all_dbs() -> + {ok, 200, _, Body} = do_request(get, get_cluster_base_url() ++ "/_all_dbs"), + jiffy:decode(Body). + +all_dbs_with_errors() -> + {Result, StatusCode, _Headers, Body} = do_request(get, get_cluster_base_url() ++ "/_all_dbs"), + {Result, StatusCode, _Headers, jiffy:decode(Body)}. + +get_base_url() -> + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)), + "http://" ++ Addr ++ ":" ++ Port. 
+ +get_cluster_base_url() -> + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)), + "http://" ++ Addr ++ ":" ++ Port. + + +should_create_user_db_with_default(TestAuthDb) -> + create_user(TestAuthDb, "foo"), + wait_for_db_create(<<"userdb-666f6f">>), + {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>), + {ClusterInfo} = couch_util:get_value(cluster, DbInfo), + [ + ?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())), + ?_assertEqual(1, couch_util:get_value(q, ClusterInfo)) + ]. + +should_create_user_db_with_custom_prefix(TestAuthDb) -> + set_config("couch_peruser", "database_prefix", "newuserdb-"), + create_user(TestAuthDb, "fooo"), + wait_for_db_create(<<"newuserdb-666f6f6f">>), + delete_config("couch_peruser", "database_prefix"), + ?_assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs())). + +should_create_user_db_with_custom_special_prefix(TestAuthDb) -> + set_config("couch_peruser", "database_prefix", "userdb_$()+--/"), + create_user(TestAuthDb, "fooo"), + wait_for_db_create(<<"userdb_$()+--/666f6f6f">>), + delete_config("couch_peruser", "database_prefix"), + ?_assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs())). + +should_create_anon_user_db_with_default(TestAuthDb) -> + create_anon_user(TestAuthDb, "fooo"), + wait_for_db_create(<<"userdb-666f6f6f">>), + {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>), + {ClusterInfo} = couch_util:get_value(cluster, DbInfo), + [ + ?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())), + ?_assertEqual(1, couch_util:get_value(q, ClusterInfo)) + ]. + +should_create_anon_user_db_with_custom_prefix(TestAuthDb) -> + set_config("couch_peruser", "database_prefix", "newuserdb-"), + create_anon_user(TestAuthDb, "fooo"), + wait_for_db_create(<<"newuserdb-666f6f6f">>), + delete_config("couch_peruser", "database_prefix"), + ?_assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs())). + +should_create_anon_user_db_with_custom_special_prefix(TestAuthDb) -> + set_config("couch_peruser", "database_prefix", "userdb_$()+--/"), + create_anon_user(TestAuthDb, "fooo"), + wait_for_db_create(<<"userdb_$()+--/666f6f6f">>), + delete_config("couch_peruser", "database_prefix"), + ?_assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs())). + +should_create_user_db_with_q4(TestAuthDb) -> + set_config("couch_peruser", "q", "4"), + create_user(TestAuthDb, "foo"), + wait_for_db_create(<<"userdb-666f6f">>), + {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>), + {ClusterInfo} = couch_util:get_value(cluster, DbInfo), + delete_config("couch_peruser", "q"), + [ + ?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())), + ?_assertEqual(4, couch_util:get_value(q, ClusterInfo)) + ]. + +should_create_anon_user_db_with_q4(TestAuthDb) -> + set_config("couch_peruser", "q", "4"), + create_anon_user(TestAuthDb, "fooo"), + wait_for_db_create(<<"userdb-666f6f6f">>), + {ok, TargetInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>), + {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), + delete_config("couch_peruser", "q"), + [ + ?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())), + ?_assertEqual(4, couch_util:get_value(q, ClusterInfo)) + ]. 
+ +should_not_delete_user_db(TestAuthDb) -> + User = "foo", + UserDbName = <<"userdb-666f6f">>, + create_user(TestAuthDb, User), + wait_for_db_create(<<"userdb-666f6f">>), + AfterCreate = lists:member(UserDbName, all_dbs()), + delete_user(TestAuthDb, User), + timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT), + AfterDelete = lists:member(UserDbName, all_dbs()), + [?_assert(AfterCreate), ?_assert(AfterDelete)]. + +should_delete_user_db(TestAuthDb) -> + User = "bar", + UserDbName = <<"userdb-626172">>, + set_config("couch_peruser", "delete_dbs", "true"), + create_user(TestAuthDb, User), + wait_for_db_create(UserDbName), + AfterCreate = lists:member(UserDbName, all_dbs()), + delete_user(TestAuthDb, User), + wait_for_db_delete(UserDbName), + AfterDelete = lists:member(UserDbName, all_dbs()), + [?_assert(AfterCreate), ?_assertNot(AfterDelete)]. + +should_delete_user_db_with_custom_prefix(TestAuthDb) -> + User = "bar", + UserDbName = <<"newuserdb-626172">>, + set_config("couch_peruser", "delete_dbs", "true"), + set_config("couch_peruser", "database_prefix", "newuserdb-"), + create_user(TestAuthDb, User), + wait_for_db_create(UserDbName), + AfterCreate = lists:member(UserDbName, all_dbs()), + delete_user(TestAuthDb, User), + wait_for_db_delete(UserDbName), + delete_config("couch_peruser", "database_prefix"), + AfterDelete = lists:member(UserDbName, all_dbs()), + [ + ?_assert(AfterCreate), + ?_assertNot(AfterDelete) + ]. + +should_delete_user_db_with_custom_special_prefix(TestAuthDb) -> + User = "bar", + UserDbName = <<"userdb_$()+--/626172">>, + set_config("couch_peruser", "delete_dbs", "true"), + set_config("couch_peruser", "database_prefix", "userdb_$()+--/"), + create_user(TestAuthDb, User), + wait_for_db_create(UserDbName), + AfterCreate = lists:member(UserDbName, all_dbs()), + delete_user(TestAuthDb, User), + wait_for_db_delete(UserDbName), + delete_config("couch_peruser", "database_prefix"), + AfterDelete = lists:member(UserDbName, all_dbs()), + [ + ?_assert(AfterCreate), + ?_assertNot(AfterDelete) + ]. + +should_reflect_config_changes(TestAuthDb) -> + User = "baz", + UserDbName = <<"userdb-62617a">>, + set_config("couch_peruser", "delete_dbs", "true"), + create_user(TestAuthDb, User), + wait_for_db_create(UserDbName), + AfterCreate1 = lists:member(UserDbName, all_dbs()), + delete_user(TestAuthDb, User), + timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT), + wait_for_db_delete(UserDbName), + AfterDelete1 = lists:member(UserDbName, all_dbs()), + create_user(TestAuthDb, User), + wait_for_db_create(UserDbName), + AfterCreate2 = lists:member(UserDbName, all_dbs()), + set_config("couch_peruser", "delete_dbs", "false"), + delete_user(TestAuthDb, User), + timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT), + AfterDelete2 = lists:member(UserDbName, all_dbs()), + create_user(TestAuthDb, User), + wait_for_db_create(UserDbName), + set_config("couch_peruser", "delete_dbs", "true"), + delete_user(TestAuthDb, User), + wait_for_db_delete(UserDbName), + AfterDelete3 = lists:member(UserDbName, all_dbs()), + set_config("couch_peruser", "enable", "false"), + create_user(TestAuthDb, User), + timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT), + AfterCreate3 = lists:member(UserDbName, all_dbs()), + [ + ?_assert(AfterCreate1), + ?_assertNot(AfterDelete1), + ?_assert(AfterCreate2), + ?_assert(AfterDelete2), + ?_assertNot(AfterDelete3), + ?_assertNot(AfterCreate3) + ]. 
+ + +should_add_user_to_db_admins(TestAuthDb) -> + User = "qux", + UserDbName = <<"userdb-717578">>, + create_user(TestAuthDb, User), + wait_for_db_create(UserDbName), + ?_assertEqual( + {[{<<"names">>,[<<"qux">>]}]}, + proplists:get_value(<<"admins">>, get_security(UserDbName))). + +should_add_user_to_db_members(TestAuthDb) -> + User = "qux", + UserDbName = <<"userdb-717578">>, + create_user(TestAuthDb, User), + wait_for_db_create(UserDbName), + ?_assertEqual( + {[{<<"names">>,[<<"qux">>]}]}, + proplists:get_value(<<"members">>, get_security(UserDbName))). + +should_not_remove_existing_db_admins(TestAuthDb) -> + User = "qux", + UserDbName = <<"userdb-717578">>, + SecurityProperties = [ + {<<"admins">>,{[{<<"names">>,[<<"foo">>,<<"bar">>]}]}}, + {<<"members">>,{[{<<"names">>,[<<"baz">>,<<"pow">>]}]}} + ], + create_db(UserDbName), + set_security(UserDbName, SecurityProperties), + create_user(TestAuthDb, User), + wait_for_security_create(<<"admins">>, User, UserDbName), + {AdminProperties} = proplists:get_value(<<"admins">>, + get_security(UserDbName)), + AdminNames = proplists:get_value(<<"names">>, AdminProperties), + [ + ?_assert(lists:member(<<"foo">>, AdminNames)), + ?_assert(lists:member(<<"bar">>, AdminNames)), + ?_assert(lists:member(<<"qux">>, AdminNames)) + ]. + +should_not_remove_existing_db_members(TestAuthDb) -> + User = "qux", + UserDbName = <<"userdb-717578">>, + SecurityProperties = [ + {<<"admins">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}}, + {<<"members">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}} + ], + create_db(UserDbName), + set_security(UserDbName, SecurityProperties), + create_user(TestAuthDb, User), + wait_for_security_create(<<"members">>, User, UserDbName), + {MemberProperties} = proplists:get_value(<<"members">>, + get_security(UserDbName)), + MemberNames = proplists:get_value(<<"names">>, MemberProperties), + [ + ?_assert(lists:member(<<"pow">>, MemberNames)), + ?_assert(lists:member(<<"wow">>, MemberNames)), + ?_assert(lists:member(<<"qux">>, MemberNames)) + ]. + +should_remove_user_from_db_admins(TestAuthDb) -> + User = "qux", + UserDbName = <<"userdb-717578">>, + SecurityProperties = [ + {<<"admins">>,{[{<<"names">>,[<<"foo">>,<<"bar">>]}]}}, + {<<"members">>,{[{<<"names">>,[<<"baz">>,<<"pow">>]}]}} + ], + create_db(UserDbName), + set_security(UserDbName, SecurityProperties), + create_user(TestAuthDb, User), + wait_for_security_create(<<"admins">>, User, UserDbName), + {AdminProperties} = proplists:get_value(<<"admins">>, + get_security(UserDbName)), + AdminNames = proplists:get_value(<<"names">>, AdminProperties), + FooBefore = lists:member(<<"foo">>, AdminNames), + BarBefore = lists:member(<<"bar">>, AdminNames), + QuxBefore = lists:member(<<"qux">>, AdminNames), + delete_user(TestAuthDb, User), + wait_for_security_delete(<<"admins">>, User, UserDbName), + {NewAdminProperties} = proplists:get_value(<<"admins">>, + get_security(UserDbName)), + NewAdminNames = proplists:get_value(<<"names">>, NewAdminProperties), + FooAfter = lists:member(<<"foo">>, NewAdminNames), + BarAfter = lists:member(<<"bar">>, NewAdminNames), + QuxAfter = lists:member(<<"qux">>, NewAdminNames), + [ + ?_assert(FooBefore), + ?_assert(BarBefore), + ?_assert(QuxBefore), + ?_assert(FooAfter), + ?_assert(BarAfter), + ?_assertNot(QuxAfter) + ]. 
+ +should_remove_user_from_db_members(TestAuthDb) -> + User = "qux", + UserDbName = <<"userdb-717578">>, + SecurityProperties = [ + {<<"admins">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}}, + {<<"members">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}} + ], + create_db(UserDbName), + set_security(UserDbName, SecurityProperties), + create_user(TestAuthDb, User), + wait_for_security_create(<<"members">>, User, UserDbName), + {MemberProperties} = proplists:get_value(<<"members">>, + get_security(UserDbName)), + MemberNames = proplists:get_value(<<"names">>, MemberProperties), + PowBefore = lists:member(<<"pow">>, MemberNames), + WowBefore = lists:member(<<"wow">>, MemberNames), + QuxBefore = lists:member(<<"qux">>, MemberNames), + delete_user(TestAuthDb, User), + wait_for_security_delete(<<"members">>, User, UserDbName), + {NewMemberProperties} = proplists:get_value(<<"members">>, + get_security(UserDbName)), + NewMemberNames = proplists:get_value(<<"names">>, NewMemberProperties), + PowAfter = lists:member(<<"pow">>, NewMemberNames), + WowAfter = lists:member(<<"wow">>, NewMemberNames), + QuxAfter = lists:member(<<"qux">>, NewMemberNames), + [ + ?_assert(PowBefore), + ?_assert(WowBefore), + ?_assert(QuxBefore), + ?_assert(PowAfter), + ?_assert(WowAfter), + ?_assertNot(QuxAfter) + ]. + + +wait_for_db_create(UserDbName) -> + test_util:wait(fun() -> + case all_dbs_with_errors() of + {error, _, _ , _} -> wait; + {ok, _, _, AllDbs} -> + case lists:member(UserDbName, AllDbs) of + true -> true; + false -> wait + end + end + end). + +wait_for_db_delete(UserDbName) -> + test_util:wait(fun() -> + case all_dbs_with_errors() of + {ok, 500, _ , _} -> wait; + {ok, _, _, AllDbs} -> + case not lists:member(UserDbName, AllDbs) of + true -> true; + false -> wait + end + end + end). + +wait_for_security_create(Type, User0, UserDbName) -> + User = ?l2b(User0), + test_util:wait(fun() -> + {Props} = proplists:get_value(Type, get_security(UserDbName)), + Names = proplists:get_value(<<"names">>, Props), + case lists:member(User, Names) of + true -> true; + false -> wait + end + end). + +wait_for_security_delete(Type, User0, UserDbName) -> + User = ?l2b(User0), + test_util:wait(fun() -> + {Props} = proplists:get_value(Type, get_security(UserDbName)), + Names = proplists:get_value(<<"names">>, Props), + case not lists:member(User, Names) of + true -> true; + false -> wait + end + end). + +couch_peruser_test_() -> + { + "couch_peruser test", + { + setup, + fun setup_all/0, fun teardown_all/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_create_anon_user_db_with_default/1, + fun should_create_anon_user_db_with_custom_prefix/1, + fun should_create_anon_user_db_with_custom_special_prefix/1, + fun should_create_user_db_with_default/1, + fun should_create_user_db_with_custom_prefix/1, + fun should_create_user_db_with_custom_special_prefix/1, + fun should_create_user_db_with_q4/1, + fun should_create_anon_user_db_with_q4/1, + fun should_not_delete_user_db/1, + fun should_delete_user_db/1, + fun should_delete_user_db_with_custom_prefix/1, + fun should_delete_user_db_with_custom_special_prefix/1, + fun should_reflect_config_changes/1, + fun should_add_user_to_db_admins/1, + fun should_add_user_to_db_members/1, + fun should_not_remove_existing_db_admins/1, + fun should_not_remove_existing_db_members/1, + fun should_remove_user_from_db_admins/1, + fun should_remove_user_from_db_members/1 + ] + } + } + }. 
diff --git a/src/couch_replicator/test/couch_replicator_attachments_too_large.erl b/src/couch_replicator/test/couch_replicator_attachments_too_large.erl deleted file mode 100644 index 7fe84d2d9..000000000 --- a/src/couch_replicator/test/couch_replicator_attachments_too_large.erl +++ /dev/null @@ -1,104 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_attachments_too_large). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_replicator/src/couch_replicator.hrl"). - - -setup(_) -> - Ctx = test_util:start_couch([couch_replicator]), - Source = create_db(), - create_doc_with_attachment(Source, <<"doc">>, 1000), - Target = create_db(), - {Ctx, {Source, Target}}. - - -teardown(_, {Ctx, {Source, Target}}) -> - delete_db(Source), - delete_db(Target), - config:delete("couchdb", "max_attachment_size"), - ok = test_util:stop_couch(Ctx). - - -attachment_too_large_replication_test_() -> - Pairs = [{local, remote}, {remote, local}, {remote, remote}], - { - "Attachment size too large replication tests", - { - foreachx, - fun setup/1, fun teardown/2, - [{Pair, fun should_succeed/2} || Pair <- Pairs] ++ - [{Pair, fun should_fail/2} || Pair <- Pairs] - } - }. - - -should_succeed({From, To}, {_Ctx, {Source, Target}}) -> - RepObject = {[ - {<<"source">>, db_url(From, Source)}, - {<<"target">>, db_url(To, Target)} - ]}, - config:set("couchdb", "max_attachment_size", "1000", _Persist = false), - {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), - ?_assertEqual(ok, couch_replicator_test_helper:compare_dbs(Source, Target)). - - -should_fail({From, To}, {_Ctx, {Source, Target}}) -> - RepObject = {[ - {<<"source">>, db_url(From, Source)}, - {<<"target">>, db_url(To, Target)} - ]}, - config:set("couchdb", "max_attachment_size", "999", _Persist = false), - {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), - ?_assertError({badmatch, {not_found, missing}}, - couch_replicator_test_helper:compare_dbs(Source, Target)). - - -create_db() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - DbName. - - -create_doc_with_attachment(DbName, DocId, AttSize) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - Doc = #doc{id = DocId, atts = att(AttSize)}, - {ok, _} = couch_db:update_doc(Db, Doc, []), - couch_db:close(Db), - ok. - - -att(Size) when is_integer(Size), Size >= 1 -> - [couch_att:new([ - {name, <<"att">>}, - {type, <<"app/binary">>}, - {att_len, Size}, - {data, fun(_Bytes) -> - << <<"x">> || _ <- lists:seq(1, Size) >> - end} - ])]. - - -delete_db(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]). - - -db_url(local, DbName) -> - DbName; -db_url(remote, DbName) -> - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(couch_httpd, port), - ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])). 
diff --git a/src/couch_replicator/test/couch_replicator_compact_tests.erl b/src/couch_replicator/test/couch_replicator_compact_tests.erl deleted file mode 100644 index 7cc530c19..000000000 --- a/src/couch_replicator/test/couch_replicator_compact_tests.erl +++ /dev/null @@ -1,455 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_compact_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_replicator/src/couch_replicator.hrl"). - --import(couch_replicator_test_helper, [ - db_url/1, - get_pid/1 -]). - --define(ATTFILE, filename:join([?FIXTURESDIR, "logo.png"])). --define(DELAY, 500). --define(TIMEOUT, 360000). --define(TIMEOUT_WRITER, 100000). --define(TIMEOUT_EUNIT, ?TIMEOUT div 1000 + 70). - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - DbName. - -setup(local) -> - setup(); -setup(remote) -> - {remote, setup()}; -setup({A, B}) -> - Ctx = test_util:start_couch([couch_replicator]), - Source = setup(A), - Target = setup(B), - {Ctx, {Source, Target}}. - -teardown({remote, DbName}) -> - teardown(DbName); -teardown(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok. - -teardown(_, {Ctx, {Source, Target}}) -> - teardown(Source), - teardown(Target), - ok = application:stop(couch_replicator), - ok = test_util:stop_couch(Ctx). - -compact_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], - { - "Compaction during replication tests", - { - foreachx, - fun setup/1, fun teardown/2, - [{Pair, fun should_populate_replicate_compact/2} - || Pair <- Pairs] - } - }. - - -should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) -> - {ok, RepPid, RepId} = replicate(Source, Target), - {lists:flatten(io_lib:format("~p -> ~p", [From, To])), - {inorder, [ - should_run_replication(RepPid, RepId, Source, Target), - should_all_processes_be_alive(RepPid, Source, Target), - should_populate_and_compact(RepPid, Source, Target, 50, 3), - should_wait_target_in_sync(Source, Target), - should_ensure_replication_still_running(RepPid, RepId, Source, Target), - should_cancel_replication(RepId, RepPid), - should_compare_databases(Source, Target) - ]}}. - -should_all_processes_be_alive(RepPid, Source, Target) -> - ?_test(begin - {ok, SourceDb} = reopen_db(Source), - {ok, TargetDb} = reopen_db(Target), - ?assert(is_process_alive(RepPid)), - ?assert(is_process_alive(couch_db:get_pid(SourceDb))), - ?assert(is_process_alive(couch_db:get_pid(TargetDb))) - end). - -should_run_replication(RepPid, RepId, Source, Target) -> - ?_test(check_active_tasks(RepPid, RepId, Source, Target)). - -should_ensure_replication_still_running(RepPid, RepId, Source, Target) -> - ?_test(check_active_tasks(RepPid, RepId, Source, Target)). 
- -check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) -> - Source = case Src of - {remote, NameSrc} -> - <<(db_url(NameSrc))/binary, $/>>; - _ -> - Src - end, - Target = case Tgt of - {remote, NameTgt} -> - <<(db_url(NameTgt))/binary, $/>>; - _ -> - Tgt - end, - FullRepId = ?l2b(BaseId ++ Ext), - Pid = ?l2b(pid_to_list(RepPid)), - RepTasks = wait_for_task_status(), - ?assertNotEqual(timeout, RepTasks), - [RepTask] = RepTasks, - ?assertEqual(Pid, couch_util:get_value(pid, RepTask)), - ?assertEqual(FullRepId, couch_util:get_value(replication_id, RepTask)), - ?assertEqual(true, couch_util:get_value(continuous, RepTask)), - ?assertEqual(Source, couch_util:get_value(source, RepTask)), - ?assertEqual(Target, couch_util:get_value(target, RepTask)), - ?assert(is_integer(couch_util:get_value(docs_read, RepTask))), - ?assert(is_integer(couch_util:get_value(docs_written, RepTask))), - ?assert(is_integer(couch_util:get_value(doc_write_failures, RepTask))), - ?assert(is_integer(couch_util:get_value(revisions_checked, RepTask))), - ?assert(is_integer(couch_util:get_value(missing_revisions_found, RepTask))), - ?assert(is_integer(couch_util:get_value(checkpointed_source_seq, RepTask))), - ?assert(is_integer(couch_util:get_value(source_seq, RepTask))), - Pending = couch_util:get_value(changes_pending, RepTask), - ?assert(is_integer(Pending)). - -replication_tasks() -> - lists:filter(fun(P) -> - couch_util:get_value(type, P) =:= replication - end, couch_task_status:all()). - - -wait_for_task_status() -> - test_util:wait(fun() -> - case replication_tasks() of - [] -> - wait; - Tasks -> - Tasks - end - end). - -should_cancel_replication(RepId, RepPid) -> - ?_assertNot(begin - ok = couch_replicator_scheduler:remove_job(RepId), - is_process_alive(RepPid) - end). - -should_populate_and_compact(RepPid, Source, Target, BatchSize, Rounds) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(begin - {ok, SourceDb0} = reopen_db(Source), - Writer = spawn_writer(SourceDb0), - lists:foreach( - fun(N) -> - {ok, SourceDb} = reopen_db(Source), - {ok, TargetDb} = reopen_db(Target), - pause_writer(Writer), - - compact_db("source", SourceDb), - ?assert(is_process_alive(RepPid)), - ?assert(is_process_alive(couch_db:get_pid(SourceDb))), - wait_for_compaction("source", SourceDb), - - compact_db("target", TargetDb), - ?assert(is_process_alive(RepPid)), - ?assert(is_process_alive(couch_db:get_pid(TargetDb))), - wait_for_compaction("target", TargetDb), - - {ok, SourceDb2} = reopen_db(SourceDb), - {ok, TargetDb2} = reopen_db(TargetDb), - - resume_writer(Writer), - wait_writer(Writer, BatchSize * N), - - compact_db("source", SourceDb2), - ?assert(is_process_alive(RepPid)), - ?assert(is_process_alive(couch_db:get_pid(SourceDb2))), - pause_writer(Writer), - wait_for_compaction("source", SourceDb2), - resume_writer(Writer), - - compact_db("target", TargetDb2), - ?assert(is_process_alive(RepPid)), - ?assert(is_process_alive(couch_db:get_pid(TargetDb2))), - pause_writer(Writer), - wait_for_compaction("target", TargetDb2), - resume_writer(Writer) - end, lists:seq(1, Rounds)), - stop_writer(Writer) - end)}. 
- -should_wait_target_in_sync({remote, Source}, Target) -> - should_wait_target_in_sync(Source, Target); -should_wait_target_in_sync(Source, {remote, Target}) -> - should_wait_target_in_sync(Source, Target); -should_wait_target_in_sync(Source, Target) -> - {timeout, ?TIMEOUT_EUNIT, ?_assert(begin - {ok, SourceDb} = couch_db:open_int(Source, []), - {ok, SourceInfo} = couch_db:get_db_info(SourceDb), - ok = couch_db:close(SourceDb), - SourceDocCount = couch_util:get_value(doc_count, SourceInfo), - wait_target_in_sync_loop(SourceDocCount, Target, 300) - end)}. - -wait_target_in_sync_loop(_DocCount, _TargetName, 0) -> - erlang:error( - {assertion_failed, - [{module, ?MODULE}, {line, ?LINE}, - {reason, "Could not get source and target databases in sync"}]}); -wait_target_in_sync_loop(DocCount, {remote, TargetName}, RetriesLeft) -> - wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft); -wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) -> - {ok, Target} = couch_db:open_int(TargetName, []), - {ok, TargetInfo} = couch_db:get_db_info(Target), - ok = couch_db:close(Target), - TargetDocCount = couch_util:get_value(doc_count, TargetInfo), - case TargetDocCount == DocCount of - true -> - true; - false -> - ok = timer:sleep(?DELAY), - wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1) - end. - -should_compare_databases({remote, Source}, Target) -> - should_compare_databases(Source, Target); -should_compare_databases(Source, {remote, Target}) -> - should_compare_databases(Source, Target); -should_compare_databases(Source, Target) -> - {timeout, 35, ?_test(begin - {ok, SourceDb} = couch_db:open_int(Source, []), - {ok, TargetDb} = couch_db:open_int(Target, []), - Fun = fun(FullDocInfo, Acc) -> - {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo), - {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]), - DocId = couch_util:get_value(<<"_id">>, Props), - DocTarget = case couch_db:open_doc(TargetDb, DocId) of - {ok, DocT} -> - DocT; - Error -> - erlang:error( - {assertion_failed, - [{module, ?MODULE}, {line, ?LINE}, - {reason, lists:concat(["Error opening document '", - ?b2l(DocId), "' from target: ", - couch_util:to_list(Error)])}]}) - end, - DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]), - ?assertEqual(DocJson, DocTargetJson), - {ok, Acc} - end, - {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []), - ok = couch_db:close(SourceDb), - ok = couch_db:close(TargetDb) - end)}. - - -reopen_db({remote, Db}) -> - reopen_db(Db); -reopen_db(DbName) when is_binary(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - ok = couch_db:close(Db), - {ok, Db}; -reopen_db(Db) -> - reopen_db(couch_db:name(Db)). - - -compact_db(Type, Db0) -> - Name = couch_db:name(Db0), - {ok, Db} = couch_db:open_int(Name, []), - {ok, CompactPid} = couch_db:start_compact(Db), - MonRef = erlang:monitor(process, CompactPid), - receive - {'DOWN', MonRef, process, CompactPid, normal} -> - ok; - {'DOWN', MonRef, process, CompactPid, noproc} -> - ok; - {'DOWN', MonRef, process, CompactPid, Reason} -> - erlang:error( - {assertion_failed, - [{module, ?MODULE}, {line, ?LINE}, - {reason, - lists:concat(["Error compacting ", Type, " database ", - ?b2l(Name), ": ", - couch_util:to_list(Reason)])}]}) - after ?TIMEOUT -> - erlang:error( - {assertion_failed, - [{module, ?MODULE}, {line, ?LINE}, - {reason, lists:concat(["Compaction for ", Type, " database ", - ?b2l(Name), " didn't finish"])}]}) - end, - ok = couch_db:close(Db). 
- -wait_for_compaction(Type, Db) -> - case couch_db:wait_for_compaction(Db) of - ok -> - ok; - {error, noproc} -> - ok; - {error, Reason} -> - erlang:error( - {assertion_failed, - [{module, ?MODULE}, {line, ?LINE}, - {reason, lists:concat(["Compaction of ", Type, - " database failed with: ", Reason])}]}) - end. - -replicate({remote, Db}, Target) -> - replicate(db_url(Db), Target); - -replicate(Source, {remote, Db}) -> - replicate(Source, db_url(Db)); - -replicate(Source, Target) -> - RepObject = {[ - {<<"source">>, Source}, - {<<"target">>, Target}, - {<<"continuous">>, true} - ]}, - {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER), - ok = couch_replicator_scheduler:add_job(Rep), - couch_replicator_scheduler:reschedule(), - Pid = get_pid(Rep#rep.id), - {ok, Pid, Rep#rep.id}. - - -wait_writer(Pid, NumDocs) -> - case get_writer_num_docs_written(Pid) of - N when N >= NumDocs -> - ok; - _ -> - wait_writer(Pid, NumDocs) - end. - -spawn_writer(Db) -> - Parent = self(), - Pid = spawn(fun() -> writer_loop(Db, Parent, 0) end), - Pid. - - -pause_writer(Pid) -> - Ref = make_ref(), - Pid ! {pause, Ref}, - receive - {paused, Ref} -> - ok - after ?TIMEOUT_WRITER -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Failed to pause source database writer"}]}) - end. - -resume_writer(Pid) -> - Ref = make_ref(), - Pid ! {continue, Ref}, - receive - {ok, Ref} -> - ok - after ?TIMEOUT_WRITER -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Failed to pause source database writer"}]}) - end. - -get_writer_num_docs_written(Pid) -> - Ref = make_ref(), - Pid ! {get_count, Ref}, - receive - {count, Ref, Count} -> - Count - after ?TIMEOUT_WRITER -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Timeout getting number of documents written" - " from source database writer"}]}) - end. - -stop_writer(Pid) -> - Ref = make_ref(), - Pid ! {stop, Ref}, - receive - {stopped, Ref, DocsWritten} -> - MonRef = erlang:monitor(process, Pid), - receive - {'DOWN', MonRef, process, Pid, _Reason} -> - DocsWritten - after ?TIMEOUT -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Timeout stopping source database writer"}]}) - end - after ?TIMEOUT_WRITER -> - erlang:error({assertion_failed, - [{module, ?MODULE}, - {line, ?LINE}, - {reason, "Timeout stopping source database writer"}]}) - end. - -writer_loop(Db0, Parent, Counter) -> - DbName = couch_db:name(Db0), - {ok, Data} = file:read_file(?ATTFILE), - maybe_pause(Parent, Counter), - Doc = couch_doc:from_json_obj({[ - {<<"_id">>, ?l2b(integer_to_list(Counter + 1))}, - {<<"value">>, Counter + 1}, - {<<"_attachments">>, {[ - {<<"icon1.png">>, {[ - {<<"data">>, base64:encode(Data)}, - {<<"content_type">>, <<"image/png">>} - ]}}, - {<<"icon2.png">>, {[ - {<<"data">>, base64:encode(iolist_to_binary([Data, Data]))}, - {<<"content_type">>, <<"image/png">>} - ]}} - ]}} - ]}), - maybe_pause(Parent, Counter), - {ok, Db} = couch_db:open_int(DbName, []), - {ok, _} = couch_db:update_doc(Db, Doc, []), - ok = couch_db:close(Db), - receive - {get_count, Ref} -> - Parent ! {count, Ref, Counter + 1}, - writer_loop(Db, Parent, Counter + 1); - {stop, Ref} -> - Parent ! {stopped, Ref, Counter + 1} - after 0 -> - timer:sleep(?DELAY), - writer_loop(Db, Parent, Counter + 1) - end. - -maybe_pause(Parent, Counter) -> - receive - {get_count, Ref} -> - Parent ! {count, Ref, Counter}; - {pause, Ref} -> - Parent ! 
{paused, Ref}, - receive - {continue, Ref2} -> - Parent ! {ok, Ref2} - end - after 0 -> - ok - end. diff --git a/src/couch_replicator/test/couch_replicator_connection_tests.erl b/src/couch_replicator/test/couch_replicator_connection_tests.erl deleted file mode 100644 index e75cc5a63..000000000 --- a/src/couch_replicator/test/couch_replicator_connection_tests.erl +++ /dev/null @@ -1,241 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_connection_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - - -setup() -> - Host = config:get("httpd", "bind_address", "127.0.0.1"), - Port = config:get("httpd", "port", "5984"), - {Host, Port}. - -teardown(_) -> - ok. - - -httpc_pool_test_() -> - { - "replicator connection sharing tests", - { - setup, - fun() -> test_util:start_couch([couch_replicator]) end, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun connections_shared_after_release/1, - fun connections_not_shared_after_owner_death/1, - fun idle_connections_closed/1, - fun test_owner_monitors/1, - fun worker_discards_creds_on_create/1, - fun worker_discards_url_creds_after_request/1, - fun worker_discards_creds_in_headers_after_request/1, - fun worker_discards_proxy_creds_after_request/1 - ] - } - } - }. - - -connections_shared_after_release({Host, Port}) -> - ?_test(begin - URL = "http://" ++ Host ++ ":" ++ Port, - Self = self(), - {ok, Pid} = couch_replicator_connection:acquire(URL), - couch_replicator_connection:release(Pid), - spawn(fun() -> - Self ! couch_replicator_connection:acquire(URL) - end), - receive - {ok, Pid2} -> - ?assertEqual(Pid, Pid2) - end - end). - - -connections_not_shared_after_owner_death({Host, Port}) -> - ?_test(begin - URL = "http://" ++ Host ++ ":" ++ Port, - Self = self(), - spawn(fun() -> - Self ! couch_replicator_connection:acquire(URL), - error("simulate division by zero without compiler warning") - end), - receive - {ok, Pid} -> - {ok, Pid2} = couch_replicator_connection:acquire(URL), - ?assertNotEqual(Pid, Pid2), - MRef = monitor(process, Pid), - receive {'DOWN', MRef, process, Pid, _Reason} -> - ?assert(not is_process_alive(Pid)); - Other -> throw(Other) - end - end - end). - - -idle_connections_closed({Host, Port}) -> - ?_test(begin - URL = "http://" ++ Host ++ ":" ++ Port, - {ok, Pid} = couch_replicator_connection:acquire(URL), - couch_replicator_connection ! close_idle_connections, - ?assert(ets:member(couch_replicator_connection, Pid)), - % block until idle connections have closed - sys:get_status(couch_replicator_connection), - couch_replicator_connection:release(Pid), - couch_replicator_connection ! close_idle_connections, - % block until idle connections have closed - sys:get_status(couch_replicator_connection), - ?assert(not ets:member(couch_replicator_connection, Pid)) - end). 
- - -test_owner_monitors({Host, Port}) -> - ?_test(begin - URL = "http://" ++ Host ++ ":" ++ Port, - {ok, Worker0} = couch_replicator_connection:acquire(URL), - assert_monitors_equal([{process, self()}]), - couch_replicator_connection:release(Worker0), - assert_monitors_equal([]), - {Workers, Monitors} = lists:foldl(fun(_, {WAcc, MAcc}) -> - {ok, Worker1} = couch_replicator_connection:acquire(URL), - MAcc1 = [{process, self()} | MAcc], - assert_monitors_equal(MAcc1), - {[Worker1 | WAcc], MAcc1} - end, {[], []}, lists:seq(1,5)), - lists:foldl(fun(Worker2, Acc) -> - [_ | NewAcc] = Acc, - couch_replicator_connection:release(Worker2), - assert_monitors_equal(NewAcc), - NewAcc - end, Monitors, Workers) - end). - - -worker_discards_creds_on_create({Host, Port}) -> - ?_test(begin - {User, Pass, B64Auth} = user_pass(), - URL = "http://" ++ User ++ ":" ++ Pass ++ "@" ++ Host ++ ":" ++ Port, - {ok, WPid} = couch_replicator_connection:acquire(URL), - Internals = worker_internals(WPid), - ?assert(string:str(Internals, B64Auth) =:= 0), - ?assert(string:str(Internals, Pass) =:= 0) - end). - - -worker_discards_url_creds_after_request({Host, _}) -> - ?_test(begin - {User, Pass, B64Auth} = user_pass(), - {Port, ServerPid} = server(), - PortStr = integer_to_list(Port), - URL = "http://" ++ User ++ ":" ++ Pass ++ "@" ++ Host ++ ":" ++ PortStr, - {ok, WPid} = couch_replicator_connection:acquire(URL), - ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, [], [])), - Internals = worker_internals(WPid), - ?assert(string:str(Internals, B64Auth) =:= 0), - ?assert(string:str(Internals, Pass) =:= 0), - couch_replicator_connection:release(WPid), - unlink(ServerPid), - exit(ServerPid, kill) - end). - - -worker_discards_creds_in_headers_after_request({Host, _}) -> - ?_test(begin - {_User, Pass, B64Auth} = user_pass(), - {Port, ServerPid} = server(), - PortStr = integer_to_list(Port), - URL = "http://" ++ Host ++ ":" ++ PortStr, - {ok, WPid} = couch_replicator_connection:acquire(URL), - Headers = [{"Authorization", "Basic " ++ B64Auth}], - ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, Headers, [])), - Internals = worker_internals(WPid), - ?assert(string:str(Internals, B64Auth) =:= 0), - ?assert(string:str(Internals, Pass) =:= 0), - couch_replicator_connection:release(WPid), - unlink(ServerPid), - exit(ServerPid, kill) - end). - - -worker_discards_proxy_creds_after_request({Host, _}) -> - ?_test(begin - {User, Pass, B64Auth} = user_pass(), - {Port, ServerPid} = server(), - PortStr = integer_to_list(Port), - URL = "http://" ++ Host ++ ":" ++ PortStr, - {ok, WPid} = couch_replicator_connection:acquire(URL), - Opts = [ - {proxy_host, Host}, - {proxy_port, Port}, - {proxy_user, User}, - {proxy_pass, Pass} - ], - ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, [], Opts)), - Internals = worker_internals(WPid), - ?assert(string:str(Internals, B64Auth) =:= 0), - ?assert(string:str(Internals, Pass) =:= 0), - couch_replicator_connection:release(WPid), - unlink(ServerPid), - exit(ServerPid, kill) - end). - - -send_req(WPid, URL, Headers, Opts) -> - ibrowse:send_req_direct(WPid, URL, Headers, get, [], Opts). - - -user_pass() -> - User = "specialuser", - Pass = "averysecretpassword", - B64Auth = ibrowse_lib:encode_base64(User ++ ":" ++ Pass), - {User, Pass, B64Auth}. - - -worker_internals(Pid) -> - Dict = io_lib:format("~p", [erlang:process_info(Pid, dictionary)]), - State = io_lib:format("~p", [sys:get_state(Pid)]), - lists:flatten([Dict, State]). 
- - -server() -> - {ok, LSock} = gen_tcp:listen(0, [{recbuf, 256}, {active, false}]), - {ok, LPort} = inet:port(LSock), - SPid = spawn_link(fun() -> server_responder(LSock) end), - {LPort, SPid}. - - -server_responder(LSock) -> - {ok, Sock} = gen_tcp:accept(LSock), - case gen_tcp:recv(Sock, 0) of - {ok, Data} -> - % sanity check that all the request data was received - ?assert(lists:prefix("GET ", Data)), - ?assert(lists:suffix("\r\n\r\n", Data)), - Res = ["HTTP/1.1 200 OK", "Content-Length: 0", "\r\n"], - ok = gen_tcp:send(Sock, string:join(Res, "\r\n")); - Other -> - gen_tcp:close(Sock), - throw({replication_eunit_tcp_server_crashed, Other}) - end, - server_responder(LSock). - - -assert_monitors_equal(ShouldBe) -> - sys:get_status(couch_replicator_connection), - {monitors, Monitors} = process_info(whereis(couch_replicator_connection), monitors), - ?assertEqual(Monitors, ShouldBe). diff --git a/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl b/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl deleted file mode 100644 index 63310d39e..000000000 --- a/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl +++ /dev/null @@ -1,143 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_create_target_with_options_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_replicator/src/couch_replicator.hrl"). - --define(USERNAME, "rep_admin"). --define(PASSWORD, "secret"). - -setup() -> - Ctx = test_util:start_couch([fabric, mem3, couch_replicator, chttpd]), - Hashed = couch_passwords:hash_admin_password(?PASSWORD), - ok = config:set("admins", ?USERNAME, ?b2l(Hashed), _Persist=false), - Source = ?tempdb(), - Target = ?tempdb(), - {Ctx, {Source, Target}}. - - -teardown({Ctx, {_Source, _Target}}) -> - config:delete("admins", ?USERNAME), - ok = test_util:stop_couch(Ctx). - - -create_target_with_options_replication_test_() -> - { - "Create target with range partitions tests", - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_create_target_with_q_4/1, - fun should_create_target_with_q_2_n_1/1, - fun should_create_target_with_default/1, - fun should_not_create_target_with_q_any/1 - ] - } - }. - - -should_create_target_with_q_4({_Ctx, {Source, Target}}) -> - RepObject = {[ - {<<"source">>, db_url(Source)}, - {<<"target">>, db_url(Target)}, - {<<"create_target">>, true}, - {<<"create_target_params">>, {[{<<"q">>, <<"4">>}]}} - ]}, - create_db(Source), - create_doc(Source), - {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), - - {ok, TargetInfo} = fabric:get_db_info(Target), - {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), - delete_db(Source), - delete_db(Target), - ?_assertEqual(4, couch_util:get_value(q, ClusterInfo)). 
- - -should_create_target_with_q_2_n_1({_Ctx, {Source, Target}}) -> - RepObject = {[ - {<<"source">>, db_url(Source)}, - {<<"target">>, db_url(Target)}, - {<<"create_target">>, true}, - {<<"create_target_params">>, - {[{<<"q">>, <<"2">>}, {<<"n">>, <<"1">>}]}} - ]}, - create_db(Source), - create_doc(Source), - {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), - - {ok, TargetInfo} = fabric:get_db_info(Target), - {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), - delete_db(Source), - delete_db(Target), - [ - ?_assertEqual(2, couch_util:get_value(q, ClusterInfo)), - ?_assertEqual(1, couch_util:get_value(n, ClusterInfo)) - ]. - - -should_create_target_with_default({_Ctx, {Source, Target}}) -> - RepObject = {[ - {<<"source">>, db_url(Source)}, - {<<"target">>, db_url(Target)}, - {<<"create_target">>, true} - ]}, - create_db(Source), - create_doc(Source), - {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), - - {ok, TargetInfo} = fabric:get_db_info(Target), - {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), - Q = config:get("cluster", "q", "8"), - delete_db(Source), - delete_db(Target), - ?_assertEqual(list_to_integer(Q), couch_util:get_value(q, ClusterInfo)). - - -should_not_create_target_with_q_any({_Ctx, {Source, Target}}) -> - RepObject = {[ - {<<"source">>, db_url(Source)}, - {<<"target">>, db_url(Target)}, - {<<"create_target">>, false}, - {<<"create_target_params">>, {[{<<"q">>, <<"1">>}]}} - ]}, - create_db(Source), - create_doc(Source), - {error, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), - DbExist = is_list(catch mem3:shards(Target)), - delete_db(Source), - ?_assertEqual(false, DbExist). - - -create_doc(DbName) -> - Body = {[{<<"foo">>, <<"bar">>}]}, - NewDoc = #doc{body = Body}, - {ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]). - - -create_db(DbName) -> - ok = fabric:create_db(DbName, [?ADMIN_CTX]). - - -delete_db(DbName) -> - ok = fabric:delete_db(DbName, [?ADMIN_CTX]). - - -db_url(DbName) -> - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - ?l2b(io_lib:format("http://~s:~s@~s:~b/~s", [?USERNAME, ?PASSWORD, Addr, - Port, DbName])). diff --git a/src/couch_replicator/test/couch_replicator_filtered_tests.erl b/src/couch_replicator/test/couch_replicator_filtered_tests.erl deleted file mode 100644 index d34e9f020..000000000 --- a/src/couch_replicator/test/couch_replicator_filtered_tests.erl +++ /dev/null @@ -1,244 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_filtered_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_replicator/src/couch_replicator.hrl"). 
- --define(DDOC, {[ - {<<"_id">>, <<"_design/filter_ddoc">>}, - {<<"filters">>, {[ - {<<"testfilter">>, <<" - function(doc, req){if (doc.class == 'mammal') return true;} - ">>}, - {<<"queryfilter">>, <<" - function(doc, req) { - if (doc.class && req.query.starts) { - return doc.class.indexOf(req.query.starts) === 0; - } - else { - return false; - } - } - ">>} - ]}}, - {<<"views">>, {[ - {<<"mammals">>, {[ - {<<"map">>, <<" - function(doc) { - if (doc.class == 'mammal') { - emit(doc._id, null); - } - } - ">>} - ]}} - ]}} -]}). - -setup(_) -> - Ctx = test_util:start_couch([couch_replicator]), - Source = create_db(), - create_docs(Source), - Target = create_db(), - {Ctx, {Source, Target}}. - -teardown(_, {Ctx, {Source, Target}}) -> - delete_db(Source), - delete_db(Target), - ok = application:stop(couch_replicator), - ok = test_util:stop_couch(Ctx). - -filtered_replication_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], - { - "Filtered replication tests", - { - foreachx, - fun setup/1, fun teardown/2, - [{Pair, fun should_succeed/2} || Pair <- Pairs] - } - }. - -query_filtered_replication_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], - { - "Filtered with query replication tests", - { - foreachx, - fun setup/1, fun teardown/2, - [{Pair, fun should_succeed_with_query/2} || Pair <- Pairs] - } - }. - -view_filtered_replication_test_() -> - Pairs = [{local, local}], - { - "Filtered with a view replication tests", - { - foreachx, - fun setup/1, fun teardown/2, - [{Pair, fun should_succeed_with_view/2} || Pair <- Pairs] - } - }. - -should_succeed({From, To}, {_Ctx, {Source, Target}}) -> - RepObject = {[ - {<<"source">>, db_url(From, Source)}, - {<<"target">>, db_url(To, Target)}, - {<<"filter">>, <<"filter_ddoc/testfilter">>} - ]}, - {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), - %% FilteredFun is an Erlang version of following JS function - %% function(doc, req){if (doc.class == 'mammal') return true;} - FilterFun = fun(_DocId, {Props}) -> - couch_util:get_value(<<"class">>, Props) == <<"mammal">> - end, - {ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun), - {lists:flatten(io_lib:format("~p -> ~p", [From, To])), [ - {"Target DB has proper number of docs", - ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))}, - {"Target DB doesn't have deleted docs", - ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))}, - {"All the docs filtered as expected", - ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))} - ]}. - -should_succeed_with_query({From, To}, {_Ctx, {Source, Target}}) -> - RepObject = {[ - {<<"source">>, db_url(From, Source)}, - {<<"target">>, db_url(To, Target)}, - {<<"filter">>, <<"filter_ddoc/queryfilter">>}, - {<<"query_params">>, {[ - {<<"starts">>, <<"a">>} - ]}} - ]}, - {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), - FilterFun = fun(_DocId, {Props}) -> - case couch_util:get_value(<<"class">>, Props) of - <<"a", _/binary>> -> true; - _ -> false - end - end, - {ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun), - {lists:flatten(io_lib:format("~p -> ~p", [From, To])), [ - {"Target DB has proper number of docs", - ?_assertEqual(2, proplists:get_value(doc_count, TargetDbInfo))}, - {"Target DB doesn't have deleted docs", - ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))}, - {"All the docs filtered as expected", - ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))} - ]}. 
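
query_params are handed to the JavaScript filter as req.query, which is how queryfilter above reads req.query.starts. The same filter can also be exercised outside replication through the changes feed; the request below is illustrative (host, port and database name are placeholders):

    %% GET /source_db/_changes?filter=filter_ddoc/queryfilter&starts=a
    %% returns only documents whose class begins with "a".
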
- -should_succeed_with_view({From, To}, {_Ctx, {Source, Target}}) -> - RepObject = {[ - {<<"source">>, db_url(From, Source)}, - {<<"target">>, db_url(To, Target)}, - {<<"filter">>, <<"_view">>}, - {<<"query_params">>, {[ - {<<"view">>, <<"filter_ddoc/mammals">>} - ]}} - ]}, - {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), - FilterFun = fun(_DocId, {Props}) -> - couch_util:get_value(<<"class">>, Props) == <<"mammal">> - end, - {ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun), - {lists:flatten(io_lib:format("~p -> ~p", [From, To])), [ - {"Target DB has proper number of docs", - ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))}, - {"Target DB doesn't have deleted docs", - ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))}, - {"All the docs filtered as expected", - ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))} - ]}. - -compare_dbs(Source, Target, FilterFun) -> - {ok, SourceDb} = couch_db:open_int(Source, []), - {ok, TargetDb} = couch_db:open_int(Target, []), - {ok, TargetDbInfo} = couch_db:get_db_info(TargetDb), - Fun = fun(FullDocInfo, Acc) -> - {ok, DocId, SourceDoc} = read_doc(SourceDb, FullDocInfo), - TargetReply = read_doc(TargetDb, DocId), - case FilterFun(DocId, SourceDoc) of - true -> - ValidReply = {ok, DocId, SourceDoc} == TargetReply, - {ok, [ValidReply|Acc]}; - false -> - ValidReply = {not_found, missing} == TargetReply, - {ok, [ValidReply|Acc]} - end - end, - {ok, AllReplies} = couch_db:fold_docs(SourceDb, Fun, [], []), - ok = couch_db:close(SourceDb), - ok = couch_db:close(TargetDb), - {ok, TargetDbInfo, AllReplies}. - -read_doc(Db, DocIdOrInfo) -> - case couch_db:open_doc(Db, DocIdOrInfo) of - {ok, Doc} -> - {Props} = couch_doc:to_json_obj(Doc, [attachments]), - DocId = couch_util:get_value(<<"_id">>, Props), - {ok, DocId, {Props}}; - Error -> - Error - end. - -create_db() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - DbName. - -create_docs(DbName) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - DDoc = couch_doc:from_json_obj(?DDOC), - Doc1 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc1">>}, - {<<"class">>, <<"mammal">>}, - {<<"value">>, 1} - - ]}), - Doc2 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc2">>}, - {<<"class">>, <<"amphibians">>}, - {<<"value">>, 2} - - ]}), - Doc3 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc3">>}, - {<<"class">>, <<"reptiles">>}, - {<<"value">>, 3} - - ]}), - Doc4 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc4">>}, - {<<"class">>, <<"arthropods">>}, - {<<"value">>, 2} - - ]}), - {ok, _} = couch_db:update_docs(Db, [DDoc, Doc1, Doc2, Doc3, Doc4]), - couch_db:ensure_full_commit(Db), - couch_db:close(Db). - -delete_db(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]). - -db_url(local, DbName) -> - DbName; -db_url(remote, DbName) -> - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(couch_httpd, port), - ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])). diff --git a/src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl b/src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl deleted file mode 100644 index c4ad4e9b6..000000000 --- a/src/couch_replicator/test/couch_replicator_httpc_pool_tests.erl +++ /dev/null @@ -1,174 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_httpc_pool_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --define(TIMEOUT, 1000). - - -setup() -> - spawn_pool(). - -teardown(Pool) -> - stop_pool(Pool). - - -httpc_pool_test_() -> - { - "httpc pool tests", - { - setup, - fun() -> test_util:start_couch([couch_replicator]) end, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun should_block_new_clients_when_full/1, - fun should_replace_worker_on_death/1 - ] - } - } - }. - - -should_block_new_clients_when_full(Pool) -> - ?_test(begin - Client1 = spawn_client(Pool), - Client2 = spawn_client(Pool), - Client3 = spawn_client(Pool), - - ?assertEqual(ok, ping_client(Client1)), - ?assertEqual(ok, ping_client(Client2)), - ?assertEqual(ok, ping_client(Client3)), - - Worker1 = get_client_worker(Client1, "1"), - Worker2 = get_client_worker(Client2, "2"), - Worker3 = get_client_worker(Client3, "3"), - - ?assert(is_process_alive(Worker1)), - ?assert(is_process_alive(Worker2)), - ?assert(is_process_alive(Worker3)), - - ?assertNotEqual(Worker1, Worker2), - ?assertNotEqual(Worker2, Worker3), - ?assertNotEqual(Worker3, Worker1), - - Client4 = spawn_client(Pool), - ?assertEqual(timeout, ping_client(Client4)), - - ?assertEqual(ok, stop_client(Client1)), - ?assertEqual(ok, ping_client(Client4)), - - Worker4 = get_client_worker(Client4, "4"), - ?assertEqual(Worker1, Worker4), - - lists:foreach( - fun(C) -> - ?assertEqual(ok, stop_client(C)) - end, [Client2, Client3, Client4]) - end). - -should_replace_worker_on_death(Pool) -> - ?_test(begin - Client1 = spawn_client(Pool), - ?assertEqual(ok, ping_client(Client1)), - Worker1 = get_client_worker(Client1, "1"), - ?assert(is_process_alive(Worker1)), - - ?assertEqual(ok, kill_client_worker(Client1)), - ?assertNot(is_process_alive(Worker1)), - ?assertEqual(ok, stop_client(Client1)), - - Client2 = spawn_client(Pool), - ?assertEqual(ok, ping_client(Client2)), - Worker2 = get_client_worker(Client2, "2"), - ?assert(is_process_alive(Worker2)), - - ?assertNotEqual(Worker1, Worker2), - ?assertEqual(ok, stop_client(Client2)) - end). - - -spawn_client(Pool) -> - Parent = self(), - Ref = make_ref(), - Pid = spawn(fun() -> - {ok, Worker} = couch_replicator_httpc_pool:get_worker(Pool), - loop(Parent, Ref, Worker, Pool) - end), - {Pid, Ref}. - -ping_client({Pid, Ref}) -> - Pid ! ping, - receive - {pong, Ref} -> - ok - after ?TIMEOUT -> - timeout - end. - -get_client_worker({Pid, Ref}, ClientName) -> - Pid ! get_worker, - receive - {worker, Ref, Worker} -> - Worker - after ?TIMEOUT -> - erlang:error( - {assertion_failed, - [{module, ?MODULE}, {line, ?LINE}, - {reason, "Timeout getting client " ++ ClientName ++ " worker"}]}) - end. - -stop_client({Pid, Ref}) -> - Pid ! stop, - receive - {stop, Ref} -> - ok - after ?TIMEOUT -> - timeout - end. - -kill_client_worker({Pid, Ref}) -> - Pid ! get_worker, - receive - {worker, Ref, Worker} -> - exit(Worker, kill), - ok - after ?TIMEOUT -> - timeout - end. - -loop(Parent, Ref, Worker, Pool) -> - receive - ping -> - Parent ! 
{pong, Ref}, - loop(Parent, Ref, Worker, Pool); - get_worker -> - Parent ! {worker, Ref, Worker}, - loop(Parent, Ref, Worker, Pool); - stop -> - couch_replicator_httpc_pool:release_worker(Pool, Worker), - Parent ! {stop, Ref} - end. - -spawn_pool() -> - Host = config:get("httpd", "bind_address", "127.0.0.1"), - Port = config:get("httpd", "port", "5984"), - {ok, Pool} = couch_replicator_httpc_pool:start_link( - "http://" ++ Host ++ ":" ++ Port, [{max_connections, 3}]), - Pool. - -stop_pool(Pool) -> - ok = couch_replicator_httpc_pool:stop(Pool). diff --git a/src/couch_replicator/test/couch_replicator_id_too_long_tests.erl b/src/couch_replicator/test/couch_replicator_id_too_long_tests.erl deleted file mode 100644 index 70eda0566..000000000 --- a/src/couch_replicator/test/couch_replicator_id_too_long_tests.erl +++ /dev/null @@ -1,94 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_id_too_long_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_replicator/src/couch_replicator.hrl"). - - -setup(_) -> - Ctx = test_util:start_couch([couch_replicator]), - Source = create_db(), - create_doc(Source), - Target = create_db(), - {Ctx, {Source, Target}}. - - -teardown(_, {Ctx, {Source, Target}}) -> - delete_db(Source), - delete_db(Target), - config:set("replicator", "max_document_id_length", "infinity"), - ok = test_util:stop_couch(Ctx). - - -id_too_long_replication_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], - { - "Doc id too long tests", - { - foreachx, - fun setup/1, fun teardown/2, - [{Pair, fun should_succeed/2} || Pair <- Pairs] ++ - [{Pair, fun should_fail/2} || Pair <- Pairs] - } - }. - - -should_succeed({From, To}, {_Ctx, {Source, Target}}) -> - RepObject = {[ - {<<"source">>, db_url(From, Source)}, - {<<"target">>, db_url(To, Target)} - ]}, - config:set("replicator", "max_document_id_length", "5"), - {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), - ?_assertEqual(ok, couch_replicator_test_helper:compare_dbs(Source, Target)). - - -should_fail({From, To}, {_Ctx, {Source, Target}}) -> - RepObject = {[ - {<<"source">>, db_url(From, Source)}, - {<<"target">>, db_url(To, Target)} - ]}, - config:set("replicator", "max_document_id_length", "4"), - {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), - ?_assertError({badmatch, {not_found, missing}}, - couch_replicator_test_helper:compare_dbs(Source, Target)). - - -create_db() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - DbName. - - -create_doc(DbName) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - Doc = couch_doc:from_json_obj({[{<<"_id">>, <<"12345">>}]}), - {ok, _} = couch_db:update_doc(Db, Doc, []), - couch_db:ensure_full_commit(Db), - couch_db:close(Db). - - -delete_db(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]). 
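
Both cases above hinge on the replicator's max_document_id_length setting: with a limit of 5 the five-byte id "12345" replicates, while with 4 it is skipped and compare_dbs fails with {not_found, missing}. A sketch of the guard being driven (the helper name and shape are ours, not the replicator's actual code):

    %% true when DocId exceeds the configured limit; "infinity"
    %% (the default restored in teardown above) disables the check.
    id_too_long(DocId) when is_binary(DocId) ->
        case config:get("replicator", "max_document_id_length", "infinity") of
            "infinity" -> false;
            Limit -> byte_size(DocId) > list_to_integer(Limit)
        end.
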
- - -db_url(local, DbName) -> - DbName; -db_url(remote, DbName) -> - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(couch_httpd, port), - ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])). diff --git a/src/couch_replicator/test/couch_replicator_large_atts_tests.erl b/src/couch_replicator/test/couch_replicator_large_atts_tests.erl deleted file mode 100644 index b9adf5c4b..000000000 --- a/src/couch_replicator/test/couch_replicator_large_atts_tests.erl +++ /dev/null @@ -1,124 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_large_atts_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --import(couch_replicator_test_helper, [ - db_url/1, - replicate/2, - compare_dbs/2 -]). - --define(ATT_SIZE_1, 2 * 1024 * 1024). --define(ATT_SIZE_2, round(6.6 * 1024 * 1024)). --define(DOCS_COUNT, 11). --define(TIMEOUT_EUNIT, 120). - - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - DbName. - -setup(local) -> - setup(); -setup(remote) -> - {remote, setup()}; -setup({A, B}) -> - Ctx = test_util:start_couch([couch_replicator]), - config:set("attachments", "compressible_types", "text/*", false), - Source = setup(A), - Target = setup(B), - {Ctx, {Source, Target}}. - -teardown({remote, DbName}) -> - teardown(DbName); -teardown(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok. - -teardown(_, {Ctx, {Source, Target}}) -> - teardown(Source), - teardown(Target), - - ok = application:stop(couch_replicator), - ok = test_util:stop_couch(Ctx). - -large_atts_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], - { - "Replicate docs with large attachments", - { - foreachx, - fun setup/1, fun teardown/2, - [{Pair, fun should_populate_replicate_compact/2} - || Pair <- Pairs] - } - }. - - -should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) -> - {lists:flatten(io_lib:format("~p -> ~p", [From, To])), - {inorder, [should_populate_source(Source), - should_replicate(Source, Target), - should_compare_databases(Source, Target)]}}. - -should_populate_source({remote, Source}) -> - should_populate_source(Source); -should_populate_source(Source) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source, ?DOCS_COUNT))}. - -should_replicate({remote, Source}, Target) -> - should_replicate(db_url(Source), Target); -should_replicate(Source, {remote, Target}) -> - should_replicate(Source, db_url(Target)); -should_replicate(Source, Target) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}. - -should_compare_databases({remote, Source}, Target) -> - should_compare_databases(Source, Target); -should_compare_databases(Source, {remote, Target}) -> - should_compare_databases(Source, Target); -should_compare_databases(Source, Target) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}. 
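
populate_db/2 below builds its attachments lazily: couch_att accepts a data fun that is called per chunk, so the 2 MB and ~6.6 MB bodies are generated on demand rather than materialized as a single binary up front. A condensed example of such an attachment:

    %% Attachment whose bytes are produced on demand; couch_att calls
    %% the fun with the number of bytes it wants for each chunk.
    Att = couch_att:new([
        {name, <<"blob">>},
        {type, <<"application/octet-stream">>},
        {att_len, 2 * 1024 * 1024},
        {data, fun(Size) -> crypto:strong_rand_bytes(Size) end}
    ]).
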
- - -populate_db(DbName, DocCount) -> - {ok, Db} = couch_db:open_int(DbName, []), - Docs = lists:foldl( - fun(DocIdCounter, Acc) -> - Doc = #doc{ - id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]), - body = {[]}, - atts = [ - att(<<"att1">>, ?ATT_SIZE_1, <<"text/plain">>), - att(<<"att2">>, ?ATT_SIZE_2, <<"app/binary">>) - ] - }, - [Doc | Acc] - end, - [], lists:seq(1, DocCount)), - {ok, _} = couch_db:update_docs(Db, Docs, []), - couch_db:close(Db). - -att(Name, Size, Type) -> - couch_att:new([ - {name, Name}, - {type, Type}, - {att_len, Size}, - {data, fun(Count) -> crypto:strong_rand_bytes(Count) end} - ]). diff --git a/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl b/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl deleted file mode 100644 index eee5b1647..000000000 --- a/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl +++ /dev/null @@ -1,206 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_many_leaves_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --import(couch_replicator_test_helper, [ - db_url/1, - replicate/2 -]). - --define(DOCS_CONFLICTS, [ - {<<"doc1">>, 10}, - {<<"doc2">>, 100}, - % a number > MaxURLlength (7000) / length(DocRevisionString) - {<<"doc3">>, 210} -]). --define(NUM_ATTS, 2). --define(TIMEOUT_EUNIT, 60). --define(i2l(I), integer_to_list(I)). --define(io2b(Io), iolist_to_binary(Io)). - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - DbName. - -setup(local) -> - setup(); -setup(remote) -> - {remote, setup()}; -setup({A, B}) -> - Ctx = test_util:start_couch([couch_replicator]), - Source = setup(A), - Target = setup(B), - {Ctx, {Source, Target}}. - -teardown({remote, DbName}) -> - teardown(DbName); -teardown(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok. - -teardown(_, {Ctx, {Source, Target}}) -> - teardown(Source), - teardown(Target), - ok = application:stop(couch_replicator), - ok = test_util:stop_couch(Ctx). - -docs_with_many_leaves_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], - { - "Replicate documents with many leaves", - { - foreachx, - fun setup/1, fun teardown/2, - [{Pair, fun should_populate_replicate_compact/2} - || Pair <- Pairs] - } - }. - - -should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) -> - {lists:flatten(io_lib:format("~p -> ~p", [From, To])), - {inorder, [ - should_populate_source(Source), - should_replicate(Source, Target), - should_verify_target(Source, Target), - should_add_attachments_to_source(Source), - should_replicate(Source, Target), - should_verify_target(Source, Target) - ]}}. - -should_populate_source({remote, Source}) -> - should_populate_source(Source); -should_populate_source(Source) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source))}. 
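
Conflicts are seeded below by writing sibling revisions directly with update_docs/4 in replicated_changes mode, which accepts arbitrary revision trees instead of rejecting non-descendant revisions as edit conflicts. The add_doc_siblings helpers that follow batch this up; stripped down to a single sibling, the idea is:

    %% Inject one conflicting generation-1 revision for DocId.
    add_sibling(Db, DocId, Value) ->
        Rev = couch_hash:md5_hash(Value),
        Doc = #doc{id = DocId, revs = {1, [Rev]},
                   body = {[{<<"value">>, Value}]}},
        {ok, []} = couch_db:update_docs(Db, [Doc], [], replicated_changes).
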
- -should_replicate({remote, Source}, Target) -> - should_replicate(db_url(Source), Target); -should_replicate(Source, {remote, Target}) -> - should_replicate(Source, db_url(Target)); -should_replicate(Source, Target) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}. - -should_verify_target({remote, Source}, Target) -> - should_verify_target(Source, Target); -should_verify_target(Source, {remote, Target}) -> - should_verify_target(Source, Target); -should_verify_target(Source, Target) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(begin - {ok, SourceDb} = couch_db:open_int(Source, []), - {ok, TargetDb} = couch_db:open_int(Target, []), - verify_target(SourceDb, TargetDb, ?DOCS_CONFLICTS), - ok = couch_db:close(SourceDb), - ok = couch_db:close(TargetDb) - end)}. - -should_add_attachments_to_source({remote, Source}) -> - should_add_attachments_to_source(Source); -should_add_attachments_to_source(Source) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(begin - {ok, SourceDb} = couch_db:open_int(Source, []), - add_attachments(SourceDb, ?NUM_ATTS, ?DOCS_CONFLICTS), - ok = couch_db:close(SourceDb) - end)}. - -populate_db(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - lists:foreach( - fun({DocId, NumConflicts}) -> - Value = <<"0">>, - Doc = #doc{ - id = DocId, - body = {[ {<<"value">>, Value} ]} - }, - {ok, _} = couch_db:update_doc(Db, Doc, []), - {ok, _} = add_doc_siblings(Db, DocId, NumConflicts) - end, ?DOCS_CONFLICTS), - couch_db:close(Db). - -add_doc_siblings(Db, DocId, NumLeaves) when NumLeaves > 0 -> - add_doc_siblings(Db, DocId, NumLeaves, [], []). - -add_doc_siblings(Db, _DocId, 0, AccDocs, AccRevs) -> - {ok, []} = couch_db:update_docs(Db, AccDocs, [], replicated_changes), - {ok, AccRevs}; - -add_doc_siblings(Db, DocId, NumLeaves, AccDocs, AccRevs) -> - Value = ?l2b(?i2l(NumLeaves)), - Rev = couch_hash:md5_hash(Value), - Doc = #doc{ - id = DocId, - revs = {1, [Rev]}, - body = {[ {<<"value">>, Value} ]} - }, - add_doc_siblings(Db, DocId, NumLeaves - 1, - [Doc | AccDocs], [{1, Rev} | AccRevs]). - -verify_target(_SourceDb, _TargetDb, []) -> - ok; -verify_target(SourceDb, TargetDb, [{DocId, NumConflicts} | Rest]) -> - {ok, SourceLookups} = couch_db:open_doc_revs( - SourceDb, - DocId, - all, - [conflicts, deleted_conflicts]), - {ok, TargetLookups} = couch_db:open_doc_revs( - TargetDb, - DocId, - all, - [conflicts, deleted_conflicts]), - SourceDocs = [Doc || {ok, Doc} <- SourceLookups], - TargetDocs = [Doc || {ok, Doc} <- TargetLookups], - Total = NumConflicts + 1, - ?assertEqual(Total, length(TargetDocs)), - lists:foreach( - fun({SourceDoc, TargetDoc}) -> - SourceJson = couch_doc:to_json_obj(SourceDoc, [attachments]), - TargetJson = couch_doc:to_json_obj(TargetDoc, [attachments]), - ?assertEqual(SourceJson, TargetJson) - end, - lists:zip(SourceDocs, TargetDocs)), - verify_target(SourceDb, TargetDb, Rest). 
- -add_attachments(_SourceDb, _NumAtts, []) -> - ok; -add_attachments(SourceDb, NumAtts, [{DocId, NumConflicts} | Rest]) -> - {ok, SourceLookups} = couch_db:open_doc_revs(SourceDb, DocId, all, []), - SourceDocs = [Doc || {ok, Doc} <- SourceLookups], - Total = NumConflicts + 1, - ?assertEqual(Total, length(SourceDocs)), - NewDocs = lists:foldl( - fun(#doc{atts = Atts, revs = {Pos, [Rev | _]}} = Doc, Acc) -> - NewAtts = lists:foldl(fun(I, AttAcc) -> - AttData = crypto:strong_rand_bytes(100), - NewAtt = couch_att:new([ - {name, ?io2b(["att_", ?i2l(I), "_", - couch_doc:rev_to_str({Pos, Rev})])}, - {type, <<"application/foobar">>}, - {att_len, byte_size(AttData)}, - {data, AttData} - ]), - [NewAtt | AttAcc] - end, [], lists:seq(1, NumAtts)), - [Doc#doc{atts = Atts ++ NewAtts} | Acc] - end, - [], SourceDocs), - {ok, UpdateResults} = couch_db:update_docs(SourceDb, NewDocs, []), - NewRevs = [R || {ok, R} <- UpdateResults], - ?assertEqual(length(NewDocs), length(NewRevs)), - add_attachments(SourceDb, NumAtts, Rest). - diff --git a/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl b/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl deleted file mode 100644 index c1681781f..000000000 --- a/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl +++ /dev/null @@ -1,157 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_missing_stubs_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --import(couch_replicator_test_helper, [ - db_url/1, - replicate/2, - compare_dbs/2 -]). - --define(REVS_LIMIT, 3). --define(TIMEOUT_EUNIT, 30). - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - DbName. - -setup(local) -> - setup(); -setup(remote) -> - {remote, setup()}; -setup({A, B}) -> - Ctx = test_util:start_couch([couch_replicator]), - Source = setup(A), - Target = setup(B), - {Ctx, {Source, Target}}. - -teardown({remote, DbName}) -> - teardown(DbName); -teardown(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok. - -teardown(_, {Ctx, {Source, Target}}) -> - teardown(Source), - teardown(Target), - ok = application:stop(couch_replicator), - ok = test_util:stop_couch(Ctx). - -missing_stubs_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], - { - "Replicate docs with missing stubs (COUCHDB-1365)", - { - foreachx, - fun setup/1, fun teardown/2, - [{Pair, fun should_replicate_docs_with_missed_att_stubs/2} - || Pair <- Pairs] - } - }. 
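
The COUCHDB-1365 scenario needs the target to forget old revisions faster than the source produces them, so the suite drops the target's revs_limit to ?REVS_LIMIT (3) and then updates each source document 2 * ?REVS_LIMIT times; as should_set_target_revs_limit below shows, the limit is a plain per-database property:

    %% Keep at most three revisions per document on the target.
    {ok, Db} = couch_db:open_int(Target, [?ADMIN_CTX]),
    ok = couch_db:set_revs_limit(Db, 3),
    ok = couch_db:close(Db).
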
- - -should_replicate_docs_with_missed_att_stubs({From, To}, {_Ctx, {Source, Target}}) -> - {lists:flatten(io_lib:format("~p -> ~p", [From, To])), - {inorder, [ - should_populate_source(Source), - should_set_target_revs_limit(Target, ?REVS_LIMIT), - should_replicate(Source, Target), - should_compare_databases(Source, Target), - should_update_source_docs(Source, ?REVS_LIMIT * 2), - should_replicate(Source, Target), - should_compare_databases(Source, Target) - ]}}. - -should_populate_source({remote, Source}) -> - should_populate_source(Source); -should_populate_source(Source) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source))}. - -should_replicate({remote, Source}, Target) -> - should_replicate(db_url(Source), Target); -should_replicate(Source, {remote, Target}) -> - should_replicate(Source, db_url(Target)); -should_replicate(Source, Target) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}. - -should_set_target_revs_limit({remote, Target}, RevsLimit) -> - should_set_target_revs_limit(Target, RevsLimit); -should_set_target_revs_limit(Target, RevsLimit) -> - ?_test(begin - {ok, Db} = couch_db:open_int(Target, [?ADMIN_CTX]), - ?assertEqual(ok, couch_db:set_revs_limit(Db, RevsLimit)), - ok = couch_db:close(Db) - end). - -should_compare_databases({remote, Source}, Target) -> - should_compare_databases(Source, Target); -should_compare_databases(Source, {remote, Target}) -> - should_compare_databases(Source, Target); -should_compare_databases(Source, Target) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}. - -should_update_source_docs({remote, Source}, Times) -> - should_update_source_docs(Source, Times); -should_update_source_docs(Source, Times) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(update_db_docs(Source, Times))}. - - -populate_db(DbName) -> - {ok, Db} = couch_db:open_int(DbName, []), - AttData = crypto:strong_rand_bytes(6000), - Doc = #doc{ - id = <<"doc1">>, - atts = [ - couch_att:new([ - {name, <<"doc1_att1">>}, - {type, <<"application/foobar">>}, - {att_len, byte_size(AttData)}, - {data, AttData} - ]) - ] - }, - {ok, _} = couch_db:update_doc(Db, Doc, []), - couch_db:close(Db). - -update_db_docs(DbName, Times) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, _} = couch_db:fold_docs( - Db, - fun(FDI, Acc) -> db_fold_fun(FDI, Acc) end, - {DbName, Times}, - []), - ok = couch_db:close(Db). - -db_fold_fun(FullDocInfo, {DbName, Times}) -> - {ok, Db} = couch_db:open_int(DbName, []), - {ok, Doc} = couch_db:open_doc(Db, FullDocInfo), - lists:foldl( - fun(_, {Pos, RevId}) -> - {ok, Db2} = couch_db:reopen(Db), - NewDocVersion = Doc#doc{ - revs = {Pos, [RevId]}, - body = {[{<<"value">>, base64:encode(crypto:strong_rand_bytes(100))}]} - }, - {ok, NewRev} = couch_db:update_doc(Db2, NewDocVersion, []), - NewRev - end, - {element(1, Doc#doc.revs), hd(element(2, Doc#doc.revs))}, - lists:seq(1, Times)), - ok = couch_db:close(Db), - {ok, {DbName, Times}}. diff --git a/src/couch_replicator/test/couch_replicator_proxy_tests.erl b/src/couch_replicator/test/couch_replicator_proxy_tests.erl deleted file mode 100644 index 4f545bcb5..000000000 --- a/src/couch_replicator/test/couch_replicator_proxy_tests.erl +++ /dev/null @@ -1,69 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_proxy_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch_replicator/src/couch_replicator.hrl"). --include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl"). - - -setup() -> - ok. - - -teardown(_) -> - ok. - - -replicator_proxy_test_() -> - { - "replicator proxy tests", - { - setup, - fun() -> test_util:start_couch([couch_replicator]) end, fun test_util:stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun parse_rep_doc_without_proxy/1, - fun parse_rep_doc_with_proxy/1 - ] - } - } - }. - - -parse_rep_doc_without_proxy(_) -> - ?_test(begin - NoProxyDoc = {[ - {<<"source">>, <<"http://unproxied.com">>}, - {<<"target">>, <<"http://otherunproxied.com">>} - ]}, - Rep = couch_replicator_docs:parse_rep_doc(NoProxyDoc), - ?assertEqual((Rep#rep.source)#httpdb.proxy_url, undefined), - ?assertEqual((Rep#rep.target)#httpdb.proxy_url, undefined) - end). - - -parse_rep_doc_with_proxy(_) -> - ?_test(begin - ProxyURL = <<"http://myproxy.com">>, - ProxyDoc = {[ - {<<"source">>, <<"http://unproxied.com">>}, - {<<"target">>, <<"http://otherunproxied.com">>}, - {<<"proxy">>, ProxyURL} - ]}, - Rep = couch_replicator_docs:parse_rep_doc(ProxyDoc), - ?assertEqual((Rep#rep.source)#httpdb.proxy_url, binary_to_list(ProxyURL)), - ?assertEqual((Rep#rep.target)#httpdb.proxy_url, binary_to_list(ProxyURL)) - end). diff --git a/src/couch_replicator/test/couch_replicator_rate_limiter_tests.erl b/src/couch_replicator/test/couch_replicator_rate_limiter_tests.erl deleted file mode 100644 index 034550aec..000000000 --- a/src/couch_replicator/test/couch_replicator_rate_limiter_tests.erl +++ /dev/null @@ -1,89 +0,0 @@ --module(couch_replicator_rate_limiter_tests). - --include_lib("couch/include/couch_eunit.hrl"). - - -rate_limiter_test_() -> - { - foreach, - fun setup/0, - fun teardown/1, - [ - t_new_key(), - t_1_failure(), - t_2_failures_back_to_back(), - t_2_failures(), - t_success_threshold(), - t_1_failure_2_successes() - ] - }. - - -t_new_key() -> - ?_test(begin - ?assertEqual(0, couch_replicator_rate_limiter:interval({"foo", get})) - end). - - -t_1_failure() -> - ?_test(begin - ?assertEqual(24, couch_replicator_rate_limiter:failure({"foo", get})) - end). - - -t_2_failures() -> - ?_test(begin - couch_replicator_rate_limiter:failure({"foo", get}), - low_pass_filter_delay(), - Interval = couch_replicator_rate_limiter:failure({"foo", get}), - ?assertEqual(29, Interval) - end). - - -t_2_failures_back_to_back() -> - ?_test(begin - couch_replicator_rate_limiter:failure({"foo", get}), - Interval = couch_replicator_rate_limiter:failure({"foo", get}), - ?assertEqual(24, Interval) - end). - - -t_success_threshold() -> - ?_test(begin - Interval = couch_replicator_rate_limiter:success({"foo", get}), - ?assertEqual(0, Interval), - Interval = couch_replicator_rate_limiter:success({"foo", get}), - ?assertEqual(0, Interval) - end). 
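
Read together, the cases above and below outline the limiter's contract: an unknown {Target, Method} key has a zero interval, failures grow the backoff (more when spaced apart than back to back, because of the low-pass filter), and successes decay it toward zero. A usage sketch against the public API (the host name is a placeholder, and the concrete intervals depend on the filter's timing, so none are repeated here):

    %% One key per {endpoint, HTTP method}; interval/1 is what the
    %% replicator consults before issuing the next request.
    Key = {"http://db.example.org", get},
    Delay0 = couch_replicator_rate_limiter:interval(Key), % 0 for a new key
    DelayF = couch_replicator_rate_limiter:failure(Key),  % backoff grows
    DelayS = couch_replicator_rate_limiter:success(Key).  % and decays again
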
- - -t_1_failure_2_successes() -> - ?_test(begin - couch_replicator_rate_limiter:failure({"foo", get}), - low_pass_filter_delay(), - Succ1 = couch_replicator_rate_limiter:success({"foo", get}), - ?assertEqual(20, Succ1), - low_pass_filter_delay(), - Succ2 = couch_replicator_rate_limiter:success({"foo", get}), - ?assertEqual(0, Succ2) - end). - - -low_pass_filter_delay() -> - timer:sleep(100). - - -setup() -> - {ok, Pid} = couch_replicator_rate_limiter:start_link(), - Pid. - - -teardown(Pid) -> - Ref = erlang:monitor(process, Pid), - unlink(Pid), - exit(Pid, kill), - receive - {'DOWN', Ref, process, Pid, _} -> - ok - end, - ok. diff --git a/src/couch_replicator/test/couch_replicator_retain_stats_between_job_runs.erl b/src/couch_replicator/test/couch_replicator_retain_stats_between_job_runs.erl deleted file mode 100644 index 3b7377b78..000000000 --- a/src/couch_replicator/test/couch_replicator_retain_stats_between_job_runs.erl +++ /dev/null @@ -1,160 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_retain_stats_between_job_runs). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_replicator/src/couch_replicator.hrl"). - --define(DELAY, 500). --define(TIMEOUT, 60000). --define(i2l(I), integer_to_list(I)). --define(io2b(Io), iolist_to_binary(Io)). - - -setup() -> - Ctx = test_util:start_couch([couch_replicator]), - Source = setup_db(), - Target = setup_db(), - {Ctx, {Source, Target}}. - - -teardown({Ctx, {Source, Target}}) -> - teardown_db(Source), - teardown_db(Target), - ok = application:stop(couch_replicator), - ok = test_util:stop_couch(Ctx). - - -stats_retained_test_() -> - { - setup, - fun setup/0, - fun teardown/1, - fun t_stats_retained/1 - }. - - -t_stats_retained({_Ctx, {Source, Target}}) -> - ?_test(begin - populate_db(Source, 42), - {ok, RepPid, RepId} = replicate(Source, Target), - wait_target_in_sync(Source, Target), - check_active_tasks(42, 42), - reschedule_job(RepPid), - check_active_tasks(42, 42), - couch_replicator_scheduler:remove_job(RepId) - end). - - -setup_db() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - DbName. - - -teardown_db(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok. - - -reschedule_job(RepPid) -> - Ref = erlang:monitor(process, RepPid), - gen_server:cast(couch_replicator_scheduler, {set_max_jobs, 0}), - couch_replicator_scheduler:reschedule(), - receive - {'DOWN', Ref, _, _, _} -> ok - after ?TIMEOUT -> - erlang:error(timeout) - end, - gen_server:cast(couch_replicator_scheduler, {set_max_jobs, 500}), - couch_replicator_scheduler:reschedule(). - - -check_active_tasks(DocsRead, DocsWritten) -> - RepTask = wait_for_task_status(), - ?assertNotEqual(timeout, RepTask), - ?assertEqual(DocsRead, couch_util:get_value(docs_read, RepTask)), - ?assertEqual(DocsWritten, couch_util:get_value(docs_written, RepTask)). 
- - -replication_tasks() -> - lists:filter(fun(P) -> - couch_util:get_value(type, P) =:= replication - end, couch_task_status:all()). - - -wait_for_task_status() -> - test_util:wait(fun() -> - case replication_tasks() of - [] -> wait; - [RepTask] -> RepTask - end - end). - - -populate_db(DbName, DocCount) -> - {ok, Db} = couch_db:open_int(DbName, []), - Docs = lists:foldl( - fun(DocIdCounter, Acc) -> - Id = ?io2b(["doc", ?i2l(DocIdCounter)]), - Doc = #doc{id = Id, body = {[]}}, - [Doc | Acc] - end, - [], lists:seq(1, DocCount)), - {ok, _} = couch_db:update_docs(Db, Docs, []), - ok = couch_db:close(Db). - - -wait_target_in_sync(Source, Target) -> - {ok, SourceDb} = couch_db:open_int(Source, []), - {ok, SourceInfo} = couch_db:get_db_info(SourceDb), - ok = couch_db:close(SourceDb), - SourceDocCount = couch_util:get_value(doc_count, SourceInfo), - wait_target_in_sync_loop(SourceDocCount, Target, 300). - - -wait_target_in_sync_loop(_DocCount, _TargetName, 0) -> - erlang:error({assertion_failed, [ - {module, ?MODULE}, {line, ?LINE}, - {reason, "Could not get source and target databases in sync"} - ]}); - -wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) -> - {ok, Target} = couch_db:open_int(TargetName, []), - {ok, TargetInfo} = couch_db:get_db_info(Target), - ok = couch_db:close(Target), - TargetDocCount = couch_util:get_value(doc_count, TargetInfo), - case TargetDocCount == DocCount of - true -> - true; - false -> - ok = timer:sleep(?DELAY), - wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1) - end. - - -replicate(Source, Target) -> - SrcUrl = couch_replicator_test_helper:db_url(Source), - TgtUrl = couch_replicator_test_helper:db_url(Target), - RepObject = {[ - {<<"source">>, SrcUrl}, - {<<"target">>, TgtUrl}, - {<<"continuous">>, true} - ]}, - {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER), - ok = couch_replicator_scheduler:add_job(Rep), - couch_replicator_scheduler:reschedule(), - Pid = couch_replicator_test_helper:get_pid(Rep#rep.id), - {ok, Pid, Rep#rep.id}. diff --git a/src/couch_replicator/test/couch_replicator_selector_tests.erl b/src/couch_replicator/test/couch_replicator_selector_tests.erl deleted file mode 100644 index a7f4c5df3..000000000 --- a/src/couch_replicator/test/couch_replicator_selector_tests.erl +++ /dev/null @@ -1,121 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_selector_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_replicator/src/couch_replicator.hrl"). - - -setup(_) -> - Ctx = test_util:start_couch([couch_replicator]), - Source = create_db(), - create_docs(Source), - Target = create_db(), - {Ctx, {Source, Target}}. - -teardown(_, {Ctx, {Source, Target}}) -> - delete_db(Source), - delete_db(Target), - ok = application:stop(couch_replicator), - ok = test_util:stop_couch(Ctx). 
- -selector_replication_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], - { - "Selector filtered replication tests", - { - foreachx, - fun setup/1, fun teardown/2, - [{Pair, fun should_succeed/2} || Pair <- Pairs] - } - }. - -should_succeed({From, To}, {_Ctx, {Source, Target}}) -> - RepObject = {[ - {<<"source">>, db_url(From, Source)}, - {<<"target">>, db_url(To, Target)}, - {<<"selector">>, {[{<<"_id">>, <<"doc2">>}]}} - ]}, - {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), - %% FilteredFun is an Erlang version of following mango selector - FilterFun = fun(_DocId, {Props}) -> - couch_util:get_value(<<"_id">>, Props) == <<"doc2">> - end, - {ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun), - {lists:flatten(io_lib:format("~p -> ~p", [From, To])), [ - {"Target DB has proper number of docs", - ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))}, - {"All the docs selected as expected", - ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))} - ]}. - -compare_dbs(Source, Target, FilterFun) -> - {ok, SourceDb} = couch_db:open_int(Source, []), - {ok, TargetDb} = couch_db:open_int(Target, []), - {ok, TargetDbInfo} = couch_db:get_db_info(TargetDb), - Fun = fun(FullDocInfo, Acc) -> - {ok, DocId, SourceDoc} = read_doc(SourceDb, FullDocInfo), - TargetReply = read_doc(TargetDb, DocId), - case FilterFun(DocId, SourceDoc) of - true -> - ValidReply = {ok, DocId, SourceDoc} == TargetReply, - {ok, [ValidReply|Acc]}; - false -> - ValidReply = {not_found, missing} == TargetReply, - {ok, [ValidReply|Acc]} - end - end, - {ok, AllReplies} = couch_db:fold_docs(SourceDb, Fun, [], []), - ok = couch_db:close(SourceDb), - ok = couch_db:close(TargetDb), - {ok, TargetDbInfo, AllReplies}. - -read_doc(Db, DocIdOrInfo) -> - case couch_db:open_doc(Db, DocIdOrInfo) of - {ok, Doc} -> - {Props} = couch_doc:to_json_obj(Doc, [attachments]), - DocId = couch_util:get_value(<<"_id">>, Props), - {ok, DocId, {Props}}; - Error -> - Error - end. - -create_db() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - DbName. - -create_docs(DbName) -> - {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), - Doc1 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc1">>} - ]}), - Doc2 = couch_doc:from_json_obj({[ - {<<"_id">>, <<"doc2">>} - ]}), - {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2]), - couch_db:ensure_full_commit(Db), - couch_db:close(Db). - -delete_db(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]). - -db_url(local, DbName) -> - DbName; -db_url(remote, DbName) -> - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(couch_httpd, port), - ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])). diff --git a/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl b/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl deleted file mode 100644 index af3a285f5..000000000 --- a/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl +++ /dev/null @@ -1,188 +0,0 @@ --module(couch_replicator_small_max_request_size_target). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --import(couch_replicator_test_helper, [ - db_url/1, - replicate/1, - compare_dbs/3 -]). - --define(TIMEOUT_EUNIT, 360). - - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - DbName. 
- - -setup(local) -> - setup(); - -setup(remote) -> - {remote, setup()}; - -setup({A, B}) -> - Ctx = test_util:start_couch([couch_replicator]), - config:set("httpd", "max_http_request_size", "10000", false), - Source = setup(A), - Target = setup(B), - {Ctx, {Source, Target}}. - - -teardown({remote, DbName}) -> - teardown(DbName); -teardown(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok. - -teardown(_, {Ctx, {Source, Target}}) -> - teardown(Source), - teardown(Target), - ok = application:stop(couch_replicator), - ok = test_util:stop_couch(Ctx). - - -reduce_max_request_size_test_() -> - Pairs = [{local, remote}, {remote, remote}], - { - "Replicate docs when target has a small max_http_request_size", - { - foreachx, - fun setup/1, fun teardown/2, - [{Pair, fun should_replicate_all_docs/2} - || Pair <- Pairs] - ++ [{Pair, fun should_replicate_one/2} - || Pair <- Pairs] - % Disabled. See issue 574. Sometimes PUTs with a doc and - % attachment which exceed maximum request size are simply - % closed instead of returning a 413 request. That makes these - % tests flaky. - ++ [{Pair, fun should_replicate_one_with_attachment/2} - || Pair <- Pairs] - } - }. - - -% Test documents which are below max_http_request_size but when batched, batch size -% will be greater than max_http_request_size. Replicator could automatically split -% the batch into smaller batches and POST those separately. -should_replicate_all_docs({From, To}, {_Ctx, {Source, Target}}) -> - {lists:flatten(io_lib:format("~p -> ~p", [From, To])), - {inorder, [should_populate_source(Source), - should_replicate(Source, Target), - should_compare_databases(Source, Target, [])]}}. - - -% If a document is too large to post as a single request, that document is -% skipped but replication overall will make progress and not crash. -should_replicate_one({From, To}, {_Ctx, {Source, Target}}) -> - {lists:flatten(io_lib:format("~p -> ~p", [From, To])), - {inorder, [should_populate_source_one_large_one_small(Source), - should_replicate(Source, Target), - should_compare_databases(Source, Target, [<<"doc0">>])]}}. - - -% If a document has an attachment > 64 * 1024 bytes, replicator will switch to -% POST-ing individual documents directly and skip bulk_docs. Test that case -% separately -% See note in main test function why this was disabled. -should_replicate_one_with_attachment({From, To}, {_Ctx, {Source, Target}}) -> - {lists:flatten(io_lib:format("~p -> ~p", [From, To])), - {inorder, [should_populate_source_one_large_attachment(Source), - should_populate_source(Source), - should_replicate(Source, Target), - should_compare_databases(Source, Target, [<<"doc0">>])]}}. - - -should_populate_source({remote, Source}) -> - should_populate_source(Source); - -should_populate_source(Source) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(add_docs(Source, 5, 3000, 0))}. - - -should_populate_source_one_large_one_small({remote, Source}) -> - should_populate_source_one_large_one_small(Source); - -should_populate_source_one_large_one_small(Source) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_one_small(Source, 12000, 3000))}. - - -should_populate_source_one_large_attachment({remote, Source}) -> - should_populate_source_one_large_attachment(Source); - -should_populate_source_one_large_attachment(Source) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}. 
- - -should_replicate({remote, Source}, Target) -> - should_replicate(db_url(Source), Target); - -should_replicate(Source, {remote, Target}) -> - should_replicate(Source, db_url(Target)); - -should_replicate(Source, Target) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}. - - -should_compare_databases({remote, Source}, Target, ExceptIds) -> - should_compare_databases(Source, Target, ExceptIds); - -should_compare_databases(Source, {remote, Target}, ExceptIds) -> - should_compare_databases(Source, Target, ExceptIds); - -should_compare_databases(Source, Target, ExceptIds) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target, ExceptIds))}. - - -binary_chunk(Size) when is_integer(Size), Size > 0 -> - << <<"x">> || _ <- lists:seq(1, Size) >>. - - -add_docs(DbName, DocCount, DocSize, AttSize) -> - [begin - DocId = iolist_to_binary(["doc", integer_to_list(Id)]), - add_doc(DbName, DocId, DocSize, AttSize) - end || Id <- lists:seq(1, DocCount)], - ok. - - -one_large_one_small(DbName, Large, Small) -> - add_doc(DbName, <<"doc0">>, Large, 0), - add_doc(DbName, <<"doc1">>, Small, 0). - - -one_large_attachment(DbName, Size, AttSize) -> - add_doc(DbName, <<"doc0">>, Size, AttSize). - - -add_doc(DbName, DocId, Size, AttSize) when is_binary(DocId) -> - {ok, Db} = couch_db:open_int(DbName, []), - Doc0 = #doc{id = DocId, body = {[{<<"x">>, binary_chunk(Size)}]}}, - Doc = Doc0#doc{atts = atts(AttSize)}, - {ok, _} = couch_db:update_doc(Db, Doc, []), - couch_db:close(Db). - - -atts(0) -> - []; - -atts(Size) -> - [couch_att:new([ - {name, <<"att1">>}, - {type, <<"app/binary">>}, - {att_len, Size}, - {data, fun(Bytes) -> binary_chunk(Bytes) end} - ])]. - - -replicate(Source, Target) -> - replicate({[ - {<<"source">>, Source}, - {<<"target">>, Target}, - {<<"worker_processes">>, "1"} % This make batch_size predictable - ]}). diff --git a/src/couch_replicator/test/couch_replicator_test_helper.erl b/src/couch_replicator/test/couch_replicator_test_helper.erl deleted file mode 100644 index fd0409164..000000000 --- a/src/couch_replicator/test/couch_replicator_test_helper.erl +++ /dev/null @@ -1,135 +0,0 @@ --module(couch_replicator_test_helper). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_replicator/src/couch_replicator.hrl"). - --export([ - compare_dbs/2, - compare_dbs/3, - db_url/1, - replicate/1, - get_pid/1, - replicate/2 -]). - - -compare_dbs(Source, Target) -> - compare_dbs(Source, Target, []). - - -compare_dbs(Source, Target, ExceptIds) -> - {ok, SourceDb} = couch_db:open_int(Source, []), - {ok, TargetDb} = couch_db:open_int(Target, []), - - Fun = fun(FullDocInfo, Acc) -> - {ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo), - Id = DocSource#doc.id, - case lists:member(Id, ExceptIds) of - true -> - ?assertEqual(not_found, couch_db:get_doc_info(TargetDb, Id)); - false -> - {ok, TDoc} = couch_db:open_doc(TargetDb, Id), - compare_docs(DocSource, TDoc) - end, - {ok, Acc} - end, - - {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []), - ok = couch_db:close(SourceDb), - ok = couch_db:close(TargetDb). 
- - -compare_docs(Doc1, Doc2) -> - ?assertEqual(Doc1#doc.body, Doc2#doc.body), - #doc{atts = Atts1} = Doc1, - #doc{atts = Atts2} = Doc2, - ?assertEqual(lists:sort([couch_att:fetch(name, Att) || Att <- Atts1]), - lists:sort([couch_att:fetch(name, Att) || Att <- Atts2])), - FunCompareAtts = fun(Att) -> - AttName = couch_att:fetch(name, Att), - {ok, AttTarget} = find_att(Atts2, AttName), - SourceMd5 = att_md5(Att), - TargetMd5 = att_md5(AttTarget), - case AttName of - <<"att1">> -> - ?assertEqual(gzip, couch_att:fetch(encoding, Att)), - ?assertEqual(gzip, couch_att:fetch(encoding, AttTarget)), - DecSourceMd5 = att_decoded_md5(Att), - DecTargetMd5 = att_decoded_md5(AttTarget), - ?assertEqual(DecSourceMd5, DecTargetMd5); - _ -> - ?assertEqual(identity, couch_att:fetch(encoding, AttTarget)), - ?assertEqual(identity, couch_att:fetch(encoding, AttTarget)) - end, - ?assertEqual(SourceMd5, TargetMd5), - ?assert(is_integer(couch_att:fetch(disk_len, Att))), - ?assert(is_integer(couch_att:fetch(att_len, Att))), - ?assert(is_integer(couch_att:fetch(disk_len, AttTarget))), - ?assert(is_integer(couch_att:fetch(att_len, AttTarget))), - ?assertEqual(couch_att:fetch(disk_len, Att), - couch_att:fetch(disk_len, AttTarget)), - ?assertEqual(couch_att:fetch(att_len, Att), - couch_att:fetch(att_len, AttTarget)), - ?assertEqual(couch_att:fetch(type, Att), - couch_att:fetch(type, AttTarget)), - ?assertEqual(couch_att:fetch(md5, Att), - couch_att:fetch(md5, AttTarget)) - end, - lists:foreach(FunCompareAtts, Atts1). - - -find_att([], _Name) -> - nil; -find_att([Att | Rest], Name) -> - case couch_att:fetch(name, Att) of - Name -> - {ok, Att}; - _ -> - find_att(Rest, Name) - end. - - -att_md5(Att) -> - Md50 = couch_att:foldl( - Att, - fun(Chunk, Acc) -> couch_hash:md5_hash_update(Acc, Chunk) end, - couch_hash:md5_hash_init()), - couch_hash:md5_hash_final(Md50). - -att_decoded_md5(Att) -> - Md50 = couch_att:foldl_decode( - Att, - fun(Chunk, Acc) -> couch_hash:md5_hash_update(Acc, Chunk) end, - couch_hash:md5_hash_init()), - couch_hash:md5_hash_final(Md50). - -db_url(DbName) -> - iolist_to_binary([ - "http://", config:get("httpd", "bind_address", "127.0.0.1"), - ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)), - "/", DbName - ]). - -get_pid(RepId) -> - Pid = global:whereis_name({couch_replicator_scheduler_job,RepId}), - ?assert(is_pid(Pid)), - Pid. - -replicate(Source, Target) -> - replicate({[ - {<<"source">>, Source}, - {<<"target">>, Target} - ]}). - -replicate({[_ | _]} = RepObject) -> - {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER), - ok = couch_replicator_scheduler:add_job(Rep), - couch_replicator_scheduler:reschedule(), - Pid = get_pid(Rep#rep.id), - MonRef = erlang:monitor(process, Pid), - receive - {'DOWN', MonRef, process, Pid, _} -> - ok - end, - ok = couch_replicator_scheduler:remove_job(Rep#rep.id). diff --git a/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl b/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl deleted file mode 100644 index c2fcf8bf1..000000000 --- a/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl +++ /dev/null @@ -1,181 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(couch_replicator_use_checkpoints_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --import(couch_replicator_test_helper, [ - db_url/1, - replicate/1 -]). - --define(DOCS_COUNT, 100). --define(TIMEOUT_EUNIT, 30). --define(i2l(I), integer_to_list(I)). --define(io2b(Io), iolist_to_binary(Io)). - - -start(false) -> - fun - ({finished, _, {CheckpointHistory}}) -> - ?assertEqual([{<<"use_checkpoints">>,false}], CheckpointHistory); - (_) -> - ok - end; -start(true) -> - fun - ({finished, _, {CheckpointHistory}}) -> - ?assertNotEqual(false, lists:keyfind(<<"session_id">>, - 1, CheckpointHistory)); - (_) -> - ok - end. - -stop(_, _) -> - ok. - -setup() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db), - DbName. - -setup(local) -> - setup(); -setup(remote) -> - {remote, setup()}; -setup({_, Fun, {A, B}}) -> - Ctx = test_util:start_couch([couch_replicator]), - {ok, Listener} = couch_replicator_notifier:start_link(Fun), - Source = setup(A), - Target = setup(B), - {Ctx, {Source, Target, Listener}}. - -teardown({remote, DbName}) -> - teardown(DbName); -teardown(DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]), - ok. - -teardown(_, {Ctx, {Source, Target, Listener}}) -> - teardown(Source), - teardown(Target), - - couch_replicator_notifier:stop(Listener), - ok = application:stop(couch_replicator), - ok = test_util:stop_couch(Ctx). - -use_checkpoints_test_() -> - { - "Replication use_checkpoints feature tests", - { - foreachx, - fun start/1, fun stop/2, - [{UseCheckpoints, fun use_checkpoints_tests/2} - || UseCheckpoints <- [false, true]] - } - }. - -use_checkpoints_tests(UseCheckpoints, Fun) -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], - { - "use_checkpoints: " ++ atom_to_list(UseCheckpoints), - { - foreachx, - fun setup/1, fun teardown/2, - [{{UseCheckpoints, Fun, Pair}, fun should_test_checkpoints/2} - || Pair <- Pairs] - } - }. - -should_test_checkpoints({UseCheckpoints, _, {From, To}}, {_Ctx, {Source, Target, _}}) -> - should_test_checkpoints(UseCheckpoints, {From, To}, {Source, Target}). -should_test_checkpoints(UseCheckpoints, {From, To}, {Source, Target}) -> - {lists:flatten(io_lib:format("~p -> ~p", [From, To])), - {inorder, [ - should_populate_source(Source, ?DOCS_COUNT), - should_replicate(Source, Target, UseCheckpoints), - should_compare_databases(Source, Target) - ]}}. - -should_populate_source({remote, Source}, DocCount) -> - should_populate_source(Source, DocCount); -should_populate_source(Source, DocCount) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source, DocCount))}. - -should_replicate({remote, Source}, Target, UseCheckpoints) -> - should_replicate(db_url(Source), Target, UseCheckpoints); -should_replicate(Source, {remote, Target}, UseCheckpoints) -> - should_replicate(Source, db_url(Target), UseCheckpoints); -should_replicate(Source, Target, UseCheckpoints) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target, UseCheckpoints))}. 
- -should_compare_databases({remote, Source}, Target) -> - should_compare_databases(Source, Target); -should_compare_databases(Source, {remote, Target}) -> - should_compare_databases(Source, Target); -should_compare_databases(Source, Target) -> - {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}. - - -populate_db(DbName, DocCount) -> - {ok, Db} = couch_db:open_int(DbName, []), - Docs = lists:foldl( - fun(DocIdCounter, Acc) -> - Id = ?io2b(["doc", ?i2l(DocIdCounter)]), - Value = ?io2b(["val", ?i2l(DocIdCounter)]), - Doc = #doc{ - id = Id, - body = {[ {<<"value">>, Value} ]} - }, - [Doc | Acc] - end, - [], lists:seq(1, DocCount)), - {ok, _} = couch_db:update_docs(Db, Docs, []), - ok = couch_db:close(Db). - -compare_dbs(Source, Target) -> - {ok, SourceDb} = couch_db:open_int(Source, []), - {ok, TargetDb} = couch_db:open_int(Target, []), - Fun = fun(FullDocInfo, Acc) -> - {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo), - {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]), - DocId = couch_util:get_value(<<"_id">>, Props), - DocTarget = case couch_db:open_doc(TargetDb, DocId) of - {ok, DocT} -> - DocT; - Error -> - erlang:error( - {assertion_failed, - [{module, ?MODULE}, {line, ?LINE}, - {reason, lists:concat(["Error opening document '", - ?b2l(DocId), "' from target: ", - couch_util:to_list(Error)])}]}) - end, - DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]), - ?assertEqual(DocJson, DocTargetJson), - {ok, Acc} - end, - {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []), - ok = couch_db:close(SourceDb), - ok = couch_db:close(TargetDb). - -replicate(Source, Target, UseCheckpoints) -> - replicate({[ - {<<"source">>, Source}, - {<<"target">>, Target}, - {<<"use_checkpoints">>, UseCheckpoints} - ]}). - diff --git a/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl b/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl new file mode 100644 index 000000000..7fe84d2d9 --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl @@ -0,0 +1,104 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_attachments_too_large). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch_replicator/src/couch_replicator.hrl"). + + +setup(_) -> + Ctx = test_util:start_couch([couch_replicator]), + Source = create_db(), + create_doc_with_attachment(Source, <<"doc">>, 1000), + Target = create_db(), + {Ctx, {Source, Target}}. + + +teardown(_, {Ctx, {Source, Target}}) -> + delete_db(Source), + delete_db(Target), + config:delete("couchdb", "max_attachment_size"), + ok = test_util:stop_couch(Ctx). + + +attachment_too_large_replication_test_() -> + Pairs = [{local, remote}, {remote, local}, {remote, remote}], + { + "Attachment size too large replication tests", + { + foreachx, + fun setup/1, fun teardown/2, + [{Pair, fun should_succeed/2} || Pair <- Pairs] ++ + [{Pair, fun should_fail/2} || Pair <- Pairs] + } + }. 
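+% The foreachx instantiator above runs setup/1 and each of the test funs once
+% per {From, To} pair. A rough sketch of how db_url/2 (defined at the end of
+% this module) resolves a pair element (Addr and Port are placeholders here;
+% the real values come from the running test node):
+%
+%   db_url(local, <<"db">>)  -> <<"db">>
+%   db_url(remote, <<"db">>) -> <<"http://Addr:Port/db">>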
+ + +should_succeed({From, To}, {_Ctx, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(From, Source)}, + {<<"target">>, db_url(To, Target)} + ]}, + config:set("couchdb", "max_attachment_size", "1000", _Persist = false), + {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + ?_assertEqual(ok, couch_replicator_test_helper:compare_dbs(Source, Target)). + + +should_fail({From, To}, {_Ctx, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(From, Source)}, + {<<"target">>, db_url(To, Target)} + ]}, + config:set("couchdb", "max_attachment_size", "999", _Persist = false), + {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + ?_assertError({badmatch, {not_found, missing}}, + couch_replicator_test_helper:compare_dbs(Source, Target)). + + +create_db() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + DbName. + + +create_doc_with_attachment(DbName, DocId, AttSize) -> + {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), + Doc = #doc{id = DocId, atts = att(AttSize)}, + {ok, _} = couch_db:update_doc(Db, Doc, []), + couch_db:close(Db), + ok. + + +att(Size) when is_integer(Size), Size >= 1 -> + [couch_att:new([ + {name, <<"att">>}, + {type, <<"app/binary">>}, + {att_len, Size}, + {data, fun(_Bytes) -> + << <<"x">> || _ <- lists:seq(1, Size) >> + end} + ])]. + + +delete_db(DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]). + + +db_url(local, DbName) -> + DbName; +db_url(remote, DbName) -> + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(couch_httpd, port), + ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])). diff --git a/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl new file mode 100644 index 000000000..7cc530c19 --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl @@ -0,0 +1,455 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_compact_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch_replicator/src/couch_replicator.hrl"). + +-import(couch_replicator_test_helper, [ + db_url/1, + get_pid/1 +]). + +-define(ATTFILE, filename:join([?FIXTURESDIR, "logo.png"])). +-define(DELAY, 500). +-define(TIMEOUT, 360000). +-define(TIMEOUT_WRITER, 100000). +-define(TIMEOUT_EUNIT, ?TIMEOUT div 1000 + 70). + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + DbName. + +setup(local) -> + setup(); +setup(remote) -> + {remote, setup()}; +setup({A, B}) -> + Ctx = test_util:start_couch([couch_replicator]), + Source = setup(A), + Target = setup(B), + {Ctx, {Source, Target}}. + +teardown({remote, DbName}) -> + teardown(DbName); +teardown(DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + ok. 
+ +teardown(_, {Ctx, {Source, Target}}) -> + teardown(Source), + teardown(Target), + ok = application:stop(couch_replicator), + ok = test_util:stop_couch(Ctx). + +compact_test_() -> + Pairs = [{local, local}, {local, remote}, + {remote, local}, {remote, remote}], + { + "Compaction during replication tests", + { + foreachx, + fun setup/1, fun teardown/2, + [{Pair, fun should_populate_replicate_compact/2} + || Pair <- Pairs] + } + }. + + +should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) -> + {ok, RepPid, RepId} = replicate(Source, Target), + {lists:flatten(io_lib:format("~p -> ~p", [From, To])), + {inorder, [ + should_run_replication(RepPid, RepId, Source, Target), + should_all_processes_be_alive(RepPid, Source, Target), + should_populate_and_compact(RepPid, Source, Target, 50, 3), + should_wait_target_in_sync(Source, Target), + should_ensure_replication_still_running(RepPid, RepId, Source, Target), + should_cancel_replication(RepId, RepPid), + should_compare_databases(Source, Target) + ]}}. + +should_all_processes_be_alive(RepPid, Source, Target) -> + ?_test(begin + {ok, SourceDb} = reopen_db(Source), + {ok, TargetDb} = reopen_db(Target), + ?assert(is_process_alive(RepPid)), + ?assert(is_process_alive(couch_db:get_pid(SourceDb))), + ?assert(is_process_alive(couch_db:get_pid(TargetDb))) + end). + +should_run_replication(RepPid, RepId, Source, Target) -> + ?_test(check_active_tasks(RepPid, RepId, Source, Target)). + +should_ensure_replication_still_running(RepPid, RepId, Source, Target) -> + ?_test(check_active_tasks(RepPid, RepId, Source, Target)). + +check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) -> + Source = case Src of + {remote, NameSrc} -> + <<(db_url(NameSrc))/binary, $/>>; + _ -> + Src + end, + Target = case Tgt of + {remote, NameTgt} -> + <<(db_url(NameTgt))/binary, $/>>; + _ -> + Tgt + end, + FullRepId = ?l2b(BaseId ++ Ext), + Pid = ?l2b(pid_to_list(RepPid)), + RepTasks = wait_for_task_status(), + ?assertNotEqual(timeout, RepTasks), + [RepTask] = RepTasks, + ?assertEqual(Pid, couch_util:get_value(pid, RepTask)), + ?assertEqual(FullRepId, couch_util:get_value(replication_id, RepTask)), + ?assertEqual(true, couch_util:get_value(continuous, RepTask)), + ?assertEqual(Source, couch_util:get_value(source, RepTask)), + ?assertEqual(Target, couch_util:get_value(target, RepTask)), + ?assert(is_integer(couch_util:get_value(docs_read, RepTask))), + ?assert(is_integer(couch_util:get_value(docs_written, RepTask))), + ?assert(is_integer(couch_util:get_value(doc_write_failures, RepTask))), + ?assert(is_integer(couch_util:get_value(revisions_checked, RepTask))), + ?assert(is_integer(couch_util:get_value(missing_revisions_found, RepTask))), + ?assert(is_integer(couch_util:get_value(checkpointed_source_seq, RepTask))), + ?assert(is_integer(couch_util:get_value(source_seq, RepTask))), + Pending = couch_util:get_value(changes_pending, RepTask), + ?assert(is_integer(Pending)). + +replication_tasks() -> + lists:filter(fun(P) -> + couch_util:get_value(type, P) =:= replication + end, couch_task_status:all()). + + +wait_for_task_status() -> + test_util:wait(fun() -> + case replication_tasks() of + [] -> + wait; + Tasks -> + Tasks + end + end). + +should_cancel_replication(RepId, RepPid) -> + ?_assertNot(begin + ok = couch_replicator_scheduler:remove_job(RepId), + is_process_alive(RepPid) + end). 
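+% test_util:wait/1, used by wait_for_task_status/0 above, re-invokes the
+% supplied fun until it returns something other than the atom 'wait' (or the
+% wait itself times out), so returning 'wait' means "no replication task
+% registered yet, poll again". The same polling pattern in isolation:
+%
+%   test_util:wait(fun() ->
+%       case couch_task_status:all() of [] -> wait; Tasks -> Tasks end
+%   end)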
+ +should_populate_and_compact(RepPid, Source, Target, BatchSize, Rounds) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(begin + {ok, SourceDb0} = reopen_db(Source), + Writer = spawn_writer(SourceDb0), + lists:foreach( + fun(N) -> + {ok, SourceDb} = reopen_db(Source), + {ok, TargetDb} = reopen_db(Target), + pause_writer(Writer), + + compact_db("source", SourceDb), + ?assert(is_process_alive(RepPid)), + ?assert(is_process_alive(couch_db:get_pid(SourceDb))), + wait_for_compaction("source", SourceDb), + + compact_db("target", TargetDb), + ?assert(is_process_alive(RepPid)), + ?assert(is_process_alive(couch_db:get_pid(TargetDb))), + wait_for_compaction("target", TargetDb), + + {ok, SourceDb2} = reopen_db(SourceDb), + {ok, TargetDb2} = reopen_db(TargetDb), + + resume_writer(Writer), + wait_writer(Writer, BatchSize * N), + + compact_db("source", SourceDb2), + ?assert(is_process_alive(RepPid)), + ?assert(is_process_alive(couch_db:get_pid(SourceDb2))), + pause_writer(Writer), + wait_for_compaction("source", SourceDb2), + resume_writer(Writer), + + compact_db("target", TargetDb2), + ?assert(is_process_alive(RepPid)), + ?assert(is_process_alive(couch_db:get_pid(TargetDb2))), + pause_writer(Writer), + wait_for_compaction("target", TargetDb2), + resume_writer(Writer) + end, lists:seq(1, Rounds)), + stop_writer(Writer) + end)}. + +should_wait_target_in_sync({remote, Source}, Target) -> + should_wait_target_in_sync(Source, Target); +should_wait_target_in_sync(Source, {remote, Target}) -> + should_wait_target_in_sync(Source, Target); +should_wait_target_in_sync(Source, Target) -> + {timeout, ?TIMEOUT_EUNIT, ?_assert(begin + {ok, SourceDb} = couch_db:open_int(Source, []), + {ok, SourceInfo} = couch_db:get_db_info(SourceDb), + ok = couch_db:close(SourceDb), + SourceDocCount = couch_util:get_value(doc_count, SourceInfo), + wait_target_in_sync_loop(SourceDocCount, Target, 300) + end)}. + +wait_target_in_sync_loop(_DocCount, _TargetName, 0) -> + erlang:error( + {assertion_failed, + [{module, ?MODULE}, {line, ?LINE}, + {reason, "Could not get source and target databases in sync"}]}); +wait_target_in_sync_loop(DocCount, {remote, TargetName}, RetriesLeft) -> + wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft); +wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) -> + {ok, Target} = couch_db:open_int(TargetName, []), + {ok, TargetInfo} = couch_db:get_db_info(Target), + ok = couch_db:close(Target), + TargetDocCount = couch_util:get_value(doc_count, TargetInfo), + case TargetDocCount == DocCount of + true -> + true; + false -> + ok = timer:sleep(?DELAY), + wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1) + end. 
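+% Back-of-the-envelope budget for wait_target_in_sync_loop/3 above: 300
+% retries with a ?DELAY of 500 ms between attempts gives the target roughly
+% 150 seconds to catch up, comfortably inside ?TIMEOUT_EUNIT (?TIMEOUT div
+% 1000 + 70, i.e. 430 seconds with the defines at the top of this module).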
+ +should_compare_databases({remote, Source}, Target) -> + should_compare_databases(Source, Target); +should_compare_databases(Source, {remote, Target}) -> + should_compare_databases(Source, Target); +should_compare_databases(Source, Target) -> + {timeout, 35, ?_test(begin + {ok, SourceDb} = couch_db:open_int(Source, []), + {ok, TargetDb} = couch_db:open_int(Target, []), + Fun = fun(FullDocInfo, Acc) -> + {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo), + {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]), + DocId = couch_util:get_value(<<"_id">>, Props), + DocTarget = case couch_db:open_doc(TargetDb, DocId) of + {ok, DocT} -> + DocT; + Error -> + erlang:error( + {assertion_failed, + [{module, ?MODULE}, {line, ?LINE}, + {reason, lists:concat(["Error opening document '", + ?b2l(DocId), "' from target: ", + couch_util:to_list(Error)])}]}) + end, + DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]), + ?assertEqual(DocJson, DocTargetJson), + {ok, Acc} + end, + {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []), + ok = couch_db:close(SourceDb), + ok = couch_db:close(TargetDb) + end)}. + + +reopen_db({remote, Db}) -> + reopen_db(Db); +reopen_db(DbName) when is_binary(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + ok = couch_db:close(Db), + {ok, Db}; +reopen_db(Db) -> + reopen_db(couch_db:name(Db)). + + +compact_db(Type, Db0) -> + Name = couch_db:name(Db0), + {ok, Db} = couch_db:open_int(Name, []), + {ok, CompactPid} = couch_db:start_compact(Db), + MonRef = erlang:monitor(process, CompactPid), + receive + {'DOWN', MonRef, process, CompactPid, normal} -> + ok; + {'DOWN', MonRef, process, CompactPid, noproc} -> + ok; + {'DOWN', MonRef, process, CompactPid, Reason} -> + erlang:error( + {assertion_failed, + [{module, ?MODULE}, {line, ?LINE}, + {reason, + lists:concat(["Error compacting ", Type, " database ", + ?b2l(Name), ": ", + couch_util:to_list(Reason)])}]}) + after ?TIMEOUT -> + erlang:error( + {assertion_failed, + [{module, ?MODULE}, {line, ?LINE}, + {reason, lists:concat(["Compaction for ", Type, " database ", + ?b2l(Name), " didn't finish"])}]}) + end, + ok = couch_db:close(Db). + +wait_for_compaction(Type, Db) -> + case couch_db:wait_for_compaction(Db) of + ok -> + ok; + {error, noproc} -> + ok; + {error, Reason} -> + erlang:error( + {assertion_failed, + [{module, ?MODULE}, {line, ?LINE}, + {reason, lists:concat(["Compaction of ", Type, + " database failed with: ", Reason])}]}) + end. + +replicate({remote, Db}, Target) -> + replicate(db_url(Db), Target); + +replicate(Source, {remote, Db}) -> + replicate(Source, db_url(Db)); + +replicate(Source, Target) -> + RepObject = {[ + {<<"source">>, Source}, + {<<"target">>, Target}, + {<<"continuous">>, true} + ]}, + {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER), + ok = couch_replicator_scheduler:add_job(Rep), + couch_replicator_scheduler:reschedule(), + Pid = get_pid(Rep#rep.id), + {ok, Pid, Rep#rep.id}. + + +wait_writer(Pid, NumDocs) -> + case get_writer_num_docs_written(Pid) of + N when N >= NumDocs -> + ok; + _ -> + wait_writer(Pid, NumDocs) + end. + +spawn_writer(Db) -> + Parent = self(), + Pid = spawn(fun() -> writer_loop(Db, Parent, 0) end), + Pid. + + +pause_writer(Pid) -> + Ref = make_ref(), + Pid ! {pause, Ref}, + receive + {paused, Ref} -> + ok + after ?TIMEOUT_WRITER -> + erlang:error({assertion_failed, + [{module, ?MODULE}, + {line, ?LINE}, + {reason, "Failed to pause source database writer"}]}) + end. + +resume_writer(Pid) -> + Ref = make_ref(), + Pid ! 
{continue, Ref},
+    receive
+        {ok, Ref} ->
+            ok
+    after ?TIMEOUT_WRITER ->
+        erlang:error({assertion_failed,
+            [{module, ?MODULE},
+             {line, ?LINE},
+             {reason, "Failed to resume source database writer"}]})
+    end.
+
+get_writer_num_docs_written(Pid) ->
+    Ref = make_ref(),
+    Pid ! {get_count, Ref},
+    receive
+        {count, Ref, Count} ->
+            Count
+    after ?TIMEOUT_WRITER ->
+        erlang:error({assertion_failed,
+            [{module, ?MODULE},
+             {line, ?LINE},
+             {reason, "Timeout getting number of documents written"
+                      " from source database writer"}]})
+    end.
+
+stop_writer(Pid) ->
+    Ref = make_ref(),
+    Pid ! {stop, Ref},
+    receive
+        {stopped, Ref, DocsWritten} ->
+            MonRef = erlang:monitor(process, Pid),
+            receive
+                {'DOWN', MonRef, process, Pid, _Reason} ->
+                    DocsWritten
+            after ?TIMEOUT ->
+                erlang:error({assertion_failed,
+                    [{module, ?MODULE},
+                     {line, ?LINE},
+                     {reason, "Timeout stopping source database writer"}]})
+            end
+    after ?TIMEOUT_WRITER ->
+        erlang:error({assertion_failed,
+            [{module, ?MODULE},
+             {line, ?LINE},
+             {reason, "Timeout stopping source database writer"}]})
+    end.
+
+writer_loop(Db0, Parent, Counter) ->
+    DbName = couch_db:name(Db0),
+    {ok, Data} = file:read_file(?ATTFILE),
+    maybe_pause(Parent, Counter),
+    Doc = couch_doc:from_json_obj({[
+        {<<"_id">>, ?l2b(integer_to_list(Counter + 1))},
+        {<<"value">>, Counter + 1},
+        {<<"_attachments">>, {[
+            {<<"icon1.png">>, {[
+                {<<"data">>, base64:encode(Data)},
+                {<<"content_type">>, <<"image/png">>}
+            ]}},
+            {<<"icon2.png">>, {[
+                {<<"data">>, base64:encode(iolist_to_binary([Data, Data]))},
+                {<<"content_type">>, <<"image/png">>}
+            ]}}
+        ]}}
+    ]}),
+    maybe_pause(Parent, Counter),
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, _} = couch_db:update_doc(Db, Doc, []),
+    ok = couch_db:close(Db),
+    receive
+        {get_count, Ref} ->
+            Parent ! {count, Ref, Counter + 1},
+            writer_loop(Db, Parent, Counter + 1);
+        {stop, Ref} ->
+            Parent ! {stopped, Ref, Counter + 1}
+    after 0 ->
+        timer:sleep(?DELAY),
+        writer_loop(Db, Parent, Counter + 1)
+    end.
+
+maybe_pause(Parent, Counter) ->
+    receive
+        {get_count, Ref} ->
+            Parent ! {count, Ref, Counter};
+        {pause, Ref} ->
+            Parent ! {paused, Ref},
+            receive
+                {continue, Ref2} ->
+                    Parent ! {ok, Ref2}
+            end
+    after 0 ->
+        ok
+    end.
diff --git a/src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl
new file mode 100644
index 000000000..e75cc5a63
--- /dev/null
+++ b/src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl
@@ -0,0 +1,241 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replicator_connection_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(TIMEOUT, 1000).
+
+
+setup() ->
+    Host = config:get("httpd", "bind_address", "127.0.0.1"),
+    Port = config:get("httpd", "port", "5984"),
+    {Host, Port}.
+
+teardown(_) ->
+    ok.
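+% Note on the cases below: sys:get_status(couch_replicator_connection) is
+% used purely as a synchronization barrier. The call is itself a gen_server
+% request, so it only returns once the server has processed everything that
+% was already in its mailbox, including the close_idle_connections message
+% sent just before it.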
+ + +httpc_pool_test_() -> + { + "replicator connection sharing tests", + { + setup, + fun() -> test_util:start_couch([couch_replicator]) end, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun connections_shared_after_release/1, + fun connections_not_shared_after_owner_death/1, + fun idle_connections_closed/1, + fun test_owner_monitors/1, + fun worker_discards_creds_on_create/1, + fun worker_discards_url_creds_after_request/1, + fun worker_discards_creds_in_headers_after_request/1, + fun worker_discards_proxy_creds_after_request/1 + ] + } + } + }. + + +connections_shared_after_release({Host, Port}) -> + ?_test(begin + URL = "http://" ++ Host ++ ":" ++ Port, + Self = self(), + {ok, Pid} = couch_replicator_connection:acquire(URL), + couch_replicator_connection:release(Pid), + spawn(fun() -> + Self ! couch_replicator_connection:acquire(URL) + end), + receive + {ok, Pid2} -> + ?assertEqual(Pid, Pid2) + end + end). + + +connections_not_shared_after_owner_death({Host, Port}) -> + ?_test(begin + URL = "http://" ++ Host ++ ":" ++ Port, + Self = self(), + spawn(fun() -> + Self ! couch_replicator_connection:acquire(URL), + error("simulate division by zero without compiler warning") + end), + receive + {ok, Pid} -> + {ok, Pid2} = couch_replicator_connection:acquire(URL), + ?assertNotEqual(Pid, Pid2), + MRef = monitor(process, Pid), + receive {'DOWN', MRef, process, Pid, _Reason} -> + ?assert(not is_process_alive(Pid)); + Other -> throw(Other) + end + end + end). + + +idle_connections_closed({Host, Port}) -> + ?_test(begin + URL = "http://" ++ Host ++ ":" ++ Port, + {ok, Pid} = couch_replicator_connection:acquire(URL), + couch_replicator_connection ! close_idle_connections, + ?assert(ets:member(couch_replicator_connection, Pid)), + % block until idle connections have closed + sys:get_status(couch_replicator_connection), + couch_replicator_connection:release(Pid), + couch_replicator_connection ! close_idle_connections, + % block until idle connections have closed + sys:get_status(couch_replicator_connection), + ?assert(not ets:member(couch_replicator_connection, Pid)) + end). + + +test_owner_monitors({Host, Port}) -> + ?_test(begin + URL = "http://" ++ Host ++ ":" ++ Port, + {ok, Worker0} = couch_replicator_connection:acquire(URL), + assert_monitors_equal([{process, self()}]), + couch_replicator_connection:release(Worker0), + assert_monitors_equal([]), + {Workers, Monitors} = lists:foldl(fun(_, {WAcc, MAcc}) -> + {ok, Worker1} = couch_replicator_connection:acquire(URL), + MAcc1 = [{process, self()} | MAcc], + assert_monitors_equal(MAcc1), + {[Worker1 | WAcc], MAcc1} + end, {[], []}, lists:seq(1,5)), + lists:foldl(fun(Worker2, Acc) -> + [_ | NewAcc] = Acc, + couch_replicator_connection:release(Worker2), + assert_monitors_equal(NewAcc), + NewAcc + end, Monitors, Workers) + end). + + +worker_discards_creds_on_create({Host, Port}) -> + ?_test(begin + {User, Pass, B64Auth} = user_pass(), + URL = "http://" ++ User ++ ":" ++ Pass ++ "@" ++ Host ++ ":" ++ Port, + {ok, WPid} = couch_replicator_connection:acquire(URL), + Internals = worker_internals(WPid), + ?assert(string:str(Internals, B64Auth) =:= 0), + ?assert(string:str(Internals, Pass) =:= 0) + end). 
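+% string:str/2 returns 0 when the needle does not occur in the string at all,
+% so the '=:= 0' assertions here and in the cases below really mean "neither
+% the base64-encoded credentials nor the plaintext password appear anywhere
+% in the worker's process dictionary or state".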
+ + +worker_discards_url_creds_after_request({Host, _}) -> + ?_test(begin + {User, Pass, B64Auth} = user_pass(), + {Port, ServerPid} = server(), + PortStr = integer_to_list(Port), + URL = "http://" ++ User ++ ":" ++ Pass ++ "@" ++ Host ++ ":" ++ PortStr, + {ok, WPid} = couch_replicator_connection:acquire(URL), + ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, [], [])), + Internals = worker_internals(WPid), + ?assert(string:str(Internals, B64Auth) =:= 0), + ?assert(string:str(Internals, Pass) =:= 0), + couch_replicator_connection:release(WPid), + unlink(ServerPid), + exit(ServerPid, kill) + end). + + +worker_discards_creds_in_headers_after_request({Host, _}) -> + ?_test(begin + {_User, Pass, B64Auth} = user_pass(), + {Port, ServerPid} = server(), + PortStr = integer_to_list(Port), + URL = "http://" ++ Host ++ ":" ++ PortStr, + {ok, WPid} = couch_replicator_connection:acquire(URL), + Headers = [{"Authorization", "Basic " ++ B64Auth}], + ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, Headers, [])), + Internals = worker_internals(WPid), + ?assert(string:str(Internals, B64Auth) =:= 0), + ?assert(string:str(Internals, Pass) =:= 0), + couch_replicator_connection:release(WPid), + unlink(ServerPid), + exit(ServerPid, kill) + end). + + +worker_discards_proxy_creds_after_request({Host, _}) -> + ?_test(begin + {User, Pass, B64Auth} = user_pass(), + {Port, ServerPid} = server(), + PortStr = integer_to_list(Port), + URL = "http://" ++ Host ++ ":" ++ PortStr, + {ok, WPid} = couch_replicator_connection:acquire(URL), + Opts = [ + {proxy_host, Host}, + {proxy_port, Port}, + {proxy_user, User}, + {proxy_pass, Pass} + ], + ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, [], Opts)), + Internals = worker_internals(WPid), + ?assert(string:str(Internals, B64Auth) =:= 0), + ?assert(string:str(Internals, Pass) =:= 0), + couch_replicator_connection:release(WPid), + unlink(ServerPid), + exit(ServerPid, kill) + end). + + +send_req(WPid, URL, Headers, Opts) -> + ibrowse:send_req_direct(WPid, URL, Headers, get, [], Opts). + + +user_pass() -> + User = "specialuser", + Pass = "averysecretpassword", + B64Auth = ibrowse_lib:encode_base64(User ++ ":" ++ Pass), + {User, Pass, B64Auth}. + + +worker_internals(Pid) -> + Dict = io_lib:format("~p", [erlang:process_info(Pid, dictionary)]), + State = io_lib:format("~p", [sys:get_state(Pid)]), + lists:flatten([Dict, State]). + + +server() -> + {ok, LSock} = gen_tcp:listen(0, [{recbuf, 256}, {active, false}]), + {ok, LPort} = inet:port(LSock), + SPid = spawn_link(fun() -> server_responder(LSock) end), + {LPort, SPid}. + + +server_responder(LSock) -> + {ok, Sock} = gen_tcp:accept(LSock), + case gen_tcp:recv(Sock, 0) of + {ok, Data} -> + % sanity check that all the request data was received + ?assert(lists:prefix("GET ", Data)), + ?assert(lists:suffix("\r\n\r\n", Data)), + Res = ["HTTP/1.1 200 OK", "Content-Length: 0", "\r\n"], + ok = gen_tcp:send(Sock, string:join(Res, "\r\n")); + Other -> + gen_tcp:close(Sock), + throw({replication_eunit_tcp_server_crashed, Other}) + end, + server_responder(LSock). + + +assert_monitors_equal(ShouldBe) -> + sys:get_status(couch_replicator_connection), + {monitors, Monitors} = process_info(whereis(couch_replicator_connection), monitors), + ?assertEqual(Monitors, ShouldBe). 
diff --git a/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl new file mode 100644 index 000000000..63310d39e --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl @@ -0,0 +1,143 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_create_target_with_options_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch_replicator/src/couch_replicator.hrl"). + +-define(USERNAME, "rep_admin"). +-define(PASSWORD, "secret"). + +setup() -> + Ctx = test_util:start_couch([fabric, mem3, couch_replicator, chttpd]), + Hashed = couch_passwords:hash_admin_password(?PASSWORD), + ok = config:set("admins", ?USERNAME, ?b2l(Hashed), _Persist=false), + Source = ?tempdb(), + Target = ?tempdb(), + {Ctx, {Source, Target}}. + + +teardown({Ctx, {_Source, _Target}}) -> + config:delete("admins", ?USERNAME), + ok = test_util:stop_couch(Ctx). + + +create_target_with_options_replication_test_() -> + { + "Create target with range partitions tests", + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_create_target_with_q_4/1, + fun should_create_target_with_q_2_n_1/1, + fun should_create_target_with_default/1, + fun should_not_create_target_with_q_any/1 + ] + } + }. + + +should_create_target_with_q_4({_Ctx, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(Source)}, + {<<"target">>, db_url(Target)}, + {<<"create_target">>, true}, + {<<"create_target_params">>, {[{<<"q">>, <<"4">>}]}} + ]}, + create_db(Source), + create_doc(Source), + {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + + {ok, TargetInfo} = fabric:get_db_info(Target), + {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), + delete_db(Source), + delete_db(Target), + ?_assertEqual(4, couch_util:get_value(q, ClusterInfo)). + + +should_create_target_with_q_2_n_1({_Ctx, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(Source)}, + {<<"target">>, db_url(Target)}, + {<<"create_target">>, true}, + {<<"create_target_params">>, + {[{<<"q">>, <<"2">>}, {<<"n">>, <<"1">>}]}} + ]}, + create_db(Source), + create_doc(Source), + {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + + {ok, TargetInfo} = fabric:get_db_info(Target), + {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), + delete_db(Source), + delete_db(Target), + [ + ?_assertEqual(2, couch_util:get_value(q, ClusterInfo)), + ?_assertEqual(1, couch_util:get_value(n, ClusterInfo)) + ]. 
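+% q and n above are the usual cluster placement parameters (shard count and
+% replica count). The replicator passes create_target_params through when it
+% creates the target database, which is why the values surface in the
+% cluster section of the fabric:get_db_info/1 result checked here.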
+ + +should_create_target_with_default({_Ctx, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(Source)}, + {<<"target">>, db_url(Target)}, + {<<"create_target">>, true} + ]}, + create_db(Source), + create_doc(Source), + {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + + {ok, TargetInfo} = fabric:get_db_info(Target), + {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), + Q = config:get("cluster", "q", "8"), + delete_db(Source), + delete_db(Target), + ?_assertEqual(list_to_integer(Q), couch_util:get_value(q, ClusterInfo)). + + +should_not_create_target_with_q_any({_Ctx, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(Source)}, + {<<"target">>, db_url(Target)}, + {<<"create_target">>, false}, + {<<"create_target_params">>, {[{<<"q">>, <<"1">>}]}} + ]}, + create_db(Source), + create_doc(Source), + {error, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + DbExist = is_list(catch mem3:shards(Target)), + delete_db(Source), + ?_assertEqual(false, DbExist). + + +create_doc(DbName) -> + Body = {[{<<"foo">>, <<"bar">>}]}, + NewDoc = #doc{body = Body}, + {ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]). + + +create_db(DbName) -> + ok = fabric:create_db(DbName, [?ADMIN_CTX]). + + +delete_db(DbName) -> + ok = fabric:delete_db(DbName, [?ADMIN_CTX]). + + +db_url(DbName) -> + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + ?l2b(io_lib:format("http://~s:~s@~s:~b/~s", [?USERNAME, ?PASSWORD, Addr, + Port, DbName])). diff --git a/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl new file mode 100644 index 000000000..d34e9f020 --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl @@ -0,0 +1,244 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_filtered_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch_replicator/src/couch_replicator.hrl"). + +-define(DDOC, {[ + {<<"_id">>, <<"_design/filter_ddoc">>}, + {<<"filters">>, {[ + {<<"testfilter">>, <<" + function(doc, req){if (doc.class == 'mammal') return true;} + ">>}, + {<<"queryfilter">>, <<" + function(doc, req) { + if (doc.class && req.query.starts) { + return doc.class.indexOf(req.query.starts) === 0; + } + else { + return false; + } + } + ">>} + ]}}, + {<<"views">>, {[ + {<<"mammals">>, {[ + {<<"map">>, <<" + function(doc) { + if (doc.class == 'mammal') { + emit(doc._id, null); + } + } + ">>} + ]}} + ]}} +]}). + +setup(_) -> + Ctx = test_util:start_couch([couch_replicator]), + Source = create_db(), + create_docs(Source), + Target = create_db(), + {Ctx, {Source, Target}}. + +teardown(_, {Ctx, {Source, Target}}) -> + delete_db(Source), + delete_db(Target), + ok = application:stop(couch_replicator), + ok = test_util:stop_couch(Ctx). 
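+% For orientation: create_docs/1 below writes four docs whose classes are
+% mammal, amphibians, reptiles and arthropods, so filter_ddoc/testfilter and
+% the mammals view each select exactly one of them.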
+
+filtered_replication_test_() ->
+    Pairs = [{local, local}, {local, remote},
+        {remote, local}, {remote, remote}],
+    {
+        "Filtered replication tests",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [{Pair, fun should_succeed/2} || Pair <- Pairs]
+        }
+    }.
+
+query_filtered_replication_test_() ->
+    Pairs = [{local, local}, {local, remote},
+        {remote, local}, {remote, remote}],
+    {
+        "Filtered with query replication tests",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [{Pair, fun should_succeed_with_query/2} || Pair <- Pairs]
+        }
+    }.
+
+view_filtered_replication_test_() ->
+    Pairs = [{local, local}],
+    {
+        "Filtered with a view replication tests",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [{Pair, fun should_succeed_with_view/2} || Pair <- Pairs]
+        }
+    }.
+
+should_succeed({From, To}, {_Ctx, {Source, Target}}) ->
+    RepObject = {[
+        {<<"source">>, db_url(From, Source)},
+        {<<"target">>, db_url(To, Target)},
+        {<<"filter">>, <<"filter_ddoc/testfilter">>}
+    ]},
+    {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
+    %% FilterFun is an Erlang version of the following JS function:
+    %% function(doc, req){if (doc.class == 'mammal') return true;}
+    FilterFun = fun(_DocId, {Props}) ->
+        couch_util:get_value(<<"class">>, Props) == <<"mammal">>
+    end,
+    {ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun),
+    {lists:flatten(io_lib:format("~p -> ~p", [From, To])), [
+        {"Target DB has proper number of docs",
+        ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))},
+        {"Target DB doesn't have deleted docs",
+        ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))},
+        {"All the docs filtered as expected",
+        ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
+    ]}.
+
+should_succeed_with_query({From, To}, {_Ctx, {Source, Target}}) ->
+    RepObject = {[
+        {<<"source">>, db_url(From, Source)},
+        {<<"target">>, db_url(To, Target)},
+        {<<"filter">>, <<"filter_ddoc/queryfilter">>},
+        {<<"query_params">>, {[
+            {<<"starts">>, <<"a">>}
+        ]}}
+    ]},
+    {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
+    FilterFun = fun(_DocId, {Props}) ->
+        case couch_util:get_value(<<"class">>, Props) of
+            <<"a", _/binary>> -> true;
+            _ -> false
+        end
+    end,
+    {ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun),
+    {lists:flatten(io_lib:format("~p -> ~p", [From, To])), [
+        {"Target DB has proper number of docs",
+        ?_assertEqual(2, proplists:get_value(doc_count, TargetDbInfo))},
+        {"Target DB doesn't have deleted docs",
+        ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))},
+        {"All the docs filtered as expected",
+        ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
+    ]}.
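+% With starts == "a", the Erlang FilterFun above mirrors the JS queryfilter:
+% only doc2 ("amphibians") and doc4 ("arthropods") have classes beginning
+% with "a", which is why the doc_count assertion expects 2.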
+ +should_succeed_with_view({From, To}, {_Ctx, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(From, Source)}, + {<<"target">>, db_url(To, Target)}, + {<<"filter">>, <<"_view">>}, + {<<"query_params">>, {[ + {<<"view">>, <<"filter_ddoc/mammals">>} + ]}} + ]}, + {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + FilterFun = fun(_DocId, {Props}) -> + couch_util:get_value(<<"class">>, Props) == <<"mammal">> + end, + {ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun), + {lists:flatten(io_lib:format("~p -> ~p", [From, To])), [ + {"Target DB has proper number of docs", + ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))}, + {"Target DB doesn't have deleted docs", + ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))}, + {"All the docs filtered as expected", + ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))} + ]}. + +compare_dbs(Source, Target, FilterFun) -> + {ok, SourceDb} = couch_db:open_int(Source, []), + {ok, TargetDb} = couch_db:open_int(Target, []), + {ok, TargetDbInfo} = couch_db:get_db_info(TargetDb), + Fun = fun(FullDocInfo, Acc) -> + {ok, DocId, SourceDoc} = read_doc(SourceDb, FullDocInfo), + TargetReply = read_doc(TargetDb, DocId), + case FilterFun(DocId, SourceDoc) of + true -> + ValidReply = {ok, DocId, SourceDoc} == TargetReply, + {ok, [ValidReply|Acc]}; + false -> + ValidReply = {not_found, missing} == TargetReply, + {ok, [ValidReply|Acc]} + end + end, + {ok, AllReplies} = couch_db:fold_docs(SourceDb, Fun, [], []), + ok = couch_db:close(SourceDb), + ok = couch_db:close(TargetDb), + {ok, TargetDbInfo, AllReplies}. + +read_doc(Db, DocIdOrInfo) -> + case couch_db:open_doc(Db, DocIdOrInfo) of + {ok, Doc} -> + {Props} = couch_doc:to_json_obj(Doc, [attachments]), + DocId = couch_util:get_value(<<"_id">>, Props), + {ok, DocId, {Props}}; + Error -> + Error + end. + +create_db() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + DbName. + +create_docs(DbName) -> + {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), + DDoc = couch_doc:from_json_obj(?DDOC), + Doc1 = couch_doc:from_json_obj({[ + {<<"_id">>, <<"doc1">>}, + {<<"class">>, <<"mammal">>}, + {<<"value">>, 1} + + ]}), + Doc2 = couch_doc:from_json_obj({[ + {<<"_id">>, <<"doc2">>}, + {<<"class">>, <<"amphibians">>}, + {<<"value">>, 2} + + ]}), + Doc3 = couch_doc:from_json_obj({[ + {<<"_id">>, <<"doc3">>}, + {<<"class">>, <<"reptiles">>}, + {<<"value">>, 3} + + ]}), + Doc4 = couch_doc:from_json_obj({[ + {<<"_id">>, <<"doc4">>}, + {<<"class">>, <<"arthropods">>}, + {<<"value">>, 2} + + ]}), + {ok, _} = couch_db:update_docs(Db, [DDoc, Doc1, Doc2, Doc3, Doc4]), + couch_db:ensure_full_commit(Db), + couch_db:close(Db). + +delete_db(DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]). + +db_url(local, DbName) -> + DbName; +db_url(remote, DbName) -> + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(couch_httpd, port), + ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])). diff --git a/src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl new file mode 100644 index 000000000..c4ad4e9b6 --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl @@ -0,0 +1,174 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_httpc_pool_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(TIMEOUT, 1000). + + +setup() -> + spawn_pool(). + +teardown(Pool) -> + stop_pool(Pool). + + +httpc_pool_test_() -> + { + "httpc pool tests", + { + setup, + fun() -> test_util:start_couch([couch_replicator]) end, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_block_new_clients_when_full/1, + fun should_replace_worker_on_death/1 + ] + } + } + }. + + +should_block_new_clients_when_full(Pool) -> + ?_test(begin + Client1 = spawn_client(Pool), + Client2 = spawn_client(Pool), + Client3 = spawn_client(Pool), + + ?assertEqual(ok, ping_client(Client1)), + ?assertEqual(ok, ping_client(Client2)), + ?assertEqual(ok, ping_client(Client3)), + + Worker1 = get_client_worker(Client1, "1"), + Worker2 = get_client_worker(Client2, "2"), + Worker3 = get_client_worker(Client3, "3"), + + ?assert(is_process_alive(Worker1)), + ?assert(is_process_alive(Worker2)), + ?assert(is_process_alive(Worker3)), + + ?assertNotEqual(Worker1, Worker2), + ?assertNotEqual(Worker2, Worker3), + ?assertNotEqual(Worker3, Worker1), + + Client4 = spawn_client(Pool), + ?assertEqual(timeout, ping_client(Client4)), + + ?assertEqual(ok, stop_client(Client1)), + ?assertEqual(ok, ping_client(Client4)), + + Worker4 = get_client_worker(Client4, "4"), + ?assertEqual(Worker1, Worker4), + + lists:foreach( + fun(C) -> + ?assertEqual(ok, stop_client(C)) + end, [Client2, Client3, Client4]) + end). + +should_replace_worker_on_death(Pool) -> + ?_test(begin + Client1 = spawn_client(Pool), + ?assertEqual(ok, ping_client(Client1)), + Worker1 = get_client_worker(Client1, "1"), + ?assert(is_process_alive(Worker1)), + + ?assertEqual(ok, kill_client_worker(Client1)), + ?assertNot(is_process_alive(Worker1)), + ?assertEqual(ok, stop_client(Client1)), + + Client2 = spawn_client(Pool), + ?assertEqual(ok, ping_client(Client2)), + Worker2 = get_client_worker(Client2, "2"), + ?assert(is_process_alive(Worker2)), + + ?assertNotEqual(Worker1, Worker2), + ?assertEqual(ok, stop_client(Client2)) + end). + + +spawn_client(Pool) -> + Parent = self(), + Ref = make_ref(), + Pid = spawn(fun() -> + {ok, Worker} = couch_replicator_httpc_pool:get_worker(Pool), + loop(Parent, Ref, Worker, Pool) + end), + {Pid, Ref}. + +ping_client({Pid, Ref}) -> + Pid ! ping, + receive + {pong, Ref} -> + ok + after ?TIMEOUT -> + timeout + end. + +get_client_worker({Pid, Ref}, ClientName) -> + Pid ! get_worker, + receive + {worker, Ref, Worker} -> + Worker + after ?TIMEOUT -> + erlang:error( + {assertion_failed, + [{module, ?MODULE}, {line, ?LINE}, + {reason, "Timeout getting client " ++ ClientName ++ " worker"}]}) + end. + +stop_client({Pid, Ref}) -> + Pid ! stop, + receive + {stop, Ref} -> + ok + after ?TIMEOUT -> + timeout + end. + +kill_client_worker({Pid, Ref}) -> + Pid ! get_worker, + receive + {worker, Ref, Worker} -> + exit(Worker, kill), + ok + after ?TIMEOUT -> + timeout + end. + +loop(Parent, Ref, Worker, Pool) -> + receive + ping -> + Parent ! 
{pong, Ref}, + loop(Parent, Ref, Worker, Pool); + get_worker -> + Parent ! {worker, Ref, Worker}, + loop(Parent, Ref, Worker, Pool); + stop -> + couch_replicator_httpc_pool:release_worker(Pool, Worker), + Parent ! {stop, Ref} + end. + +spawn_pool() -> + Host = config:get("httpd", "bind_address", "127.0.0.1"), + Port = config:get("httpd", "port", "5984"), + {ok, Pool} = couch_replicator_httpc_pool:start_link( + "http://" ++ Host ++ ":" ++ Port, [{max_connections, 3}]), + Pool. + +stop_pool(Pool) -> + ok = couch_replicator_httpc_pool:stop(Pool). diff --git a/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl new file mode 100644 index 000000000..70eda0566 --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl @@ -0,0 +1,94 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_id_too_long_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch_replicator/src/couch_replicator.hrl"). + + +setup(_) -> + Ctx = test_util:start_couch([couch_replicator]), + Source = create_db(), + create_doc(Source), + Target = create_db(), + {Ctx, {Source, Target}}. + + +teardown(_, {Ctx, {Source, Target}}) -> + delete_db(Source), + delete_db(Target), + config:set("replicator", "max_document_id_length", "infinity"), + ok = test_util:stop_couch(Ctx). + + +id_too_long_replication_test_() -> + Pairs = [{local, local}, {local, remote}, + {remote, local}, {remote, remote}], + { + "Doc id too long tests", + { + foreachx, + fun setup/1, fun teardown/2, + [{Pair, fun should_succeed/2} || Pair <- Pairs] ++ + [{Pair, fun should_fail/2} || Pair <- Pairs] + } + }. + + +should_succeed({From, To}, {_Ctx, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(From, Source)}, + {<<"target">>, db_url(To, Target)} + ]}, + config:set("replicator", "max_document_id_length", "5"), + {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + ?_assertEqual(ok, couch_replicator_test_helper:compare_dbs(Source, Target)). + + +should_fail({From, To}, {_Ctx, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(From, Source)}, + {<<"target">>, db_url(To, Target)} + ]}, + config:set("replicator", "max_document_id_length", "4"), + {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + ?_assertError({badmatch, {not_found, missing}}, + couch_replicator_test_helper:compare_dbs(Source, Target)). + + +create_db() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + DbName. + + +create_doc(DbName) -> + {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), + Doc = couch_doc:from_json_obj({[{<<"_id">>, <<"12345">>}]}), + {ok, _} = couch_db:update_doc(Db, Doc, []), + couch_db:ensure_full_commit(Db), + couch_db:close(Db). + + +delete_db(DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]). 
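+% The single fixture doc id <<"12345">> is exactly five characters, so the
+% max_document_id_length of "5" set in should_succeed/2 admits it while the
+% "4" set in should_fail/2 rejects it.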
+ + +db_url(local, DbName) -> + DbName; +db_url(remote, DbName) -> + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(couch_httpd, port), + ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])). diff --git a/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl new file mode 100644 index 000000000..b9adf5c4b --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl @@ -0,0 +1,124 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_large_atts_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-import(couch_replicator_test_helper, [ + db_url/1, + replicate/2, + compare_dbs/2 +]). + +-define(ATT_SIZE_1, 2 * 1024 * 1024). +-define(ATT_SIZE_2, round(6.6 * 1024 * 1024)). +-define(DOCS_COUNT, 11). +-define(TIMEOUT_EUNIT, 120). + + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + DbName. + +setup(local) -> + setup(); +setup(remote) -> + {remote, setup()}; +setup({A, B}) -> + Ctx = test_util:start_couch([couch_replicator]), + config:set("attachments", "compressible_types", "text/*", false), + Source = setup(A), + Target = setup(B), + {Ctx, {Source, Target}}. + +teardown({remote, DbName}) -> + teardown(DbName); +teardown(DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + ok. + +teardown(_, {Ctx, {Source, Target}}) -> + teardown(Source), + teardown(Target), + + ok = application:stop(couch_replicator), + ok = test_util:stop_couch(Ctx). + +large_atts_test_() -> + Pairs = [{local, local}, {local, remote}, + {remote, local}, {remote, remote}], + { + "Replicate docs with large attachments", + { + foreachx, + fun setup/1, fun teardown/2, + [{Pair, fun should_populate_replicate_compact/2} + || Pair <- Pairs] + } + }. + + +should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) -> + {lists:flatten(io_lib:format("~p -> ~p", [From, To])), + {inorder, [should_populate_source(Source), + should_replicate(Source, Target), + should_compare_databases(Source, Target)]}}. + +should_populate_source({remote, Source}) -> + should_populate_source(Source); +should_populate_source(Source) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source, ?DOCS_COUNT))}. + +should_replicate({remote, Source}, Target) -> + should_replicate(db_url(Source), Target); +should_replicate(Source, {remote, Target}) -> + should_replicate(Source, db_url(Target)); +should_replicate(Source, Target) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}. + +should_compare_databases({remote, Source}, Target) -> + should_compare_databases(Source, Target); +should_compare_databases(Source, {remote, Target}) -> + should_compare_databases(Source, Target); +should_compare_databases(Source, Target) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}. 
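+% In att/3 below the attachment body is produced lazily: couch_att calls the
+% data fun with the number of bytes it wants next, and
+% crypto:strong_rand_bytes/1 returns exactly that many, so the multi-megabyte
+% bodies (?ATT_SIZE_1, ?ATT_SIZE_2) never have to be materialized up front.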
+ + +populate_db(DbName, DocCount) -> + {ok, Db} = couch_db:open_int(DbName, []), + Docs = lists:foldl( + fun(DocIdCounter, Acc) -> + Doc = #doc{ + id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]), + body = {[]}, + atts = [ + att(<<"att1">>, ?ATT_SIZE_1, <<"text/plain">>), + att(<<"att2">>, ?ATT_SIZE_2, <<"app/binary">>) + ] + }, + [Doc | Acc] + end, + [], lists:seq(1, DocCount)), + {ok, _} = couch_db:update_docs(Db, Docs, []), + couch_db:close(Db). + +att(Name, Size, Type) -> + couch_att:new([ + {name, Name}, + {type, Type}, + {att_len, Size}, + {data, fun(Count) -> crypto:strong_rand_bytes(Count) end} + ]). diff --git a/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl new file mode 100644 index 000000000..eee5b1647 --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl @@ -0,0 +1,206 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_many_leaves_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-import(couch_replicator_test_helper, [ + db_url/1, + replicate/2 +]). + +-define(DOCS_CONFLICTS, [ + {<<"doc1">>, 10}, + {<<"doc2">>, 100}, + % a number > MaxURLlength (7000) / length(DocRevisionString) + {<<"doc3">>, 210} +]). +-define(NUM_ATTS, 2). +-define(TIMEOUT_EUNIT, 60). +-define(i2l(I), integer_to_list(I)). +-define(io2b(Io), iolist_to_binary(Io)). + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + DbName. + +setup(local) -> + setup(); +setup(remote) -> + {remote, setup()}; +setup({A, B}) -> + Ctx = test_util:start_couch([couch_replicator]), + Source = setup(A), + Target = setup(B), + {Ctx, {Source, Target}}. + +teardown({remote, DbName}) -> + teardown(DbName); +teardown(DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + ok. + +teardown(_, {Ctx, {Source, Target}}) -> + teardown(Source), + teardown(Target), + ok = application:stop(couch_replicator), + ok = test_util:stop_couch(Ctx). + +docs_with_many_leaves_test_() -> + Pairs = [{local, local}, {local, remote}, + {remote, local}, {remote, remote}], + { + "Replicate documents with many leaves", + { + foreachx, + fun setup/1, fun teardown/2, + [{Pair, fun should_populate_replicate_compact/2} + || Pair <- Pairs] + } + }. + + +should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) -> + {lists:flatten(io_lib:format("~p -> ~p", [From, To])), + {inorder, [ + should_populate_source(Source), + should_replicate(Source, Target), + should_verify_target(Source, Target), + should_add_attachments_to_source(Source), + should_replicate(Source, Target), + should_verify_target(Source, Target) + ]}}. + +should_populate_source({remote, Source}) -> + should_populate_source(Source); +should_populate_source(Source) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source))}. 
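+% Rough arithmetic behind the doc3 entry in ?DOCS_CONFLICTS above: a "1-"
+% plus md5 revision string is 34 characters, and 210 * 34 = 7140 > 7000, so
+% fetching every leaf revision of doc3 in one GET would exceed the maximum
+% URL length, forcing the alternative code path the comment alludes to.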
+ +should_replicate({remote, Source}, Target) -> + should_replicate(db_url(Source), Target); +should_replicate(Source, {remote, Target}) -> + should_replicate(Source, db_url(Target)); +should_replicate(Source, Target) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}. + +should_verify_target({remote, Source}, Target) -> + should_verify_target(Source, Target); +should_verify_target(Source, {remote, Target}) -> + should_verify_target(Source, Target); +should_verify_target(Source, Target) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(begin + {ok, SourceDb} = couch_db:open_int(Source, []), + {ok, TargetDb} = couch_db:open_int(Target, []), + verify_target(SourceDb, TargetDb, ?DOCS_CONFLICTS), + ok = couch_db:close(SourceDb), + ok = couch_db:close(TargetDb) + end)}. + +should_add_attachments_to_source({remote, Source}) -> + should_add_attachments_to_source(Source); +should_add_attachments_to_source(Source) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(begin + {ok, SourceDb} = couch_db:open_int(Source, []), + add_attachments(SourceDb, ?NUM_ATTS, ?DOCS_CONFLICTS), + ok = couch_db:close(SourceDb) + end)}. + +populate_db(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + lists:foreach( + fun({DocId, NumConflicts}) -> + Value = <<"0">>, + Doc = #doc{ + id = DocId, + body = {[ {<<"value">>, Value} ]} + }, + {ok, _} = couch_db:update_doc(Db, Doc, []), + {ok, _} = add_doc_siblings(Db, DocId, NumConflicts) + end, ?DOCS_CONFLICTS), + couch_db:close(Db). + +add_doc_siblings(Db, DocId, NumLeaves) when NumLeaves > 0 -> + add_doc_siblings(Db, DocId, NumLeaves, [], []). + +add_doc_siblings(Db, _DocId, 0, AccDocs, AccRevs) -> + {ok, []} = couch_db:update_docs(Db, AccDocs, [], replicated_changes), + {ok, AccRevs}; + +add_doc_siblings(Db, DocId, NumLeaves, AccDocs, AccRevs) -> + Value = ?l2b(?i2l(NumLeaves)), + Rev = couch_hash:md5_hash(Value), + Doc = #doc{ + id = DocId, + revs = {1, [Rev]}, + body = {[ {<<"value">>, Value} ]} + }, + add_doc_siblings(Db, DocId, NumLeaves - 1, + [Doc | AccDocs], [{1, Rev} | AccRevs]). + +verify_target(_SourceDb, _TargetDb, []) -> + ok; +verify_target(SourceDb, TargetDb, [{DocId, NumConflicts} | Rest]) -> + {ok, SourceLookups} = couch_db:open_doc_revs( + SourceDb, + DocId, + all, + [conflicts, deleted_conflicts]), + {ok, TargetLookups} = couch_db:open_doc_revs( + TargetDb, + DocId, + all, + [conflicts, deleted_conflicts]), + SourceDocs = [Doc || {ok, Doc} <- SourceLookups], + TargetDocs = [Doc || {ok, Doc} <- TargetLookups], + Total = NumConflicts + 1, + ?assertEqual(Total, length(TargetDocs)), + lists:foreach( + fun({SourceDoc, TargetDoc}) -> + SourceJson = couch_doc:to_json_obj(SourceDoc, [attachments]), + TargetJson = couch_doc:to_json_obj(TargetDoc, [attachments]), + ?assertEqual(SourceJson, TargetJson) + end, + lists:zip(SourceDocs, TargetDocs)), + verify_target(SourceDb, TargetDb, Rest). 
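+
+% How add_doc_siblings/5 above manufactures conflicts: every sibling carries
+% a pre-built {1, Rev} revision tree and is written via update_docs/4 in
+% replicated_changes mode, which stores the supplied revisions verbatim (as
+% an incoming replication would) instead of generating new ones on top of
+% the current revision. A minimal sketch of one such sibling, with a
+% hypothetical id and value but the same APIs used above:
+%
+%     Rev = couch_hash:md5_hash(<<"some-value">>),
+%     Doc = #doc{id = <<"d">>, revs = {1, [Rev]}, body = {[]}},
+%     {ok, []} = couch_db:update_docs(Db, [Doc], [], replicated_changes).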
+ +add_attachments(_SourceDb, _NumAtts, []) -> + ok; +add_attachments(SourceDb, NumAtts, [{DocId, NumConflicts} | Rest]) -> + {ok, SourceLookups} = couch_db:open_doc_revs(SourceDb, DocId, all, []), + SourceDocs = [Doc || {ok, Doc} <- SourceLookups], + Total = NumConflicts + 1, + ?assertEqual(Total, length(SourceDocs)), + NewDocs = lists:foldl( + fun(#doc{atts = Atts, revs = {Pos, [Rev | _]}} = Doc, Acc) -> + NewAtts = lists:foldl(fun(I, AttAcc) -> + AttData = crypto:strong_rand_bytes(100), + NewAtt = couch_att:new([ + {name, ?io2b(["att_", ?i2l(I), "_", + couch_doc:rev_to_str({Pos, Rev})])}, + {type, <<"application/foobar">>}, + {att_len, byte_size(AttData)}, + {data, AttData} + ]), + [NewAtt | AttAcc] + end, [], lists:seq(1, NumAtts)), + [Doc#doc{atts = Atts ++ NewAtts} | Acc] + end, + [], SourceDocs), + {ok, UpdateResults} = couch_db:update_docs(SourceDb, NewDocs, []), + NewRevs = [R || {ok, R} <- UpdateResults], + ?assertEqual(length(NewDocs), length(NewRevs)), + add_attachments(SourceDb, NumAtts, Rest). + diff --git a/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl new file mode 100644 index 000000000..c1681781f --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl @@ -0,0 +1,157 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_missing_stubs_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-import(couch_replicator_test_helper, [ + db_url/1, + replicate/2, + compare_dbs/2 +]). + +-define(REVS_LIMIT, 3). +-define(TIMEOUT_EUNIT, 30). + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + DbName. + +setup(local) -> + setup(); +setup(remote) -> + {remote, setup()}; +setup({A, B}) -> + Ctx = test_util:start_couch([couch_replicator]), + Source = setup(A), + Target = setup(B), + {Ctx, {Source, Target}}. + +teardown({remote, DbName}) -> + teardown(DbName); +teardown(DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + ok. + +teardown(_, {Ctx, {Source, Target}}) -> + teardown(Source), + teardown(Target), + ok = application:stop(couch_replicator), + ok = test_util:stop_couch(Ctx). + +missing_stubs_test_() -> + Pairs = [{local, local}, {local, remote}, + {remote, local}, {remote, remote}], + { + "Replicate docs with missing stubs (COUCHDB-1365)", + { + foreachx, + fun setup/1, fun teardown/2, + [{Pair, fun should_replicate_docs_with_missed_att_stubs/2} + || Pair <- Pairs] + } + }. 
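+
+% The scenario under test: cap the target's revision history at ?REVS_LIMIT,
+% then update each source doc ?REVS_LIMIT * 2 times, so the target prunes
+% the revisions its attachment stubs reference and the replicator must
+% detect the missing stubs and resend full attachment bodies. A hedged
+% sketch of the revs_limit knob driven by should_set_target_revs_limit/2
+% below (get_revs_limit/1 shown only to illustrate the effect):
+%
+%     {ok, Db} = couch_db:open_int(Target, [?ADMIN_CTX]),
+%     ok = couch_db:set_revs_limit(Db, 3),
+%     3 = couch_db:get_revs_limit(Db).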
+ + +should_replicate_docs_with_missed_att_stubs({From, To}, {_Ctx, {Source, Target}}) -> + {lists:flatten(io_lib:format("~p -> ~p", [From, To])), + {inorder, [ + should_populate_source(Source), + should_set_target_revs_limit(Target, ?REVS_LIMIT), + should_replicate(Source, Target), + should_compare_databases(Source, Target), + should_update_source_docs(Source, ?REVS_LIMIT * 2), + should_replicate(Source, Target), + should_compare_databases(Source, Target) + ]}}. + +should_populate_source({remote, Source}) -> + should_populate_source(Source); +should_populate_source(Source) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source))}. + +should_replicate({remote, Source}, Target) -> + should_replicate(db_url(Source), Target); +should_replicate(Source, {remote, Target}) -> + should_replicate(Source, db_url(Target)); +should_replicate(Source, Target) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}. + +should_set_target_revs_limit({remote, Target}, RevsLimit) -> + should_set_target_revs_limit(Target, RevsLimit); +should_set_target_revs_limit(Target, RevsLimit) -> + ?_test(begin + {ok, Db} = couch_db:open_int(Target, [?ADMIN_CTX]), + ?assertEqual(ok, couch_db:set_revs_limit(Db, RevsLimit)), + ok = couch_db:close(Db) + end). + +should_compare_databases({remote, Source}, Target) -> + should_compare_databases(Source, Target); +should_compare_databases(Source, {remote, Target}) -> + should_compare_databases(Source, Target); +should_compare_databases(Source, Target) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}. + +should_update_source_docs({remote, Source}, Times) -> + should_update_source_docs(Source, Times); +should_update_source_docs(Source, Times) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(update_db_docs(Source, Times))}. + + +populate_db(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + AttData = crypto:strong_rand_bytes(6000), + Doc = #doc{ + id = <<"doc1">>, + atts = [ + couch_att:new([ + {name, <<"doc1_att1">>}, + {type, <<"application/foobar">>}, + {att_len, byte_size(AttData)}, + {data, AttData} + ]) + ] + }, + {ok, _} = couch_db:update_doc(Db, Doc, []), + couch_db:close(Db). + +update_db_docs(DbName, Times) -> + {ok, Db} = couch_db:open_int(DbName, []), + {ok, _} = couch_db:fold_docs( + Db, + fun(FDI, Acc) -> db_fold_fun(FDI, Acc) end, + {DbName, Times}, + []), + ok = couch_db:close(Db). + +db_fold_fun(FullDocInfo, {DbName, Times}) -> + {ok, Db} = couch_db:open_int(DbName, []), + {ok, Doc} = couch_db:open_doc(Db, FullDocInfo), + lists:foldl( + fun(_, {Pos, RevId}) -> + {ok, Db2} = couch_db:reopen(Db), + NewDocVersion = Doc#doc{ + revs = {Pos, [RevId]}, + body = {[{<<"value">>, base64:encode(crypto:strong_rand_bytes(100))}]} + }, + {ok, NewRev} = couch_db:update_doc(Db2, NewDocVersion, []), + NewRev + end, + {element(1, Doc#doc.revs), hd(element(2, Doc#doc.revs))}, + lists:seq(1, Times)), + ok = couch_db:close(Db), + {ok, {DbName, Times}}. diff --git a/src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl new file mode 100644 index 000000000..4f545bcb5 --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl @@ -0,0 +1,69 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_proxy_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch_replicator/src/couch_replicator.hrl"). +-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl"). + + +setup() -> + ok. + + +teardown(_) -> + ok. + + +replicator_proxy_test_() -> + { + "replicator proxy tests", + { + setup, + fun() -> test_util:start_couch([couch_replicator]) end, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun parse_rep_doc_without_proxy/1, + fun parse_rep_doc_with_proxy/1 + ] + } + } + }. + + +parse_rep_doc_without_proxy(_) -> + ?_test(begin + NoProxyDoc = {[ + {<<"source">>, <<"http://unproxied.com">>}, + {<<"target">>, <<"http://otherunproxied.com">>} + ]}, + Rep = couch_replicator_docs:parse_rep_doc(NoProxyDoc), + ?assertEqual((Rep#rep.source)#httpdb.proxy_url, undefined), + ?assertEqual((Rep#rep.target)#httpdb.proxy_url, undefined) + end). + + +parse_rep_doc_with_proxy(_) -> + ?_test(begin + ProxyURL = <<"http://myproxy.com">>, + ProxyDoc = {[ + {<<"source">>, <<"http://unproxied.com">>}, + {<<"target">>, <<"http://otherunproxied.com">>}, + {<<"proxy">>, ProxyURL} + ]}, + Rep = couch_replicator_docs:parse_rep_doc(ProxyDoc), + ?assertEqual((Rep#rep.source)#httpdb.proxy_url, binary_to_list(ProxyURL)), + ?assertEqual((Rep#rep.target)#httpdb.proxy_url, binary_to_list(ProxyURL)) + end). diff --git a/src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl new file mode 100644 index 000000000..034550aec --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl @@ -0,0 +1,89 @@ +-module(couch_replicator_rate_limiter_tests). + +-include_lib("couch/include/couch_eunit.hrl"). + + +rate_limiter_test_() -> + { + foreach, + fun setup/0, + fun teardown/1, + [ + t_new_key(), + t_1_failure(), + t_2_failures_back_to_back(), + t_2_failures(), + t_success_threshold(), + t_1_failure_2_successes() + ] + }. + + +t_new_key() -> + ?_test(begin + ?assertEqual(0, couch_replicator_rate_limiter:interval({"foo", get})) + end). + + +t_1_failure() -> + ?_test(begin + ?assertEqual(24, couch_replicator_rate_limiter:failure({"foo", get})) + end). + + +t_2_failures() -> + ?_test(begin + couch_replicator_rate_limiter:failure({"foo", get}), + low_pass_filter_delay(), + Interval = couch_replicator_rate_limiter:failure({"foo", get}), + ?assertEqual(29, Interval) + end). + + +t_2_failures_back_to_back() -> + ?_test(begin + couch_replicator_rate_limiter:failure({"foo", get}), + Interval = couch_replicator_rate_limiter:failure({"foo", get}), + ?assertEqual(24, Interval) + end). + + +t_success_threshold() -> + ?_test(begin + Interval = couch_replicator_rate_limiter:success({"foo", get}), + ?assertEqual(0, Interval), + Interval = couch_replicator_rate_limiter:success({"foo", get}), + ?assertEqual(0, Interval) + end). 
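+
+% The numbers asserted above encode the limiter's backoff behaviour: each
+% key holds an interval that grows on failure and shrinks on success,
+% smoothed by a low-pass filter, so two failures back to back count once
+% (24) while failures spaced past the filter window compound (24 -> 29).
+% A hedged usage sketch with an illustrative key, assuming the same
+% expectations these tests assert:
+%
+%     Key = {"http://example.net/db", put},
+%     0 = couch_replicator_rate_limiter:interval(Key),
+%     24 = couch_replicator_rate_limiter:failure(Key),
+%     timer:sleep(100), % let the low-pass filter window pass
+%     29 = couch_replicator_rate_limiter:failure(Key).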
+ + +t_1_failure_2_successes() -> + ?_test(begin + couch_replicator_rate_limiter:failure({"foo", get}), + low_pass_filter_delay(), + Succ1 = couch_replicator_rate_limiter:success({"foo", get}), + ?assertEqual(20, Succ1), + low_pass_filter_delay(), + Succ2 = couch_replicator_rate_limiter:success({"foo", get}), + ?assertEqual(0, Succ2) + end). + + +low_pass_filter_delay() -> + timer:sleep(100). + + +setup() -> + {ok, Pid} = couch_replicator_rate_limiter:start_link(), + Pid. + + +teardown(Pid) -> + Ref = erlang:monitor(process, Pid), + unlink(Pid), + exit(Pid, kill), + receive + {'DOWN', Ref, process, Pid, _} -> + ok + end, + ok. diff --git a/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl b/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl new file mode 100644 index 000000000..3b7377b78 --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl @@ -0,0 +1,160 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_retain_stats_between_job_runs). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch_replicator/src/couch_replicator.hrl"). + +-define(DELAY, 500). +-define(TIMEOUT, 60000). +-define(i2l(I), integer_to_list(I)). +-define(io2b(Io), iolist_to_binary(Io)). + + +setup() -> + Ctx = test_util:start_couch([couch_replicator]), + Source = setup_db(), + Target = setup_db(), + {Ctx, {Source, Target}}. + + +teardown({Ctx, {Source, Target}}) -> + teardown_db(Source), + teardown_db(Target), + ok = application:stop(couch_replicator), + ok = test_util:stop_couch(Ctx). + + +stats_retained_test_() -> + { + setup, + fun setup/0, + fun teardown/1, + fun t_stats_retained/1 + }. + + +t_stats_retained({_Ctx, {Source, Target}}) -> + ?_test(begin + populate_db(Source, 42), + {ok, RepPid, RepId} = replicate(Source, Target), + wait_target_in_sync(Source, Target), + check_active_tasks(42, 42), + reschedule_job(RepPid), + check_active_tasks(42, 42), + couch_replicator_scheduler:remove_job(RepId) + end). + + +setup_db() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + DbName. + + +teardown_db(DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + ok. + + +reschedule_job(RepPid) -> + Ref = erlang:monitor(process, RepPid), + gen_server:cast(couch_replicator_scheduler, {set_max_jobs, 0}), + couch_replicator_scheduler:reschedule(), + receive + {'DOWN', Ref, _, _, _} -> ok + after ?TIMEOUT -> + erlang:error(timeout) + end, + gen_server:cast(couch_replicator_scheduler, {set_max_jobs, 500}), + couch_replicator_scheduler:reschedule(). + + +check_active_tasks(DocsRead, DocsWritten) -> + RepTask = wait_for_task_status(), + ?assertNotEqual(timeout, RepTask), + ?assertEqual(DocsRead, couch_util:get_value(docs_read, RepTask)), + ?assertEqual(DocsWritten, couch_util:get_value(docs_written, RepTask)). 
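+
+% reschedule_job/1 above bounces the replication on purpose: shrinking the
+% scheduler to zero slots and rescheduling stops the running job (confirmed
+% via the monitor), and restoring max_jobs lets it start again. Running
+% check_active_tasks/2 before and after that bounce is the point of this
+% suite: docs_read/docs_written reported in _active_tasks must survive a
+% job restart instead of resetting to zero.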
+ + +replication_tasks() -> + lists:filter(fun(P) -> + couch_util:get_value(type, P) =:= replication + end, couch_task_status:all()). + + +wait_for_task_status() -> + test_util:wait(fun() -> + case replication_tasks() of + [] -> wait; + [RepTask] -> RepTask + end + end). + + +populate_db(DbName, DocCount) -> + {ok, Db} = couch_db:open_int(DbName, []), + Docs = lists:foldl( + fun(DocIdCounter, Acc) -> + Id = ?io2b(["doc", ?i2l(DocIdCounter)]), + Doc = #doc{id = Id, body = {[]}}, + [Doc | Acc] + end, + [], lists:seq(1, DocCount)), + {ok, _} = couch_db:update_docs(Db, Docs, []), + ok = couch_db:close(Db). + + +wait_target_in_sync(Source, Target) -> + {ok, SourceDb} = couch_db:open_int(Source, []), + {ok, SourceInfo} = couch_db:get_db_info(SourceDb), + ok = couch_db:close(SourceDb), + SourceDocCount = couch_util:get_value(doc_count, SourceInfo), + wait_target_in_sync_loop(SourceDocCount, Target, 300). + + +wait_target_in_sync_loop(_DocCount, _TargetName, 0) -> + erlang:error({assertion_failed, [ + {module, ?MODULE}, {line, ?LINE}, + {reason, "Could not get source and target databases in sync"} + ]}); + +wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) -> + {ok, Target} = couch_db:open_int(TargetName, []), + {ok, TargetInfo} = couch_db:get_db_info(Target), + ok = couch_db:close(Target), + TargetDocCount = couch_util:get_value(doc_count, TargetInfo), + case TargetDocCount == DocCount of + true -> + true; + false -> + ok = timer:sleep(?DELAY), + wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1) + end. + + +replicate(Source, Target) -> + SrcUrl = couch_replicator_test_helper:db_url(Source), + TgtUrl = couch_replicator_test_helper:db_url(Target), + RepObject = {[ + {<<"source">>, SrcUrl}, + {<<"target">>, TgtUrl}, + {<<"continuous">>, true} + ]}, + {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER), + ok = couch_replicator_scheduler:add_job(Rep), + couch_replicator_scheduler:reschedule(), + Pid = couch_replicator_test_helper:get_pid(Rep#rep.id), + {ok, Pid, Rep#rep.id}. diff --git a/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl new file mode 100644 index 000000000..a7f4c5df3 --- /dev/null +++ b/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl @@ -0,0 +1,121 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_selector_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch_replicator/src/couch_replicator.hrl"). + + +setup(_) -> + Ctx = test_util:start_couch([couch_replicator]), + Source = create_db(), + create_docs(Source), + Target = create_db(), + {Ctx, {Source, Target}}. + +teardown(_, {Ctx, {Source, Target}}) -> + delete_db(Source), + delete_db(Target), + ok = application:stop(couch_replicator), + ok = test_util:stop_couch(Ctx). 
+
+selector_replication_test_() ->
+    Pairs = [{local, local}, {local, remote},
+             {remote, local}, {remote, remote}],
+    {
+        "Selector filtered replication tests",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [{Pair, fun should_succeed/2} || Pair <- Pairs]
+        }
+    }.
+
+should_succeed({From, To}, {_Ctx, {Source, Target}}) ->
+    RepObject = {[
+        {<<"source">>, db_url(From, Source)},
+        {<<"target">>, db_url(To, Target)},
+        {<<"selector">>, {[{<<"_id">>, <<"doc2">>}]}}
+    ]},
+    {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
+    %% FilterFun is an Erlang version of the mango selector above
+    FilterFun = fun(_DocId, {Props}) ->
+        couch_util:get_value(<<"_id">>, Props) == <<"doc2">>
+    end,
+    {ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun),
+    {lists:flatten(io_lib:format("~p -> ~p", [From, To])), [
+        {"Target DB has proper number of docs",
+        ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))},
+        {"All the docs selected as expected",
+        ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
+    ]}.
+
+compare_dbs(Source, Target, FilterFun) ->
+    {ok, SourceDb} = couch_db:open_int(Source, []),
+    {ok, TargetDb} = couch_db:open_int(Target, []),
+    {ok, TargetDbInfo} = couch_db:get_db_info(TargetDb),
+    Fun = fun(FullDocInfo, Acc) ->
+        {ok, DocId, SourceDoc} = read_doc(SourceDb, FullDocInfo),
+        TargetReply = read_doc(TargetDb, DocId),
+        case FilterFun(DocId, SourceDoc) of
+            true ->
+                ValidReply = {ok, DocId, SourceDoc} == TargetReply,
+                {ok, [ValidReply|Acc]};
+            false ->
+                ValidReply = {not_found, missing} == TargetReply,
+                {ok, [ValidReply|Acc]}
+        end
+    end,
+    {ok, AllReplies} = couch_db:fold_docs(SourceDb, Fun, [], []),
+    ok = couch_db:close(SourceDb),
+    ok = couch_db:close(TargetDb),
+    {ok, TargetDbInfo, AllReplies}.
+
+read_doc(Db, DocIdOrInfo) ->
+    case couch_db:open_doc(Db, DocIdOrInfo) of
+        {ok, Doc} ->
+            {Props} = couch_doc:to_json_obj(Doc, [attachments]),
+            DocId = couch_util:get_value(<<"_id">>, Props),
+            {ok, DocId, {Props}};
+        Error ->
+            Error
+    end.
+
+create_db() ->
+    DbName = ?tempdb(),
+    {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
+    ok = couch_db:close(Db),
+    DbName.
+
+create_docs(DbName) ->
+    {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
+    Doc1 = couch_doc:from_json_obj({[
+        {<<"_id">>, <<"doc1">>}
+    ]}),
+    Doc2 = couch_doc:from_json_obj({[
+        {<<"_id">>, <<"doc2">>}
+    ]}),
+    {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2]),
+    couch_db:ensure_full_commit(Db),
+    couch_db:close(Db).
+
+delete_db(DbName) ->
+    ok = couch_server:delete(DbName, [?ADMIN_CTX]).
+
+db_url(local, DbName) ->
+    DbName;
+db_url(remote, DbName) ->
+    Addr = config:get("httpd", "bind_address", "127.0.0.1"),
+    Port = mochiweb_socket_server:get(couch_httpd, port),
+    ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl b/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl
new file mode 100644
index 000000000..af3a285f5
--- /dev/null
+++ b/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl
@@ -0,0 +1,188 @@
+-module(couch_replicator_small_max_request_size_target).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-import(couch_replicator_test_helper, [
+    db_url/1,
+    replicate/1,
+    compare_dbs/3
+]).
+
+-define(TIMEOUT_EUNIT, 360).
+
+
+setup() ->
+    DbName = ?tempdb(),
+    {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
+    ok = couch_db:close(Db),
+    DbName.
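+
+% Sizing rationale for the cases below: setup/1 caps max_http_request_size
+% at 10000 bytes, so five ~3000-byte docs cannot travel in one _bulk_docs
+% POST and the replicator has to split the batch, while a single
+% ~12000-byte doc can never fit in any request and is expected to be
+% skipped entirely (hence the ExceptIds argument to compare_dbs/3).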
+
+
+setup(local) ->
+    setup();
+
+setup(remote) ->
+    {remote, setup()};
+
+setup({A, B}) ->
+    Ctx = test_util:start_couch([couch_replicator]),
+    config:set("httpd", "max_http_request_size", "10000", false),
+    Source = setup(A),
+    Target = setup(B),
+    {Ctx, {Source, Target}}.
+
+
+teardown({remote, DbName}) ->
+    teardown(DbName);
+teardown(DbName) ->
+    ok = couch_server:delete(DbName, [?ADMIN_CTX]),
+    ok.
+
+teardown(_, {Ctx, {Source, Target}}) ->
+    teardown(Source),
+    teardown(Target),
+    ok = application:stop(couch_replicator),
+    ok = test_util:stop_couch(Ctx).
+
+
+reduce_max_request_size_test_() ->
+    Pairs = [{local, remote}, {remote, remote}],
+    {
+        "Replicate docs when target has a small max_http_request_size",
+        {
+            foreachx,
+            fun setup/1, fun teardown/2,
+            [{Pair, fun should_replicate_all_docs/2}
+             || Pair <- Pairs]
+            ++ [{Pair, fun should_replicate_one/2}
+             || Pair <- Pairs]
+            % Flaky: see issue 574. Sometimes PUTs with a doc and
+            % attachment which exceed the maximum request size are simply
+            % closed instead of returning a 413 response.
+            ++ [{Pair, fun should_replicate_one_with_attachment/2}
+             || Pair <- Pairs]
+        }
+    }.
+
+
+% Test documents which are individually below max_http_request_size, but
+% whose combined batch exceeds it. The replicator should automatically split
+% the batch into smaller batches and POST those separately.
+should_replicate_all_docs({From, To}, {_Ctx, {Source, Target}}) ->
+    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+     {inorder, [should_populate_source(Source),
+                should_replicate(Source, Target),
+                should_compare_databases(Source, Target, [])]}}.
+
+
+% If a document is too large to POST as a single request, that document is
+% skipped, but replication overall will make progress and not crash.
+should_replicate_one({From, To}, {_Ctx, {Source, Target}}) ->
+    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+     {inorder, [should_populate_source_one_large_one_small(Source),
+                should_replicate(Source, Target),
+                should_compare_databases(Source, Target, [<<"doc0">>])]}}.
+
+
+% If a document has an attachment > 64 * 1024 bytes, the replicator will
+% switch to POST-ing individual documents directly and skip bulk_docs. Test
+% that case separately.
+% See the note in the main test function on why this case is flaky.
+should_replicate_one_with_attachment({From, To}, {_Ctx, {Source, Target}}) ->
+    {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+     {inorder, [should_populate_source_one_large_attachment(Source),
+                should_populate_source(Source),
+                should_replicate(Source, Target),
+                should_compare_databases(Source, Target, [<<"doc0">>])]}}.
+
+
+should_populate_source({remote, Source}) ->
+    should_populate_source(Source);
+
+should_populate_source(Source) ->
+    {timeout, ?TIMEOUT_EUNIT, ?_test(add_docs(Source, 5, 3000, 0))}.
+
+
+should_populate_source_one_large_one_small({remote, Source}) ->
+    should_populate_source_one_large_one_small(Source);
+
+should_populate_source_one_large_one_small(Source) ->
+    {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_one_small(Source, 12000, 3000))}.
+
+
+should_populate_source_one_large_attachment({remote, Source}) ->
+    should_populate_source_one_large_attachment(Source);
+
+should_populate_source_one_large_attachment(Source) ->
+    {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}.
+
+
+should_replicate({remote, Source}, Target) ->
+    should_replicate(db_url(Source), Target);
+
+should_replicate(Source, {remote, Target}) ->
+    should_replicate(Source, db_url(Target));
+
+should_replicate(Source, Target) ->
+    {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
+
+
+should_compare_databases({remote, Source}, Target, ExceptIds) ->
+    should_compare_databases(Source, Target, ExceptIds);
+
+should_compare_databases(Source, {remote, Target}, ExceptIds) ->
+    should_compare_databases(Source, Target, ExceptIds);
+
+should_compare_databases(Source, Target, ExceptIds) ->
+    {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target, ExceptIds))}.
+
+
+binary_chunk(Size) when is_integer(Size), Size > 0 ->
+    << <<"x">> || _ <- lists:seq(1, Size) >>.
+
+
+add_docs(DbName, DocCount, DocSize, AttSize) ->
+    [begin
+        DocId = iolist_to_binary(["doc", integer_to_list(Id)]),
+        add_doc(DbName, DocId, DocSize, AttSize)
+    end || Id <- lists:seq(1, DocCount)],
+    ok.
+
+
+one_large_one_small(DbName, Large, Small) ->
+    add_doc(DbName, <<"doc0">>, Large, 0),
+    add_doc(DbName, <<"doc1">>, Small, 0).
+
+
+one_large_attachment(DbName, Size, AttSize) ->
+    add_doc(DbName, <<"doc0">>, Size, AttSize).
+
+
+add_doc(DbName, DocId, Size, AttSize) when is_binary(DocId) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    Doc0 = #doc{id = DocId, body = {[{<<"x">>, binary_chunk(Size)}]}},
+    Doc = Doc0#doc{atts = atts(AttSize)},
+    {ok, _} = couch_db:update_doc(Db, Doc, []),
+    couch_db:close(Db).
+
+
+atts(0) ->
+    [];
+
+atts(Size) ->
+    [couch_att:new([
+        {name, <<"att1">>},
+        {type, <<"app/binary">>},
+        {att_len, Size},
+        {data, fun(Bytes) -> binary_chunk(Bytes) end}
+    ])].
+
+
+replicate(Source, Target) ->
+    replicate({[
+        {<<"source">>, Source},
+        {<<"target">>, Target},
+        {<<"worker_processes">>, "1"} % This makes batch_size predictable
+    ]}).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl b/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
new file mode 100644
index 000000000..fd0409164
--- /dev/null
+++ b/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
@@ -0,0 +1,135 @@
+-module(couch_replicator_test_helper).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_replicator/src/couch_replicator.hrl").
+
+-export([
+    compare_dbs/2,
+    compare_dbs/3,
+    db_url/1,
+    replicate/1,
+    get_pid/1,
+    replicate/2
+]).
+
+
+compare_dbs(Source, Target) ->
+    compare_dbs(Source, Target, []).
+
+
+compare_dbs(Source, Target, ExceptIds) ->
+    {ok, SourceDb} = couch_db:open_int(Source, []),
+    {ok, TargetDb} = couch_db:open_int(Target, []),
+
+    Fun = fun(FullDocInfo, Acc) ->
+        {ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo),
+        Id = DocSource#doc.id,
+        case lists:member(Id, ExceptIds) of
+            true ->
+                ?assertEqual(not_found, couch_db:get_doc_info(TargetDb, Id));
+            false ->
+                {ok, TDoc} = couch_db:open_doc(TargetDb, Id),
+                compare_docs(DocSource, TDoc)
+        end,
+        {ok, Acc}
+    end,
+
+    {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []),
+    ok = couch_db:close(SourceDb),
+    ok = couch_db:close(TargetDb).
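+
+% compare_dbs/3 treats ExceptIds as "must be absent from the target": those
+% ids were expected to be skipped by the replicator, so the target lookup
+% must return not_found, while every other doc must match the source field
+% for field (compare_docs/2 below also checks attachment names, encodings,
+% lengths and md5s).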
+
+
+compare_docs(Doc1, Doc2) ->
+    ?assertEqual(Doc1#doc.body, Doc2#doc.body),
+    #doc{atts = Atts1} = Doc1,
+    #doc{atts = Atts2} = Doc2,
+    ?assertEqual(lists:sort([couch_att:fetch(name, Att) || Att <- Atts1]),
+        lists:sort([couch_att:fetch(name, Att) || Att <- Atts2])),
+    FunCompareAtts = fun(Att) ->
+        AttName = couch_att:fetch(name, Att),
+        {ok, AttTarget} = find_att(Atts2, AttName),
+        SourceMd5 = att_md5(Att),
+        TargetMd5 = att_md5(AttTarget),
+        case AttName of
+            <<"att1">> ->
+                ?assertEqual(gzip, couch_att:fetch(encoding, Att)),
+                ?assertEqual(gzip, couch_att:fetch(encoding, AttTarget)),
+                DecSourceMd5 = att_decoded_md5(Att),
+                DecTargetMd5 = att_decoded_md5(AttTarget),
+                ?assertEqual(DecSourceMd5, DecTargetMd5);
+            _ ->
+                ?assertEqual(identity, couch_att:fetch(encoding, Att)),
+                ?assertEqual(identity, couch_att:fetch(encoding, AttTarget))
+        end,
+        ?assertEqual(SourceMd5, TargetMd5),
+        ?assert(is_integer(couch_att:fetch(disk_len, Att))),
+        ?assert(is_integer(couch_att:fetch(att_len, Att))),
+        ?assert(is_integer(couch_att:fetch(disk_len, AttTarget))),
+        ?assert(is_integer(couch_att:fetch(att_len, AttTarget))),
+        ?assertEqual(couch_att:fetch(disk_len, Att),
+            couch_att:fetch(disk_len, AttTarget)),
+        ?assertEqual(couch_att:fetch(att_len, Att),
+            couch_att:fetch(att_len, AttTarget)),
+        ?assertEqual(couch_att:fetch(type, Att),
+            couch_att:fetch(type, AttTarget)),
+        ?assertEqual(couch_att:fetch(md5, Att),
+            couch_att:fetch(md5, AttTarget))
+    end,
+    lists:foreach(FunCompareAtts, Atts1).
+
+
+find_att([], _Name) ->
+    nil;
+find_att([Att | Rest], Name) ->
+    case couch_att:fetch(name, Att) of
+        Name ->
+            {ok, Att};
+        _ ->
+            find_att(Rest, Name)
+    end.
+
+
+att_md5(Att) ->
+    Md50 = couch_att:foldl(
+        Att,
+        fun(Chunk, Acc) -> couch_hash:md5_hash_update(Acc, Chunk) end,
+        couch_hash:md5_hash_init()),
+    couch_hash:md5_hash_final(Md50).
+
+att_decoded_md5(Att) ->
+    Md50 = couch_att:foldl_decode(
+        Att,
+        fun(Chunk, Acc) -> couch_hash:md5_hash_update(Acc, Chunk) end,
+        couch_hash:md5_hash_init()),
+    couch_hash:md5_hash_final(Md50).
+
+db_url(DbName) ->
+    iolist_to_binary([
+        "http://", config:get("httpd", "bind_address", "127.0.0.1"),
+        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+        "/", DbName
+    ]).
+
+get_pid(RepId) ->
+    Pid = global:whereis_name({couch_replicator_scheduler_job, RepId}),
+    ?assert(is_pid(Pid)),
+    Pid.
+
+replicate(Source, Target) ->
+    replicate({[
+        {<<"source">>, Source},
+        {<<"target">>, Target}
+    ]}).
+
+replicate({[_ | _]} = RepObject) ->
+    {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER),
+    ok = couch_replicator_scheduler:add_job(Rep),
+    couch_replicator_scheduler:reschedule(),
+    Pid = get_pid(Rep#rep.id),
+    MonRef = erlang:monitor(process, Pid),
+    receive
+        {'DOWN', MonRef, process, Pid, _} ->
+            ok
+    end,
+    ok = couch_replicator_scheduler:remove_job(Rep#rep.id).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl
new file mode 100644
index 000000000..c2fcf8bf1
--- /dev/null
+++ b/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl
@@ -0,0 +1,181 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_use_checkpoints_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-import(couch_replicator_test_helper, [ + db_url/1, + replicate/1 +]). + +-define(DOCS_COUNT, 100). +-define(TIMEOUT_EUNIT, 30). +-define(i2l(I), integer_to_list(I)). +-define(io2b(Io), iolist_to_binary(Io)). + + +start(false) -> + fun + ({finished, _, {CheckpointHistory}}) -> + ?assertEqual([{<<"use_checkpoints">>,false}], CheckpointHistory); + (_) -> + ok + end; +start(true) -> + fun + ({finished, _, {CheckpointHistory}}) -> + ?assertNotEqual(false, lists:keyfind(<<"session_id">>, + 1, CheckpointHistory)); + (_) -> + ok + end. + +stop(_, _) -> + ok. + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + DbName. + +setup(local) -> + setup(); +setup(remote) -> + {remote, setup()}; +setup({_, Fun, {A, B}}) -> + Ctx = test_util:start_couch([couch_replicator]), + {ok, Listener} = couch_replicator_notifier:start_link(Fun), + Source = setup(A), + Target = setup(B), + {Ctx, {Source, Target, Listener}}. + +teardown({remote, DbName}) -> + teardown(DbName); +teardown(DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]), + ok. + +teardown(_, {Ctx, {Source, Target, Listener}}) -> + teardown(Source), + teardown(Target), + + couch_replicator_notifier:stop(Listener), + ok = application:stop(couch_replicator), + ok = test_util:stop_couch(Ctx). + +use_checkpoints_test_() -> + { + "Replication use_checkpoints feature tests", + { + foreachx, + fun start/1, fun stop/2, + [{UseCheckpoints, fun use_checkpoints_tests/2} + || UseCheckpoints <- [false, true]] + } + }. + +use_checkpoints_tests(UseCheckpoints, Fun) -> + Pairs = [{local, local}, {local, remote}, + {remote, local}, {remote, remote}], + { + "use_checkpoints: " ++ atom_to_list(UseCheckpoints), + { + foreachx, + fun setup/1, fun teardown/2, + [{{UseCheckpoints, Fun, Pair}, fun should_test_checkpoints/2} + || Pair <- Pairs] + } + }. + +should_test_checkpoints({UseCheckpoints, _, {From, To}}, {_Ctx, {Source, Target, _}}) -> + should_test_checkpoints(UseCheckpoints, {From, To}, {Source, Target}). +should_test_checkpoints(UseCheckpoints, {From, To}, {Source, Target}) -> + {lists:flatten(io_lib:format("~p -> ~p", [From, To])), + {inorder, [ + should_populate_source(Source, ?DOCS_COUNT), + should_replicate(Source, Target, UseCheckpoints), + should_compare_databases(Source, Target) + ]}}. + +should_populate_source({remote, Source}, DocCount) -> + should_populate_source(Source, DocCount); +should_populate_source(Source, DocCount) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(populate_db(Source, DocCount))}. + +should_replicate({remote, Source}, Target, UseCheckpoints) -> + should_replicate(db_url(Source), Target, UseCheckpoints); +should_replicate(Source, {remote, Target}, UseCheckpoints) -> + should_replicate(Source, db_url(Target), UseCheckpoints); +should_replicate(Source, Target, UseCheckpoints) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target, UseCheckpoints))}. 
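+
+% The two notifier callbacks built by start/1 above encode the contract
+% under test: with use_checkpoints=false the finished event's checkpoint
+% history must be exactly [{<<"use_checkpoints">>, false}] (no checkpoints
+% were written), while with use_checkpoints=true the history must contain
+% a <<"session_id">> entry, i.e. at least one checkpoint was recorded.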
+ +should_compare_databases({remote, Source}, Target) -> + should_compare_databases(Source, Target); +should_compare_databases(Source, {remote, Target}) -> + should_compare_databases(Source, Target); +should_compare_databases(Source, Target) -> + {timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}. + + +populate_db(DbName, DocCount) -> + {ok, Db} = couch_db:open_int(DbName, []), + Docs = lists:foldl( + fun(DocIdCounter, Acc) -> + Id = ?io2b(["doc", ?i2l(DocIdCounter)]), + Value = ?io2b(["val", ?i2l(DocIdCounter)]), + Doc = #doc{ + id = Id, + body = {[ {<<"value">>, Value} ]} + }, + [Doc | Acc] + end, + [], lists:seq(1, DocCount)), + {ok, _} = couch_db:update_docs(Db, Docs, []), + ok = couch_db:close(Db). + +compare_dbs(Source, Target) -> + {ok, SourceDb} = couch_db:open_int(Source, []), + {ok, TargetDb} = couch_db:open_int(Target, []), + Fun = fun(FullDocInfo, Acc) -> + {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo), + {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]), + DocId = couch_util:get_value(<<"_id">>, Props), + DocTarget = case couch_db:open_doc(TargetDb, DocId) of + {ok, DocT} -> + DocT; + Error -> + erlang:error( + {assertion_failed, + [{module, ?MODULE}, {line, ?LINE}, + {reason, lists:concat(["Error opening document '", + ?b2l(DocId), "' from target: ", + couch_util:to_list(Error)])}]}) + end, + DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]), + ?assertEqual(DocJson, DocTargetJson), + {ok, Acc} + end, + {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []), + ok = couch_db:close(SourceDb), + ok = couch_db:close(TargetDb). + +replicate(Source, Target, UseCheckpoints) -> + replicate({[ + {<<"source">>, Source}, + {<<"target">>, Target}, + {<<"use_checkpoints">>, UseCheckpoints} + ]}). + diff --git a/src/ddoc_cache/test/ddoc_cache_basic_test.erl b/src/ddoc_cache/test/ddoc_cache_basic_test.erl deleted file mode 100644 index b576d88bb..000000000 --- a/src/ddoc_cache/test/ddoc_cache_basic_test.erl +++ /dev/null @@ -1,175 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_basic_test). - - --export([ - recover/1 -]). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). --include("ddoc_cache_test.hrl"). - - -recover(DbName) -> - {ok, {DbName, totes_custom}}. - - -start_couch() -> - Ctx = ddoc_cache_tutil:start_couch(), - meck:new(ddoc_cache_ev, [passthrough]), - Ctx. - - -stop_couch(Ctx) -> - meck:unload(), - ddoc_cache_tutil:stop_couch(Ctx). - - -check_basic_test_() -> - { - setup, - fun start_couch/0, - fun stop_couch/1, - ddoc_cache_tutil:with([ - {"cache_ddoc", fun cache_ddoc/1}, - {"cache_ddoc_rev", fun cache_ddoc_rev/1}, - {"cache_vdu", fun cache_vdu/1}, - {"cache_custom", fun cache_custom/1}, - {"cache_ddoc_refresher_unchanged", fun cache_ddoc_refresher_unchanged/1}, - {"dont_cache_not_found", fun dont_cache_not_found/1}, - {"deprecated_api_works", fun deprecated_api_works/1} - ]) - }. 
- - -check_no_vdu_test_() -> - { - setup, - fun() -> ddoc_cache_tutil:start_couch([{write_ddocs, false}]) end, - fun ddoc_cache_tutil:stop_couch/1, - ddoc_cache_tutil:with([ - {"cache_no_vdu_no_ddoc", fun cache_no_vdu_no_ddoc/1}, - {"cache_no_vdu_empty_ddoc", fun cache_no_vdu_empty_ddoc/1} - ]) - }. - - -cache_ddoc({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - ?assertEqual(0, ets:info(?CACHE, size)), - Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR), - ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1), - meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), - meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), - ?assertEqual(2, ets:info(?CACHE, size)), - Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR), - ?assertEqual(Resp1, Resp2), - ?assertEqual(2, ets:info(?CACHE, size)). - - -cache_ddoc_rev({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR), - ?assertEqual(0, ets:info(?CACHE, size)), - Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev), - ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1), - meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), - meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), - ?assertEqual(2, ets:info(?CACHE, size)), - Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev), - ?assertEqual(Resp1, Resp2), - ?assertEqual(2, ets:info(?CACHE, size)), - - % Assert that the non-rev cache entry is separate - Resp3 = ddoc_cache:open_doc(DbName, ?FOOBAR), - ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp3), - ?assertEqual(2, ets:info(?CACHE, size)). - - -cache_vdu({DbName, _}) -> - ddoc_cache_tutil:clear(), - ?assertEqual(0, ets:info(?CACHE, size)), - Resp1 = ddoc_cache:open_validation_funs(DbName), - ?assertMatch({ok, [_]}, Resp1), - ?assertEqual(1, ets:info(?CACHE, size)), - Resp2 = ddoc_cache:open_validation_funs(DbName), - ?assertEqual(Resp1, Resp2), - ?assertEqual(1, ets:info(?CACHE, size)). - - -cache_custom({DbName, _}) -> - ddoc_cache_tutil:clear(), - ?assertEqual(0, ets:info(?CACHE, size)), - Resp1 = ddoc_cache:open_custom(DbName, ?MODULE), - ?assertMatch({ok, {DbName, totes_custom}}, Resp1), - ?assertEqual(1, ets:info(?CACHE, size)), - Resp2 = ddoc_cache:open_custom(DbName, ?MODULE), - ?assertEqual(Resp1, Resp2), - ?assertEqual(1, ets:info(?CACHE, size)). - - -cache_ddoc_refresher_unchanged({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - ?assertEqual(0, ets:info(?CACHE, size)), - ddoc_cache:open_doc(DbName, ?FOOBAR), - meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), - meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), - Tab1 = [_, _] = lists:sort(ets:tab2list(?CACHE)), - ddoc_cache:open_doc(DbName, ?FOOBAR), - meck:wait(ddoc_cache_ev, event, [accessed, '_'], 1000), - Tab2 = lists:sort(ets:tab2list(?CACHE)), - ?assertEqual(Tab2, Tab1). - - -dont_cache_not_found({DbName, _}) -> - DDocId = <<"_design/not_found">>, - ddoc_cache_tutil:clear(), - Resp = ddoc_cache:open_doc(DbName, DDocId), - ?assertEqual({not_found, missing}, Resp), - ?assertEqual(0, ets:info(?CACHE, size)), - ?assertEqual(0, ets:info(?LRU, size)). - - -deprecated_api_works({DbName, _}) -> - ddoc_cache_tutil:clear(), - {ok, _} = ddoc_cache:open(DbName, ?FOOBAR), - {ok, _} = ddoc_cache:open(DbName, <<"foobar">>), - {ok, _} = ddoc_cache:open(DbName, ?MODULE), - {ok, _} = ddoc_cache:open(DbName, validation_funs). 
- - -cache_no_vdu_no_ddoc({DbName, _}) -> - ddoc_cache_tutil:clear(), - Resp = ddoc_cache:open_validation_funs(DbName), - ?assertEqual({ok, []}, Resp), - ?assertEqual(1, ets:info(?CACHE, size)), - ?assertEqual(1, ets:info(?LRU, size)). - - -cache_no_vdu_empty_ddoc({DbName, _}) -> - ddoc_cache_tutil:clear(), - DDoc = #doc{ - id = <<"_design/no_vdu">>, - body = {[]} - }, - {ok, _} = fabric:update_docs(DbName, [DDoc], [?ADMIN_CTX]), - Resp = ddoc_cache:open_validation_funs(DbName), - ?assertEqual({ok, []}, Resp), - ?assertEqual(1, ets:info(?CACHE, size)), - ?assertEqual(1, ets:info(?LRU, size)). diff --git a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl deleted file mode 100644 index b1a185bdc..000000000 --- a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl +++ /dev/null @@ -1,77 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_coverage_test). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). --include("ddoc_cache_test.hrl"). - - -coverage_test_() -> - { - setup, - fun ddoc_cache_tutil:start_couch/0, - fun ddoc_cache_tutil:stop_couch/1, - [ - fun restart_lru/0, - fun stop_on_evictor_death/0 - ] - }. - - -restart_lru() -> - send_bad_messages(ddoc_cache_lru), - ?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c})), - ?assertEqual({ok, foo}, ddoc_cache_lru:code_change(1, foo, [])). - - -stop_on_evictor_death() -> - meck:new(ddoc_cache_ev, [passthrough]), - try - Lru = whereis(ddoc_cache_lru), - State = sys:get_state(Lru), - Evictor = element(4, State), - Ref = erlang:monitor(process, Lru), - exit(Evictor, shutdown), - receive - {'DOWN', Ref, _, _, Reason} -> - ?assertEqual(shutdown, Reason) - end, - meck:wait(ddoc_cache_ev, event, [lru_init, '_'], 1000), - ?assert(whereis(ddoc_cache_lru) /= Lru) - after - meck:unload() - end. - - -send_bad_messages(Name) -> - wait_for_restart(Name, fun() -> - ?assertEqual({invalid_call, foo}, gen_server:call(Name, foo)) - end), - wait_for_restart(Name, fun() -> - gen_server:cast(Name, foo) - end), - wait_for_restart(Name, fun() -> - whereis(Name) ! foo - end). - - -wait_for_restart(Server, Fun) -> - Ref = erlang:monitor(process, whereis(Server)), - Fun(), - receive - {'DOWN', Ref, _, _, _} -> - ok - end, - ?assert(is_pid(test_util:wait_process(Server))). diff --git a/src/ddoc_cache/test/ddoc_cache_disabled_test.erl b/src/ddoc_cache/test/ddoc_cache_disabled_test.erl deleted file mode 100644 index d46bdde32..000000000 --- a/src/ddoc_cache/test/ddoc_cache_disabled_test.erl +++ /dev/null @@ -1,62 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_disabled_test). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). --include("ddoc_cache_test.hrl"). - - -start_couch() -> - Ctx = ddoc_cache_tutil:start_couch(), - config:set("ddoc_cache", "max_size", "0", false), - Ctx. - - -check_disabled_test_() -> - { - setup, - fun start_couch/0, - fun ddoc_cache_tutil:stop_couch/1, - ddoc_cache_tutil:with([ - {"resp_ok", fun resp_ok/1}, - {"resp_not_found", fun resp_not_found/1}, - {"check_effectively_disabled", fun check_effectively_disabled/1} - ]) - }. - - -resp_ok({DbName, _}) -> - ddoc_cache_tutil:clear(), - Resp = ddoc_cache:open_doc(DbName, ?FOOBAR), - ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp), - ?assertEqual(0, ets:info(?CACHE, size)), - ?assertEqual(0, ets:info(?LRU, size)). - - -resp_not_found({DbName, _}) -> - ddoc_cache_tutil:clear(), - Resp = ddoc_cache:open_doc(DbName, <<"_design/not_found">>), - ?assertEqual({not_found, missing}, Resp), - ?assertEqual(0, ets:info(?CACHE, size)), - ?assertEqual(0, ets:info(?LRU, size)). - - -check_effectively_disabled({DbName, _}) -> - config:set("ddoc_cache", "max_size", "1", false), - ddoc_cache_tutil:clear(), - Resp = ddoc_cache:open_doc(DbName, ?FOOBAR), - ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp), - ?assertEqual(0, ets:info(?CACHE, size)), - ?assertEqual(0, ets:info(?LRU, size)). diff --git a/src/ddoc_cache/test/ddoc_cache_entry_test.erl b/src/ddoc_cache/test/ddoc_cache_entry_test.erl deleted file mode 100644 index c992bea8d..000000000 --- a/src/ddoc_cache/test/ddoc_cache_entry_test.erl +++ /dev/null @@ -1,159 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_entry_test). - - --export([ - recover/1 -]). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). --include("ddoc_cache_test.hrl"). - - -recover(<<"foo">>) -> - timer:sleep(30000); - -recover(DbName) -> - {ok, {DbName, such_custom}}. - - -start_couch() -> - Ctx = ddoc_cache_tutil:start_couch(), - meck:new(ddoc_cache_ev, [passthrough]), - Ctx. - - -stop_couch(Ctx) -> - meck:unload(), - ddoc_cache_tutil:stop_couch(Ctx). 
- - -check_entry_test_() -> - { - setup, - fun start_couch/0, - fun stop_couch/1, - ddoc_cache_tutil:with([ - {"cancel_and_replace_opener", fun cancel_and_replace_opener/1}, - {"condenses_access_messages", fun condenses_access_messages/1}, - {"kill_opener_on_terminate", fun kill_opener_on_terminate/1}, - {"evict_when_not_accessed", fun evict_when_not_accessed/1}, - {"open_dead_entry", fun open_dead_entry/1}, - {"handles_bad_messages", fun handles_bad_messages/1}, - {"handles_code_change", fun handles_code_change/1} - ]) - }. - - -cancel_and_replace_opener(_) -> - Key = {ddoc_cache_entry_custom, {<<"foo">>, ?MODULE}}, - true = ets:insert_new(?CACHE, #entry{key = Key}), - {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined), - Opener1 = element(4, sys:get_state(Entry)), - Ref1 = erlang:monitor(process, Opener1), - gen_server:cast(Entry, force_refresh), - receive {'DOWN', Ref1, _, _, _} -> ok end, - Opener2 = element(4, sys:get_state(Entry)), - ?assert(Opener2 /= Opener1), - ?assert(is_process_alive(Opener2)), - % Clean up after ourselves - unlink(Entry), - ddoc_cache_entry:shutdown(Entry). - - -condenses_access_messages({DbName, _}) -> - meck:reset(ddoc_cache_ev), - Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}}, - true = ets:insert(?CACHE, #entry{key = Key}), - {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined), - erlang:suspend_process(Entry), - lists:foreach(fun(_) -> - gen_server:cast(Entry, accessed) - end, lists:seq(1, 100)), - erlang:resume_process(Entry), - meck:wait(1, ddoc_cache_ev, event, [accessed, Key], 1000), - ?assertError( - timeout, - meck:wait(2, ddoc_cache_ev, event, [accessed, Key], 100) - ), - unlink(Entry), - ddoc_cache_entry:shutdown(Entry). - - -kill_opener_on_terminate(_) -> - Pid = spawn(fun() -> receive _ -> ok end end), - ?assert(is_process_alive(Pid)), - St = {st, key, val, Pid, waiters, ts, accessed}, - ?assertEqual(ok, ddoc_cache_entry:terminate(normal, St)), - ?assert(not is_process_alive(Pid)). - - -evict_when_not_accessed(_) -> - meck:reset(ddoc_cache_ev), - Key = {ddoc_cache_entry_custom, {<<"bar">>, ?MODULE}}, - true = ets:insert_new(?CACHE, #entry{key = Key}), - {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined), - Ref = erlang:monitor(process, Entry), - AccessCount1 = element(7, sys:get_state(Entry)), - ?assertEqual(1, AccessCount1), - ok = gen_server:cast(Entry, refresh), - - meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000), - - AccessCount2 = element(7, sys:get_state(Entry)), - ?assertEqual(0, AccessCount2), - ok = gen_server:cast(Entry, refresh), - receive {'DOWN', Ref, _, _, Reason} -> Reason end, - ?assertEqual(normal, Reason), - ?assertEqual(0, ets:info(?CACHE, size)). - - -open_dead_entry({DbName, _}) -> - Pid = spawn(fun() -> ok end), - Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}}, - ?assertEqual(recover(DbName), ddoc_cache_entry:open(Pid, Key)). - - -handles_bad_messages(_) -> - CallExpect = {stop, {bad_call, foo}, {bad_call, foo}, baz}, - CastExpect = {stop, {bad_cast, foo}, bar}, - InfoExpect = {stop, {bad_info, foo}, bar}, - ?assertEqual(CallExpect, ddoc_cache_entry:handle_call(foo, bar, baz)), - ?assertEqual(CastExpect, ddoc_cache_entry:handle_cast(foo, bar)), - ?assertEqual(InfoExpect, ddoc_cache_entry:handle_info(foo, bar)). - - -handles_code_change(_) -> - CCExpect = {ok, bar}, - ?assertEqual(CCExpect, ddoc_cache_entry:code_change(foo, bar, baz)). 
- - -handles_bad_shutdown_test_() -> - {timeout, 10, ?_test(begin - ErrorPid = spawn(fun() -> - receive - _ -> exit(bad_shutdown) - end - end), - ?assertExit(bad_shutdown, ddoc_cache_entry:shutdown(ErrorPid)), - NotDeadYetPid = spawn(fun() -> - timer:sleep(infinity) - end), - ?assertExit( - {timeout, {entry_shutdown, NotDeadYetPid}}, - ddoc_cache_entry:shutdown(NotDeadYetPid) - ) - end)}. diff --git a/src/ddoc_cache/test/ddoc_cache_ev.erl b/src/ddoc_cache/test/ddoc_cache_ev.erl deleted file mode 100644 index a451342cf..000000000 --- a/src/ddoc_cache/test/ddoc_cache_ev.erl +++ /dev/null @@ -1,21 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_ev). - --export([ - event/2 -]). - - -event(Name, Arg) -> - couch_log:error("~s :: ~s :: ~p", [?MODULE, Name, Arg]). diff --git a/src/ddoc_cache/test/ddoc_cache_eviction_test.erl b/src/ddoc_cache/test/ddoc_cache_eviction_test.erl deleted file mode 100644 index bd61afc37..000000000 --- a/src/ddoc_cache/test/ddoc_cache_eviction_test.erl +++ /dev/null @@ -1,96 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_eviction_test). - - --export([ - recover/1 -]). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). --include_lib("mem3/include/mem3.hrl"). --include("ddoc_cache_test.hrl"). - - -recover(DbName) -> - {ok, {DbName, totes_custom}}. - - -start_couch() -> - Ctx = ddoc_cache_tutil:start_couch(), - meck:new(ddoc_cache_ev, [passthrough]), - Ctx. - - -stop_couch(Ctx) -> - meck:unload(), - ddoc_cache_tutil:stop_couch(Ctx). - - -check_eviction_test_() -> - { - setup, - fun start_couch/0, - fun stop_couch/1, - ddoc_cache_tutil:with([ - {"evict_all", fun evict_all/1}, - {"dont_evict_all_unrelated", fun dont_evict_all_unrelated/1}, - {"check_upgrade_clause", fun check_upgrade_clause/1} - ]) - }. - - -evict_all({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR), - #shard{name = ShardName} = hd(mem3:shards(DbName)), - {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR), - {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev), - {ok, _} = ddoc_cache:open_validation_funs(DbName), - {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE), - ?assertEqual(4, ets:info(?CACHE, size)), - {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo), - meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000), - meck:wait(4, ddoc_cache_ev, event, [removed, '_'], 1000), - ?assertEqual(0, ets:info(?CACHE, size)). 
- - -dont_evict_all_unrelated({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR), - {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR), - {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev), - {ok, _} = ddoc_cache:open_validation_funs(DbName), - {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE), - ?assertEqual(4, ets:info(?CACHE, size)), - ShardName = <<"shards/00000000-ffffffff/test.1384769918">>, - {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo), - meck:wait(ddoc_cache_ev, event, [evict_noop, <<"test">>], 1000), - ?assertEqual(4, ets:info(?CACHE, size)). - - -check_upgrade_clause({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR), - meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), - meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), - ?assertEqual(2, ets:info(?CACHE, size)), - gen_server:cast(ddoc_cache_opener, {do_evict, DbName}), - meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000), - meck:wait(2, ddoc_cache_ev, event, [removed, '_'], 1000), - ?assertEqual(0, ets:info(?CACHE, size)). diff --git a/src/ddoc_cache/test/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/ddoc_cache_lru_test.erl deleted file mode 100644 index e37f1c090..000000000 --- a/src/ddoc_cache/test/ddoc_cache_lru_test.erl +++ /dev/null @@ -1,216 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_lru_test). - - --export([ - recover/1 -]). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). --include("ddoc_cache_test.hrl"). - - -recover(<<"pause", _/binary>>) -> - receive go -> ok end, - {ok, paused}; - -recover(<<"big", _/binary>>) -> - {ok, [couch_rand:uniform() || _ <- lists:seq(1, 8192)]}; - -recover(DbName) -> - {ok, DbName}. - - -start_couch() -> - Ctx = ddoc_cache_tutil:start_couch(), - meck:new(ddoc_cache_ev, [passthrough]), - Ctx. - - -stop_couch(Ctx) -> - meck:unload(), - ddoc_cache_tutil:stop_couch(Ctx). - - -check_not_started_test() -> - % Starting couch, but not ddoc_cache - Ctx = test_util:start_couch(), - try - Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}}, - ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key)) - after - test_util:stop_couch(Ctx) - end. - - -check_lru_test_() -> - { - setup, - fun start_couch/0, - fun stop_couch/1, - ddoc_cache_tutil:with([ - {"check_multi_start", fun check_multi_start/1}, - {"check_multi_open", fun check_multi_open/1}, - {"check_capped_size", fun check_capped_size/1}, - {"check_cache_refill", fun check_cache_refill/1}, - {"check_evict_and_exit", fun check_evict_and_exit/1} - ]) - }. 
- - -check_multi_start(_) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}}, - % These will all get sent through ddoc_cache_lru - Clients = lists:map(fun(_) -> - spawn_monitor(fun() -> - ddoc_cache_lru:open(Key) - end) - end, lists:seq(1, 10)), - meck:wait(ddoc_cache_ev, event, [started, Key], 1000), - lists:foreach(fun({Pid, _Ref}) -> - ?assert(is_process_alive(Pid)) - end, Clients), - [#entry{pid = Pid}] = ets:tab2list(?CACHE), - Opener = element(4, sys:get_state(Pid)), - OpenerRef = erlang:monitor(process, Opener), - ?assert(is_process_alive(Opener)), - Opener ! go, - receive {'DOWN', OpenerRef, _, _, _} -> ok end, - lists:foreach(fun({_, Ref}) -> - receive - {'DOWN', Ref, _, _, normal} -> ok - end - end, Clients). - - -check_multi_open(_) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}}, - % We wait after the first client so that - % the rest of the clients go directly to - % ddoc_cache_entry bypassing ddoc_cache_lru - Client1 = spawn_monitor(fun() -> - ddoc_cache_lru:open(Key) - end), - meck:wait(ddoc_cache_ev, event, [started, Key], 1000), - Clients = [Client1] ++ lists:map(fun(_) -> - spawn_monitor(fun() -> - ddoc_cache_lru:open(Key) - end) - end, lists:seq(1, 9)), - lists:foreach(fun({Pid, _Ref}) -> - ?assert(is_process_alive(Pid)) - end, Clients), - [#entry{pid = Pid}] = ets:tab2list(?CACHE), - Opener = element(4, sys:get_state(Pid)), - OpenerRef = erlang:monitor(process, Opener), - ?assert(is_process_alive(Opener)), - Opener ! go, - receive {'DOWN', OpenerRef, _, _, _} -> ok end, - lists:foreach(fun({_, Ref}) -> - receive {'DOWN', Ref, _, _, normal} -> ok end - end, Clients). - - -check_capped_size(_) -> - % The extra factor of two in the size checks is - % a fudge factor. We don't reject entries from - % the cache if they would put us over the limit - % as we don't have the size information a - % priori. - config:set("ddoc_cache", "max_size", "1048576", false), - MaxSize = 1048576, - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - lists:foreach(fun(I) -> - DbName = list_to_binary("big_" ++ integer_to_list(I)), - ddoc_cache:open_custom(DbName, ?MODULE), - meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000), - ?assert(cache_size() < MaxSize * 2) - end, lists:seq(1, 25)), - lists:foreach(fun(I) -> - DbName = list_to_binary("big_" ++ integer_to_list(I)), - ddoc_cache:open_custom(DbName, ?MODULE), - meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000), - ?assert(cache_size() < MaxSize * 2) - end, lists:seq(26, 100)). 
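% A note on the size assertions above: cache_size/0 (defined at the bottom
% of this module) converts the table's ETS word count into bytes. A minimal
% standalone sketch of the same calculation (the helper name here is
% illustrative):

cache_bytes(Table) ->
    % ets:info/2 with 'memory' reports the table size in machine words,
    % so multiply by the VM word size (8 on 64-bit builds) to get bytes.
    ets:info(Table, memory) * erlang:system_info(wordsize).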
- - -check_cache_refill({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - - InitDDoc = fun(I) -> - NumBin = list_to_binary(integer_to_list(I)), - DDocId = <<"_design/", NumBin/binary>>, - Doc = #doc{id = DDocId, body = {[]}}, - {ok, _} = fabric:update_doc(DbName, Doc, [?ADMIN_CTX]), - {ok, _} = ddoc_cache:open_doc(DbName, DDocId), - {ddoc_cache_entry_ddocid, {DbName, DDocId}} - end, - - lists:foreach(fun(I) -> - Key = InitDDoc(I), - meck:wait(ddoc_cache_ev, event, [started, Key], 1000) - end, lists:seq(1, 5)), - - ShardName = mem3:name(hd(mem3:shards(DbName))), - {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo), - meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000), - meck:wait(10, ddoc_cache_ev, event, [removed, '_'], 1000), - ?assertEqual(0, ets:info(?CACHE, size)), - - lists:foreach(fun(I) -> - Key = InitDDoc(I), - meck:wait(ddoc_cache_ev, event, [started, Key], 1000) - end, lists:seq(6, 10)). - - -check_evict_and_exit(_) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - - Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}}, - ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key)), - [#entry{key = Key, pid = Pid}] = ets:tab2list(?CACHE), - - erlang:monitor(process, whereis(ddoc_cache_lru)), - - % Pause the LRU so we can queue multiple messages - erlang:suspend_process(whereis(ddoc_cache_lru)), - - gen_server:cast(ddoc_cache_lru, {do_evict, <<"dbname">>}), - whereis(ddoc_cache_lru) ! {'EXIT', Pid, normal}, - - % Resume the LRU and ensure that it doesn't die - erlang:resume_process(whereis(ddoc_cache_lru)), - - meck:wait(ddoc_cache_ev, event, [evicted, <<"dbname">>], 1000), - - % Make sure it can handle another message - OtherKey = {ddoc_cache_entry_custom, {<<"otherdb">>, ?MODULE}}, - ?assertEqual({ok, <<"otherdb">>}, ddoc_cache_lru:open(OtherKey)), - - % And verify our monitor doesn't fire - timer:sleep(500), - ?assertEqual({messages, []}, process_info(self(), messages)). - - -cache_size() -> - ets:info(?CACHE, memory) * erlang:system_info(wordsize). diff --git a/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl b/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl deleted file mode 100644 index 637a6e872..000000000 --- a/src/ddoc_cache/test/ddoc_cache_no_cache_test.erl +++ /dev/null @@ -1,79 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_no_cache_test). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). - - -ddoc(DDocId) -> - {ok, #doc{ - id = DDocId, - revs = {1, [<<"deadbeefdeadbeef">>]}, - body = {[ - {<<"ohai">>, null} - ]} - }}. - - -not_found(_DDocId) -> - {not_found, missing}. - - -return_error(_DDocId) -> - {error, timeout}. - - -start(Resp) -> - Ctx = ddoc_cache_tutil:start_couch(), - meck:new(fabric), - meck:expect(fabric, open_doc, fun(_, DDocId, _) -> - Resp(DDocId) - end), - Ctx. - - -stop(Ctx) -> - meck:unload(), - ddoc_cache_tutil:stop_couch(Ctx). 
- - -no_cache_open_ok_test() -> - Ctx = start(fun ddoc/1), - try - Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>), - ?assertEqual(ddoc(<<"bar">>), Resp) - after - stop(Ctx) - end. - - -no_cache_open_not_found_test() -> - Ctx = start(fun not_found/1), - try - Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>), - ?assertEqual(not_found(<<"bar">>), Resp) - after - stop(Ctx) - end. - - -no_cache_open_error_test() -> - Ctx = start(fun return_error/1), - try - Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>), - ?assertEqual(return_error(<<"bar">>), Resp) - after - stop(Ctx) - end. diff --git a/src/ddoc_cache/test/ddoc_cache_open_error_test.erl b/src/ddoc_cache/test/ddoc_cache_open_error_test.erl deleted file mode 100644 index c7379d26a..000000000 --- a/src/ddoc_cache/test/ddoc_cache_open_error_test.erl +++ /dev/null @@ -1,46 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_open_error_test). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). --include("ddoc_cache_test.hrl"). - - -start_couch() -> - Ctx = ddoc_cache_tutil:start_couch(), - meck:expect(fabric, open_doc, fun(_, ?FOOBAR, _) -> - erlang:error(test_kaboom) - end), - Ctx. - - -stop_couch(Ctx) -> - meck:unload(), - ddoc_cache_tutil:stop_couch(Ctx). - - -check_open_error_test_() -> - { - setup, - fun start_couch/0, - fun stop_couch/1, - ddoc_cache_tutil:with([ - {"handle_open_error", fun handle_open_error/1} - ]) - }. - - -handle_open_error({DbName, _}) -> - ?assertError(test_kaboom, ddoc_cache:open_doc(DbName, ?FOOBAR)). diff --git a/src/ddoc_cache/test/ddoc_cache_open_test.erl b/src/ddoc_cache/test/ddoc_cache_open_test.erl deleted file mode 100644 index 73d644f71..000000000 --- a/src/ddoc_cache/test/ddoc_cache_open_test.erl +++ /dev/null @@ -1,107 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_open_test). - --export([ - dbname/1, - ddocid/1, - recover/1, - insert/2 -]). - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). --include("ddoc_cache_test.hrl"). - - -%% behaviour callbacks -dbname(DbName) -> - DbName. - - -ddocid(_) -> - no_ddocid. - - -recover({deleted, _DbName}) -> - erlang:error(database_does_not_exist); -recover(DbName) -> - ddoc_cache_entry_validation_funs:recover(DbName). - - -insert(_, _) -> - ok. 
- - -start_couch() -> - Ctx = ddoc_cache_tutil:start_couch(), - meck:new(ddoc_cache_entry_validation_funs, [passthrough]), - meck:expect(ddoc_cache_entry_validation_funs, recover, - ['_'], meck:passthrough()), - Ctx. - - -stop_couch(Ctx) -> - meck:unload(), - ddoc_cache_tutil:stop_couch(Ctx). - - -check_open_error_test_() -> - { - setup, - fun start_couch/0, - fun stop_couch/1, - ddoc_cache_tutil:with([ - {"should_return_database_does_not_exist", - fun should_return_database_does_not_exist/1}, - {"should_not_call_recover_when_database_does_not_exist", - fun should_not_call_recover_when_database_does_not_exist/1}, - {"should_call_recover_when_needed", - fun should_call_recover_when_needed/1}, - {"should_call_recover_when_needed", - fun should_not_crash_lru_process/1} - ]) - }. - - -should_return_database_does_not_exist({DbName, _}) -> - ?assertError( - database_does_not_exist, - ddoc_cache_lru:open({?MODULE, {deleted, DbName}})). - - -should_not_call_recover_when_database_does_not_exist({DbName, _}) -> - meck:reset(ddoc_cache_entry_validation_funs), - ?assertError( - database_does_not_exist, - ddoc_cache_lru:open({?MODULE, {deleted, DbName}})), - ?assertError( - timeout, - meck:wait(1, ddoc_cache_entry_validation_funs, recover, '_', 100)). - - -should_call_recover_when_needed({DbName, _}) -> - meck:reset(ddoc_cache_entry_validation_funs), - ddoc_cache_lru:open({?MODULE, DbName}), - ?assertEqual( - ok, - meck:wait(1, ddoc_cache_entry_validation_funs, recover, '_', 500)). - - -should_not_crash_lru_process({DbName, _}) -> - LRUPid = whereis(ddoc_cache_lru), - ?assert(is_process_alive(LRUPid)), - ?assertError( - database_does_not_exist, - ddoc_cache_lru:open({?MODULE, {deleted, DbName}})), - ?assert(is_process_alive(LRUPid)). diff --git a/src/ddoc_cache/test/ddoc_cache_opener_test.erl b/src/ddoc_cache/test/ddoc_cache_opener_test.erl deleted file mode 100644 index c3846360c..000000000 --- a/src/ddoc_cache/test/ddoc_cache_opener_test.erl +++ /dev/null @@ -1,33 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_opener_test). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). --include("ddoc_cache_test.hrl"). - - -empty_hull_test() -> - InitExpect = {ok, nil}, - TermExpect = ok, - CallExpect = {stop, {invalid_call, foo}, {invalid_call, foo}, baz}, - CastExpect = {stop, {invalid_cast, foo}, bar}, - InfoExpect = {stop, {invalid_info, foo}, bar}, - CCExpect = {ok, bar}, - ?assertEqual(InitExpect, ddoc_cache_opener:init(foo)), - ?assertEqual(TermExpect, ddoc_cache_opener:terminate(foo, bar)), - ?assertEqual(CallExpect, ddoc_cache_opener:handle_call(foo, bar, baz)), - ?assertEqual(CastExpect, ddoc_cache_opener:handle_cast(foo, bar)), - ?assertEqual(InfoExpect, ddoc_cache_opener:handle_info(foo, bar)), - ?assertEqual(CCExpect, ddoc_cache_opener:code_change(foo, bar, baz)). 
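% empty_hull_test above pins down that ddoc_cache_opener stops on any
% unrecognized call, cast or info. Judging by the check_upgrade_clause
% tests elsewhere in this change, the one message still routed through the
% opener is the legacy eviction cast:

gen_server:cast(ddoc_cache_opener, {do_evict, DbName}).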
diff --git a/src/ddoc_cache/test/ddoc_cache_refresh_test.erl b/src/ddoc_cache/test/ddoc_cache_refresh_test.erl deleted file mode 100644 index 24ae346d4..000000000 --- a/src/ddoc_cache/test/ddoc_cache_refresh_test.erl +++ /dev/null @@ -1,174 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_refresh_test). - - --export([ - recover/1 -]). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("eunit/include/eunit.hrl"). --include("ddoc_cache_test.hrl"). - - -recover(DbName) -> - {ok, {DbName, rand_string()}}. - - -start_couch() -> - Ctx = ddoc_cache_tutil:start_couch(), - meck:new(ddoc_cache_ev, [passthrough]), - Ctx. - - -stop_couch(Ctx) -> - meck:unload(), - ddoc_cache_tutil:stop_couch(Ctx). - - -check_refresh_test_() -> - { - setup, - fun start_couch/0, - fun stop_couch/1, - ddoc_cache_tutil:with([ - {"refresh_ddoc", fun refresh_ddoc/1}, - {"refresh_ddoc_rev", fun refresh_ddoc_rev/1}, - {"refresh_vdu", fun refresh_vdu/1}, - {"refresh_custom", fun refresh_custom/1}, - {"refresh_multiple", fun refresh_multiple/1}, - {"check_upgrade_clause", fun check_upgrade_clause/1} - ]) - }. - - -refresh_ddoc({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR), - meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), - meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), - - ?assertEqual(2, ets:info(?CACHE, size)), - [#entry{key = Key, val = DDoc}, _] = lists:sort(ets:tab2list(?CACHE)), - NewDDoc = DDoc#doc{ - body = {[{<<"foo">>, <<"baz">>}]} - }, - {ok, {Depth, RevId}} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), - Expect = NewDDoc#doc{ - revs = {Depth, [RevId | element(2, DDoc#doc.revs)]} - }, - meck:wait(ddoc_cache_ev, event, [updated, {Key, Expect}], 1000), - ?assertMatch({ok, Expect}, ddoc_cache:open_doc(DbName, ?FOOBAR)), - ?assertEqual(2, ets:info(?CACHE, size)). - - -refresh_ddoc_rev({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR), - {ok, RevDDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev), - - meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), - meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), - - [_, #entry{key = Key, val = DDoc}] = lists:sort(ets:tab2list(?CACHE)), - NewDDoc = DDoc#doc{ - body = {[{<<"foo">>, <<"kazam">>}]} - }, - {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), - % We pass the rev explicitly so we assert that we're - % getting the same original response from the cache - meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000), - ?assertMatch({ok, RevDDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)), - ?assertEqual(2, ets:info(?CACHE, size)). 
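% refresh_ddoc_rev (and refresh_multiple below) sort ets:tab2list(?CACHE)
% and rely on Erlang term order to put the no-rev entry ahead of the rev
% entry. That holds because both keys are 2-tuples, equally sized tuples
% compare element-wise, and the shorter atom name sorts first:

1> ddoc_cache_entry_ddocid < ddoc_cache_entry_ddocid_rev.
true
2> {ddoc_cache_entry_ddocid, key} < {ddoc_cache_entry_ddocid_rev, key}.
true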
-
-
-refresh_vdu({DbName, _}) ->
-    ddoc_cache_tutil:clear(),
-    meck:reset(ddoc_cache_ev),
-    {ok, [_]} = ddoc_cache:open_validation_funs(DbName),
-    [#entry{key = Key}] = ets:tab2list(?CACHE),
-    {ok, DDoc} = fabric:open_doc(DbName, ?VDU, [?ADMIN_CTX]),
-    {ok, _} = fabric:update_doc(DbName, DDoc#doc{body = {[]}}, [?ADMIN_CTX]),
-    meck:wait(ddoc_cache_ev, event, [updated, {Key, []}], 1000),
-    ?assertMatch({ok, []}, ddoc_cache:open_validation_funs(DbName)),
-    ?assertEqual(1, ets:info(?CACHE, size)).
-
-
-refresh_custom({DbName, _}) ->
-    ddoc_cache_tutil:clear(),
-    meck:reset(ddoc_cache_ev),
-    {ok, Resp1} = ddoc_cache:open_custom(DbName, ?MODULE),
-    {ok, DDoc} = fabric:open_doc(DbName, ?VDU, [?ADMIN_CTX]),
-    {ok, _} = fabric:update_doc(DbName, DDoc#doc{body = {[]}}, [?ADMIN_CTX]),
-    meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000),
-    ?assertNotEqual({ok, Resp1}, ddoc_cache:open_custom(DbName, ?MODULE)),
-    ?assertEqual(1, ets:info(?CACHE, size)).
-
-
-refresh_multiple({DbName, _}) ->
-    ddoc_cache_tutil:clear(),
-    meck:reset(ddoc_cache_ev),
-    Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
-    {ok, DDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR),
-    {ok, DDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
-    ?assertEqual(2, ets:info(?CACHE, size)),
-    % Relying on the sort order of entry keys to make
-    % sure our entries line up for this test
-    [
-        #entry{key = NoRevKey, val = DDoc},
-        #entry{key = RevKey, val = DDoc}
-    ] = lists:sort(ets:tab2list(?CACHE)),
-    NewDDoc = DDoc#doc{
-        body = {[{<<"foo">>, <<"kalamazoo">>}]}
-    },
-    {ok, {Depth, RevId}} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
-    Updated = NewDDoc#doc{
-        revs = {Depth, [RevId | element(2, DDoc#doc.revs)]}
-    },
-    meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000),
-    meck:wait(ddoc_cache_ev, event, [updated, {NoRevKey, Updated}], 1000),
-    % We pass the rev explicitly so we assert that we're
-    % getting the same original response from the cache
-    ?assertEqual({ok, Updated}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
-    ?assertEqual({ok, DDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
-    ?assertEqual(2, ets:info(?CACHE, size)).
-
-
-check_upgrade_clause({DbName, _}) ->
-    ddoc_cache_tutil:clear(),
-    meck:reset(ddoc_cache_ev),
-    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
-    [#entry{key = Key}] = ets:tab2list(?CACHE),
-    gen_server:cast(ddoc_cache_opener, {do_evict, DbName, [?FOOBAR]}),
-    meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000).
-
-
-rand_string() ->
-    Bin = crypto:strong_rand_bytes(8),
-    to_hex(Bin, []).
-
-
-to_hex(<<>>, Acc) ->
-    list_to_binary(lists:reverse(Acc));
-to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
-    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
-
-
-hexdig(C) when C >= 0, C =< 9 ->
-    C + $0;
-hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
diff --git a/src/ddoc_cache/test/ddoc_cache_remove_test.erl b/src/ddoc_cache/test/ddoc_cache_remove_test.erl
deleted file mode 100644
index e40518529..000000000
--- a/src/ddoc_cache/test/ddoc_cache_remove_test.erl
+++ /dev/null
@@ -1,224 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_remove_test). - - --export([ - recover/1 -]). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("mem3/include/mem3.hrl"). --include_lib("eunit/include/eunit.hrl"). --include("ddoc_cache_test.hrl"). - - -recover(DbName) -> - {ok, #doc{body = {Body}}} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]), - case couch_util:get_value(<<"status">>, Body) of - <<"ok">> -> - {ok, yay}; - <<"not_ok">> -> - {ruh, roh}; - <<"error">> -> - erlang:error(thpppt) - end. - - -start_couch() -> - Ctx = ddoc_cache_tutil:start_couch(), - meck:new(ddoc_cache_ev, [passthrough]), - Ctx. - - -stop_couch(Ctx) -> - meck:unload(), - ddoc_cache_tutil:stop_couch(Ctx). - - -check_refresh_test_() -> - { - setup, - fun start_couch/0, - fun stop_couch/1, - ddoc_cache_tutil:with([ - {"remove_ddoc", fun remove_ddoc/1}, - {"remove_ddoc_rev", fun remove_ddoc_rev/1}, - {"remove_ddoc_rev_only", fun remove_ddoc_rev_only/1}, - {"remove_custom_not_ok", fun remove_custom_not_ok/1}, - {"remove_custom_error", fun remove_custom_error/1} - ]) - }. - - -remove_ddoc({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - ?assertEqual(0, ets:info(?CACHE, size)), - {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR), - - meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), - meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), - - [#entry{val = DDoc}, #entry{val = DDoc}] = ets:tab2list(?CACHE), - {Depth, [RevId | _]} = DDoc#doc.revs, - NewDDoc = DDoc#doc{ - deleted = true, - body = {[]} - }, - {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), - - DDocIdKey = {ddoc_cache_entry_ddocid, {DbName, ?FOOBAR}}, - Rev = {Depth, RevId}, - DDocIdRevKey = {ddoc_cache_entry_ddocid_rev, {DbName, ?FOOBAR, Rev}}, - meck:wait(ddoc_cache_ev, event, [removed, DDocIdKey], 1000), - meck:wait(ddoc_cache_ev, event, [update_noop, DDocIdRevKey], 1000), - - ?assertMatch({not_found, deleted}, ddoc_cache:open_doc(DbName, ?FOOBAR)), - ?assertEqual(1, ets:info(?CACHE, size)). - - -remove_ddoc_rev({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU), - {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev), - - meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), - meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), - - % Notice the sort so that we know we're getting the - % revid version second. - [_, #entry{key = Key, val = DDoc, pid = Pid}] - = lists:sort(ets:tab2list(?CACHE)), - - NewDDoc = DDoc#doc{ - body = {[{<<"an">>, <<"update">>}]} - }, - {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), - meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000), - % Compact the database so that the old rev is removed - lists:foreach(fun(Shard) -> - do_compact(Shard#shard.name) - end, mem3:local_shards(DbName)), - % Trigger a refresh rather than wait for the timeout - ddoc_cache_entry:refresh(Pid), - meck:wait(ddoc_cache_ev, event, [removed, Key], 1000), - ?assertMatch( - {{not_found, missing}, _}, - ddoc_cache:open_doc(DbName, ?VDU, Rev) - ), - ?assertEqual(1, ets:info(?CACHE, size)). 
- - -remove_ddoc_rev_only({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU), - {ok, _} = ddoc_cache:open_doc(DbName, ?VDU), - {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev), - % Relying on the sort order of keys to keep - % these lined up for testing - [ - #entry{key = NoRevKey, val = DDoc, pid = NoRevPid}, - #entry{key = RevKey, val = DDoc, pid = RevPid} - ] = lists:sort(ets:tab2list(?CACHE)), - NewDDoc = DDoc#doc{ - body = {[{<<"new">>, <<"awesomeness">>}]} - }, - {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), - meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000), - meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000), - % Compact the database so that the old rev is removed - lists:foreach(fun(Shard) -> - do_compact(Shard#shard.name) - end, mem3:local_shards(DbName)), - % Trigger a refresh rather than wait for the timeout - ddoc_cache_entry:refresh(NoRevPid), - ddoc_cache_entry:refresh(RevPid), - meck:wait(ddoc_cache_ev, event, [update_noop, NoRevKey], 1000), - meck:wait(ddoc_cache_ev, event, [removed, RevKey], 1000), - ?assertMatch({ok, _}, ddoc_cache:open_doc(DbName, ?VDU)), - ?assertMatch( - {{not_found, missing}, _}, - ddoc_cache:open_doc(DbName, ?VDU, Rev) - ), - ?assertEqual(1, ets:info(?CACHE, size)). - -remove_custom_not_ok({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - init_custom_ddoc(DbName), - {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE), - [#entry{key = Key}] = ets:tab2list(?CACHE), - {ok, DDoc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]), - NewDDoc = DDoc#doc{ - body = {[{<<"status">>, <<"not_ok">>}]} - }, - {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), - meck:wait(ddoc_cache_ev, event, [removed, Key], 1000), - ?assertEqual({ruh, roh}, ddoc_cache:open_custom(DbName, ?MODULE)), - ?assertEqual(0, ets:info(?CACHE, size)). - - -remove_custom_error({DbName, _}) -> - ddoc_cache_tutil:clear(), - meck:reset(ddoc_cache_ev), - init_custom_ddoc(DbName), - {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE), - [#entry{key = Key}] = ets:tab2list(?CACHE), - {ok, DDoc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]), - NewDDoc = DDoc#doc{ - body = {[{<<"status">>, <<"error">>}]} - }, - {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), - meck:wait(ddoc_cache_ev, event, [removed, Key], 1000), - ?assertError(thpppt, ddoc_cache:open_custom(DbName, ?MODULE)), - ?assertEqual(0, ets:info(?CACHE, size)). - - -init_custom_ddoc(DbName) -> - Body = {[{<<"status">>, <<"ok">>}]}, - {ok, Doc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]), - NewDoc = Doc#doc{body = Body}, - {ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]). - - -do_compact(ShardName) -> - {ok, Db} = couch_db:open_int(ShardName, []), - try - {ok, Pid} = couch_db:start_compact(Db), - Ref = erlang:monitor(process, Pid), - receive - {'DOWN', Ref, _, _, _} -> - ok - end - after - couch_db:close(Db) - end, - wait_for_compaction(ShardName). - - -wait_for_compaction(ShardName) -> - {ok, Db} = couch_db:open_int(ShardName, []), - CompactRunning = try - {ok, Info} = couch_db:get_db_info(Db), - couch_util:get_value(compact_running, Info) - after - couch_db:close(Db) - end, - if not CompactRunning -> ok; true -> - timer:sleep(100), - wait_for_compaction(ShardName) - end. 
\ No newline at end of file diff --git a/src/ddoc_cache/test/ddoc_cache_test.hrl b/src/ddoc_cache/test/ddoc_cache_test.hrl deleted file mode 100644 index 73f7bc217..000000000 --- a/src/ddoc_cache/test/ddoc_cache_test.hrl +++ /dev/null @@ -1,26 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - - --define(CACHE, ddoc_cache_entries). --define(LRU, ddoc_cache_lru). --define(OPENERS, ddoc_cache_openers). - --define(FOOBAR, <<"_design/foobar">>). --define(VDU, <<"_design/vdu">>). --define(CUSTOM, <<"_design/custom">>). - --record(entry, { - key, - val, - pid -}). diff --git a/src/ddoc_cache/test/ddoc_cache_tutil.erl b/src/ddoc_cache/test/ddoc_cache_tutil.erl deleted file mode 100644 index b34d4b163..000000000 --- a/src/ddoc_cache/test/ddoc_cache_tutil.erl +++ /dev/null @@ -1,111 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(ddoc_cache_tutil). - - --export([ - start_couch/0, - start_couch/1, - stop_couch/1, - clear/0, - get_rev/2, - ddocs/0, - purge_modules/0, - with/1 -]). - - --include_lib("couch/include/couch_db.hrl"). --include_lib("couch/include/couch_eunit.hrl"). - - -start_couch() -> - start_couch([{write_ddocs, true}]). - - -start_couch(Options) -> - WriteDDocs = couch_util:get_value(write_ddocs, Options, true), - purge_modules(), - Ctx = test_util:start_couch(?CONFIG_CHAIN, [chttpd, ddoc_cache]), - TmpDb = ?tempdb(), - ok = fabric:create_db(TmpDb, [{q, "1"}, {n, "1"}]), - if not WriteDDocs -> ok; true -> - {ok, _} = fabric:update_docs(TmpDb, ddocs(), [?ADMIN_CTX]) - end, - {TmpDb, Ctx}. - - -stop_couch({_TmpDb, Ctx}) -> - test_util:stop_couch(Ctx). - - -clear() -> - application:stop(ddoc_cache), - application:start(ddoc_cache). - - -get_rev(DbName, DDocId) -> - {_, Ref} = erlang:spawn_monitor(fun() -> - {ok, #doc{revs = Revs}} = fabric:open_doc(DbName, DDocId, [?ADMIN_CTX]), - {Depth, [RevId | _]} = Revs, - exit({Depth, RevId}) - end), - receive - {'DOWN', Ref, _, _, Rev} -> Rev - end. - - -ddocs() -> - FooBar = #doc{ - id = <<"_design/foobar">>, - body = {[ - {<<"foo">>, <<"bar">>} - ]} - }, - VDU = #doc{ - id = <<"_design/vdu">>, - body = {[ - {<<"validate_doc_update">>, <<"function(doc) {return;}">>} - ]} - }, - Custom = #doc{ - id = <<"_design/custom">>, - body = {[ - {<<"status">>, <<"ok">>}, - {<<"custom">>, <<"hotrod">>} - ]} - }, - [FooBar, VDU, Custom]. 
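% get_rev/2 above uses a small idiom worth calling out: the spawned process
% hands its result back as its exit reason, and the caller reads it out of
% the 'DOWN' message. A standalone sketch of the same pattern (the function
% name is illustrative):

fetch_via_exit(Fun) ->
    {_Pid, Ref} = erlang:spawn_monitor(fun() -> exit(Fun()) end),
    receive
        {'DOWN', Ref, process, _, Result} -> Result
    end.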
- - -purge_modules() -> - case application:get_key(ddoc_cache, modules) of - {ok, Mods} -> - lists:foreach(fun(Mod) -> - case code:which(Mod) of - cover_compiled -> - ok; - _ -> - code:delete(Mod), - code:purge(Mod) - end - end, Mods); - undefined -> - ok - end. - -%% eunit implementation of {with, Tests} doesn't detect test name correctly -with(Tests) -> - fun(ArgsTuple) -> - [{Name, ?_test(Fun(ArgsTuple))} || {Name, Fun} <- Tests] - end. diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl new file mode 100644 index 000000000..b576d88bb --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl @@ -0,0 +1,175 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_basic_test). + + +-export([ + recover/1 +]). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include("ddoc_cache_test.hrl"). + + +recover(DbName) -> + {ok, {DbName, totes_custom}}. + + +start_couch() -> + Ctx = ddoc_cache_tutil:start_couch(), + meck:new(ddoc_cache_ev, [passthrough]), + Ctx. + + +stop_couch(Ctx) -> + meck:unload(), + ddoc_cache_tutil:stop_couch(Ctx). + + +check_basic_test_() -> + { + setup, + fun start_couch/0, + fun stop_couch/1, + ddoc_cache_tutil:with([ + {"cache_ddoc", fun cache_ddoc/1}, + {"cache_ddoc_rev", fun cache_ddoc_rev/1}, + {"cache_vdu", fun cache_vdu/1}, + {"cache_custom", fun cache_custom/1}, + {"cache_ddoc_refresher_unchanged", fun cache_ddoc_refresher_unchanged/1}, + {"dont_cache_not_found", fun dont_cache_not_found/1}, + {"deprecated_api_works", fun deprecated_api_works/1} + ]) + }. + + +check_no_vdu_test_() -> + { + setup, + fun() -> ddoc_cache_tutil:start_couch([{write_ddocs, false}]) end, + fun ddoc_cache_tutil:stop_couch/1, + ddoc_cache_tutil:with([ + {"cache_no_vdu_no_ddoc", fun cache_no_vdu_no_ddoc/1}, + {"cache_no_vdu_empty_ddoc", fun cache_no_vdu_empty_ddoc/1} + ]) + }. + + +cache_ddoc({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + ?assertEqual(0, ets:info(?CACHE, size)), + Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR), + ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1), + meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), + meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), + ?assertEqual(2, ets:info(?CACHE, size)), + Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR), + ?assertEqual(Resp1, Resp2), + ?assertEqual(2, ets:info(?CACHE, size)). 
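% These tests synchronize on cache events with meck:wait rather than
% sleeping: meck:wait/4 blocks until the mocked function has been called
% with matching arguments ('_' is a wildcard) or fails with a timeout
% error, and meck:wait/5 additionally takes an expected call count. Both
% forms as used throughout this change:

meck:wait(ddoc_cache_ev, event, [started, '_'], 1000),
meck:wait(2, ddoc_cache_ev, event, [removed, '_'], 1000).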
+ + +cache_ddoc_rev({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR), + ?assertEqual(0, ets:info(?CACHE, size)), + Resp1 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev), + ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp1), + meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), + meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), + ?assertEqual(2, ets:info(?CACHE, size)), + Resp2 = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev), + ?assertEqual(Resp1, Resp2), + ?assertEqual(2, ets:info(?CACHE, size)), + + % Assert that the non-rev cache entry is separate + Resp3 = ddoc_cache:open_doc(DbName, ?FOOBAR), + ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp3), + ?assertEqual(2, ets:info(?CACHE, size)). + + +cache_vdu({DbName, _}) -> + ddoc_cache_tutil:clear(), + ?assertEqual(0, ets:info(?CACHE, size)), + Resp1 = ddoc_cache:open_validation_funs(DbName), + ?assertMatch({ok, [_]}, Resp1), + ?assertEqual(1, ets:info(?CACHE, size)), + Resp2 = ddoc_cache:open_validation_funs(DbName), + ?assertEqual(Resp1, Resp2), + ?assertEqual(1, ets:info(?CACHE, size)). + + +cache_custom({DbName, _}) -> + ddoc_cache_tutil:clear(), + ?assertEqual(0, ets:info(?CACHE, size)), + Resp1 = ddoc_cache:open_custom(DbName, ?MODULE), + ?assertMatch({ok, {DbName, totes_custom}}, Resp1), + ?assertEqual(1, ets:info(?CACHE, size)), + Resp2 = ddoc_cache:open_custom(DbName, ?MODULE), + ?assertEqual(Resp1, Resp2), + ?assertEqual(1, ets:info(?CACHE, size)). + + +cache_ddoc_refresher_unchanged({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + ?assertEqual(0, ets:info(?CACHE, size)), + ddoc_cache:open_doc(DbName, ?FOOBAR), + meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), + meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), + Tab1 = [_, _] = lists:sort(ets:tab2list(?CACHE)), + ddoc_cache:open_doc(DbName, ?FOOBAR), + meck:wait(ddoc_cache_ev, event, [accessed, '_'], 1000), + Tab2 = lists:sort(ets:tab2list(?CACHE)), + ?assertEqual(Tab2, Tab1). + + +dont_cache_not_found({DbName, _}) -> + DDocId = <<"_design/not_found">>, + ddoc_cache_tutil:clear(), + Resp = ddoc_cache:open_doc(DbName, DDocId), + ?assertEqual({not_found, missing}, Resp), + ?assertEqual(0, ets:info(?CACHE, size)), + ?assertEqual(0, ets:info(?LRU, size)). + + +deprecated_api_works({DbName, _}) -> + ddoc_cache_tutil:clear(), + {ok, _} = ddoc_cache:open(DbName, ?FOOBAR), + {ok, _} = ddoc_cache:open(DbName, <<"foobar">>), + {ok, _} = ddoc_cache:open(DbName, ?MODULE), + {ok, _} = ddoc_cache:open(DbName, validation_funs). + + +cache_no_vdu_no_ddoc({DbName, _}) -> + ddoc_cache_tutil:clear(), + Resp = ddoc_cache:open_validation_funs(DbName), + ?assertEqual({ok, []}, Resp), + ?assertEqual(1, ets:info(?CACHE, size)), + ?assertEqual(1, ets:info(?LRU, size)). + + +cache_no_vdu_empty_ddoc({DbName, _}) -> + ddoc_cache_tutil:clear(), + DDoc = #doc{ + id = <<"_design/no_vdu">>, + body = {[]} + }, + {ok, _} = fabric:update_docs(DbName, [DDoc], [?ADMIN_CTX]), + Resp = ddoc_cache:open_validation_funs(DbName), + ?assertEqual({ok, []}, Resp), + ?assertEqual(1, ets:info(?CACHE, size)), + ?assertEqual(1, ets:info(?LRU, size)). 
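% cache_ddoc and cache_ddoc_rev above expect two cache entries per design
% doc because id and id+rev lookups are keyed separately. The two key
% shapes, as they appear in the lru and remove tests in this change
% (DbName and Rev are bound by the surrounding test):

IdKey = {ddoc_cache_entry_ddocid, {DbName, <<"_design/foobar">>}},
RevKey = {ddoc_cache_entry_ddocid_rev, {DbName, <<"_design/foobar">>, Rev}},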
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl new file mode 100644 index 000000000..b1a185bdc --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl @@ -0,0 +1,77 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_coverage_test). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include("ddoc_cache_test.hrl"). + + +coverage_test_() -> + { + setup, + fun ddoc_cache_tutil:start_couch/0, + fun ddoc_cache_tutil:stop_couch/1, + [ + fun restart_lru/0, + fun stop_on_evictor_death/0 + ] + }. + + +restart_lru() -> + send_bad_messages(ddoc_cache_lru), + ?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c})), + ?assertEqual({ok, foo}, ddoc_cache_lru:code_change(1, foo, [])). + + +stop_on_evictor_death() -> + meck:new(ddoc_cache_ev, [passthrough]), + try + Lru = whereis(ddoc_cache_lru), + State = sys:get_state(Lru), + Evictor = element(4, State), + Ref = erlang:monitor(process, Lru), + exit(Evictor, shutdown), + receive + {'DOWN', Ref, _, _, Reason} -> + ?assertEqual(shutdown, Reason) + end, + meck:wait(ddoc_cache_ev, event, [lru_init, '_'], 1000), + ?assert(whereis(ddoc_cache_lru) /= Lru) + after + meck:unload() + end. + + +send_bad_messages(Name) -> + wait_for_restart(Name, fun() -> + ?assertEqual({invalid_call, foo}, gen_server:call(Name, foo)) + end), + wait_for_restart(Name, fun() -> + gen_server:cast(Name, foo) + end), + wait_for_restart(Name, fun() -> + whereis(Name) ! foo + end). + + +wait_for_restart(Server, Fun) -> + Ref = erlang:monitor(process, whereis(Server)), + Fun(), + receive + {'DOWN', Ref, _, _, _} -> + ok + end, + ?assert(is_pid(test_util:wait_process(Server))). diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl new file mode 100644 index 000000000..d46bdde32 --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl @@ -0,0 +1,62 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_disabled_test). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include("ddoc_cache_test.hrl"). + + +start_couch() -> + Ctx = ddoc_cache_tutil:start_couch(), + config:set("ddoc_cache", "max_size", "0", false), + Ctx. 
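% The last argument to config:set/4 above is the persist flag; false keeps
% the change in memory only, so the test never rewrites the .ini files.
% A sketch of the call shape, with the argument roles read from usage:

% config:set(Section, Key, Value, Persist)
ok = config:set("ddoc_cache", "max_size", "0", false).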
+ + +check_disabled_test_() -> + { + setup, + fun start_couch/0, + fun ddoc_cache_tutil:stop_couch/1, + ddoc_cache_tutil:with([ + {"resp_ok", fun resp_ok/1}, + {"resp_not_found", fun resp_not_found/1}, + {"check_effectively_disabled", fun check_effectively_disabled/1} + ]) + }. + + +resp_ok({DbName, _}) -> + ddoc_cache_tutil:clear(), + Resp = ddoc_cache:open_doc(DbName, ?FOOBAR), + ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp), + ?assertEqual(0, ets:info(?CACHE, size)), + ?assertEqual(0, ets:info(?LRU, size)). + + +resp_not_found({DbName, _}) -> + ddoc_cache_tutil:clear(), + Resp = ddoc_cache:open_doc(DbName, <<"_design/not_found">>), + ?assertEqual({not_found, missing}, Resp), + ?assertEqual(0, ets:info(?CACHE, size)), + ?assertEqual(0, ets:info(?LRU, size)). + + +check_effectively_disabled({DbName, _}) -> + config:set("ddoc_cache", "max_size", "1", false), + ddoc_cache_tutil:clear(), + Resp = ddoc_cache:open_doc(DbName, ?FOOBAR), + ?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp), + ?assertEqual(0, ets:info(?CACHE, size)), + ?assertEqual(0, ets:info(?LRU, size)). diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl new file mode 100644 index 000000000..c992bea8d --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl @@ -0,0 +1,159 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_entry_test). + + +-export([ + recover/1 +]). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include("ddoc_cache_test.hrl"). + + +recover(<<"foo">>) -> + timer:sleep(30000); + +recover(DbName) -> + {ok, {DbName, such_custom}}. + + +start_couch() -> + Ctx = ddoc_cache_tutil:start_couch(), + meck:new(ddoc_cache_ev, [passthrough]), + Ctx. + + +stop_couch(Ctx) -> + meck:unload(), + ddoc_cache_tutil:stop_couch(Ctx). + + +check_entry_test_() -> + { + setup, + fun start_couch/0, + fun stop_couch/1, + ddoc_cache_tutil:with([ + {"cancel_and_replace_opener", fun cancel_and_replace_opener/1}, + {"condenses_access_messages", fun condenses_access_messages/1}, + {"kill_opener_on_terminate", fun kill_opener_on_terminate/1}, + {"evict_when_not_accessed", fun evict_when_not_accessed/1}, + {"open_dead_entry", fun open_dead_entry/1}, + {"handles_bad_messages", fun handles_bad_messages/1}, + {"handles_code_change", fun handles_code_change/1} + ]) + }. + + +cancel_and_replace_opener(_) -> + Key = {ddoc_cache_entry_custom, {<<"foo">>, ?MODULE}}, + true = ets:insert_new(?CACHE, #entry{key = Key}), + {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined), + Opener1 = element(4, sys:get_state(Entry)), + Ref1 = erlang:monitor(process, Opener1), + gen_server:cast(Entry, force_refresh), + receive {'DOWN', Ref1, _, _, _} -> ok end, + Opener2 = element(4, sys:get_state(Entry)), + ?assert(Opener2 /= Opener1), + ?assert(is_process_alive(Opener2)), + % Clean up after ourselves + unlink(Entry), + ddoc_cache_entry:shutdown(Entry). 
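% cancel_and_replace_opener (and several lru tests) read the opener pid
% positionally out of sys:get_state/1. Per the state tuple spelled out in
% kill_opener_on_terminate below, element 1 is the record tag, so the
% opener sits at element 4:

St = {st, key, val, opener, waiters, ts, accessed},
opener = element(4, St).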
+ + +condenses_access_messages({DbName, _}) -> + meck:reset(ddoc_cache_ev), + Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}}, + true = ets:insert(?CACHE, #entry{key = Key}), + {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined), + erlang:suspend_process(Entry), + lists:foreach(fun(_) -> + gen_server:cast(Entry, accessed) + end, lists:seq(1, 100)), + erlang:resume_process(Entry), + meck:wait(1, ddoc_cache_ev, event, [accessed, Key], 1000), + ?assertError( + timeout, + meck:wait(2, ddoc_cache_ev, event, [accessed, Key], 100) + ), + unlink(Entry), + ddoc_cache_entry:shutdown(Entry). + + +kill_opener_on_terminate(_) -> + Pid = spawn(fun() -> receive _ -> ok end end), + ?assert(is_process_alive(Pid)), + St = {st, key, val, Pid, waiters, ts, accessed}, + ?assertEqual(ok, ddoc_cache_entry:terminate(normal, St)), + ?assert(not is_process_alive(Pid)). + + +evict_when_not_accessed(_) -> + meck:reset(ddoc_cache_ev), + Key = {ddoc_cache_entry_custom, {<<"bar">>, ?MODULE}}, + true = ets:insert_new(?CACHE, #entry{key = Key}), + {ok, Entry} = ddoc_cache_entry:start_link(Key, undefined), + Ref = erlang:monitor(process, Entry), + AccessCount1 = element(7, sys:get_state(Entry)), + ?assertEqual(1, AccessCount1), + ok = gen_server:cast(Entry, refresh), + + meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000), + + AccessCount2 = element(7, sys:get_state(Entry)), + ?assertEqual(0, AccessCount2), + ok = gen_server:cast(Entry, refresh), + receive {'DOWN', Ref, _, _, Reason} -> Reason end, + ?assertEqual(normal, Reason), + ?assertEqual(0, ets:info(?CACHE, size)). + + +open_dead_entry({DbName, _}) -> + Pid = spawn(fun() -> ok end), + Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}}, + ?assertEqual(recover(DbName), ddoc_cache_entry:open(Pid, Key)). + + +handles_bad_messages(_) -> + CallExpect = {stop, {bad_call, foo}, {bad_call, foo}, baz}, + CastExpect = {stop, {bad_cast, foo}, bar}, + InfoExpect = {stop, {bad_info, foo}, bar}, + ?assertEqual(CallExpect, ddoc_cache_entry:handle_call(foo, bar, baz)), + ?assertEqual(CastExpect, ddoc_cache_entry:handle_cast(foo, bar)), + ?assertEqual(InfoExpect, ddoc_cache_entry:handle_info(foo, bar)). + + +handles_code_change(_) -> + CCExpect = {ok, bar}, + ?assertEqual(CCExpect, ddoc_cache_entry:code_change(foo, bar, baz)). + + +handles_bad_shutdown_test_() -> + {timeout, 10, ?_test(begin + ErrorPid = spawn(fun() -> + receive + _ -> exit(bad_shutdown) + end + end), + ?assertExit(bad_shutdown, ddoc_cache_entry:shutdown(ErrorPid)), + NotDeadYetPid = spawn(fun() -> + timer:sleep(infinity) + end), + ?assertExit( + {timeout, {entry_shutdown, NotDeadYetPid}}, + ddoc_cache_entry:shutdown(NotDeadYetPid) + ) + end)}. diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl b/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl new file mode 100644 index 000000000..a451342cf --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl @@ -0,0 +1,21 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_ev). + +-export([ + event/2 +]). 
+ + +event(Name, Arg) -> + couch_log:error("~s :: ~s :: ~p", [?MODULE, Name, Arg]). diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl new file mode 100644 index 000000000..bd61afc37 --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl @@ -0,0 +1,96 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_eviction_test). + + +-export([ + recover/1 +]). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("mem3/include/mem3.hrl"). +-include("ddoc_cache_test.hrl"). + + +recover(DbName) -> + {ok, {DbName, totes_custom}}. + + +start_couch() -> + Ctx = ddoc_cache_tutil:start_couch(), + meck:new(ddoc_cache_ev, [passthrough]), + Ctx. + + +stop_couch(Ctx) -> + meck:unload(), + ddoc_cache_tutil:stop_couch(Ctx). + + +check_eviction_test_() -> + { + setup, + fun start_couch/0, + fun stop_couch/1, + ddoc_cache_tutil:with([ + {"evict_all", fun evict_all/1}, + {"dont_evict_all_unrelated", fun dont_evict_all_unrelated/1}, + {"check_upgrade_clause", fun check_upgrade_clause/1} + ]) + }. + + +evict_all({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR), + #shard{name = ShardName} = hd(mem3:shards(DbName)), + {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR), + {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev), + {ok, _} = ddoc_cache:open_validation_funs(DbName), + {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE), + ?assertEqual(4, ets:info(?CACHE, size)), + {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo), + meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000), + meck:wait(4, ddoc_cache_ev, event, [removed, '_'], 1000), + ?assertEqual(0, ets:info(?CACHE, size)). + + +dont_evict_all_unrelated({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR), + {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR), + {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev), + {ok, _} = ddoc_cache:open_validation_funs(DbName), + {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE), + ?assertEqual(4, ets:info(?CACHE, size)), + ShardName = <<"shards/00000000-ffffffff/test.1384769918">>, + {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo), + meck:wait(ddoc_cache_ev, event, [evict_noop, <<"test">>], 1000), + ?assertEqual(4, ets:info(?CACHE, size)). + + +check_upgrade_clause({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR), + meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), + meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), + ?assertEqual(2, ets:info(?CACHE, size)), + gen_server:cast(ddoc_cache_opener, {do_evict, DbName}), + meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000), + meck:wait(2, ddoc_cache_ev, event, [removed, '_'], 1000), + ?assertEqual(0, ets:info(?CACHE, size)). 
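% dont_evict_all_unrelated above feeds the eviction path a shard of a
% different database and expects an evict_noop for <<"test">>. That relies
% on mapping a shard name back to its clustered database name; assuming
% mem3:dbname/1 is the function doing that mapping, the expected result is:

1> mem3:dbname(<<"shards/00000000-ffffffff/test.1384769918">>).
<<"test">>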
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl new file mode 100644 index 000000000..e37f1c090 --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl @@ -0,0 +1,216 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_lru_test). + + +-export([ + recover/1 +]). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include("ddoc_cache_test.hrl"). + + +recover(<<"pause", _/binary>>) -> + receive go -> ok end, + {ok, paused}; + +recover(<<"big", _/binary>>) -> + {ok, [couch_rand:uniform() || _ <- lists:seq(1, 8192)]}; + +recover(DbName) -> + {ok, DbName}. + + +start_couch() -> + Ctx = ddoc_cache_tutil:start_couch(), + meck:new(ddoc_cache_ev, [passthrough]), + Ctx. + + +stop_couch(Ctx) -> + meck:unload(), + ddoc_cache_tutil:stop_couch(Ctx). + + +check_not_started_test() -> + % Starting couch, but not ddoc_cache + Ctx = test_util:start_couch(), + try + Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}}, + ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key)) + after + test_util:stop_couch(Ctx) + end. + + +check_lru_test_() -> + { + setup, + fun start_couch/0, + fun stop_couch/1, + ddoc_cache_tutil:with([ + {"check_multi_start", fun check_multi_start/1}, + {"check_multi_open", fun check_multi_open/1}, + {"check_capped_size", fun check_capped_size/1}, + {"check_cache_refill", fun check_cache_refill/1}, + {"check_evict_and_exit", fun check_evict_and_exit/1} + ]) + }. + + +check_multi_start(_) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}}, + % These will all get sent through ddoc_cache_lru + Clients = lists:map(fun(_) -> + spawn_monitor(fun() -> + ddoc_cache_lru:open(Key) + end) + end, lists:seq(1, 10)), + meck:wait(ddoc_cache_ev, event, [started, Key], 1000), + lists:foreach(fun({Pid, _Ref}) -> + ?assert(is_process_alive(Pid)) + end, Clients), + [#entry{pid = Pid}] = ets:tab2list(?CACHE), + Opener = element(4, sys:get_state(Pid)), + OpenerRef = erlang:monitor(process, Opener), + ?assert(is_process_alive(Opener)), + Opener ! go, + receive {'DOWN', OpenerRef, _, _, _} -> ok end, + lists:foreach(fun({_, Ref}) -> + receive + {'DOWN', Ref, _, _, normal} -> ok + end + end, Clients). 
+ + +check_multi_open(_) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}}, + % We wait after the first client so that + % the rest of the clients go directly to + % ddoc_cache_entry bypassing ddoc_cache_lru + Client1 = spawn_monitor(fun() -> + ddoc_cache_lru:open(Key) + end), + meck:wait(ddoc_cache_ev, event, [started, Key], 1000), + Clients = [Client1] ++ lists:map(fun(_) -> + spawn_monitor(fun() -> + ddoc_cache_lru:open(Key) + end) + end, lists:seq(1, 9)), + lists:foreach(fun({Pid, _Ref}) -> + ?assert(is_process_alive(Pid)) + end, Clients), + [#entry{pid = Pid}] = ets:tab2list(?CACHE), + Opener = element(4, sys:get_state(Pid)), + OpenerRef = erlang:monitor(process, Opener), + ?assert(is_process_alive(Opener)), + Opener ! go, + receive {'DOWN', OpenerRef, _, _, _} -> ok end, + lists:foreach(fun({_, Ref}) -> + receive {'DOWN', Ref, _, _, normal} -> ok end + end, Clients). + + +check_capped_size(_) -> + % The extra factor of two in the size checks is + % a fudge factor. We don't reject entries from + % the cache if they would put us over the limit + % as we don't have the size information a + % priori. + config:set("ddoc_cache", "max_size", "1048576", false), + MaxSize = 1048576, + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + lists:foreach(fun(I) -> + DbName = list_to_binary("big_" ++ integer_to_list(I)), + ddoc_cache:open_custom(DbName, ?MODULE), + meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000), + ?assert(cache_size() < MaxSize * 2) + end, lists:seq(1, 25)), + lists:foreach(fun(I) -> + DbName = list_to_binary("big_" ++ integer_to_list(I)), + ddoc_cache:open_custom(DbName, ?MODULE), + meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000), + ?assert(cache_size() < MaxSize * 2) + end, lists:seq(26, 100)). + + +check_cache_refill({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + + InitDDoc = fun(I) -> + NumBin = list_to_binary(integer_to_list(I)), + DDocId = <<"_design/", NumBin/binary>>, + Doc = #doc{id = DDocId, body = {[]}}, + {ok, _} = fabric:update_doc(DbName, Doc, [?ADMIN_CTX]), + {ok, _} = ddoc_cache:open_doc(DbName, DDocId), + {ddoc_cache_entry_ddocid, {DbName, DDocId}} + end, + + lists:foreach(fun(I) -> + Key = InitDDoc(I), + meck:wait(ddoc_cache_ev, event, [started, Key], 1000) + end, lists:seq(1, 5)), + + ShardName = mem3:name(hd(mem3:shards(DbName))), + {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo), + meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000), + meck:wait(10, ddoc_cache_ev, event, [removed, '_'], 1000), + ?assertEqual(0, ets:info(?CACHE, size)), + + lists:foreach(fun(I) -> + Key = InitDDoc(I), + meck:wait(ddoc_cache_ev, event, [started, Key], 1000) + end, lists:seq(6, 10)). + + +check_evict_and_exit(_) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + + Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}}, + ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key)), + [#entry{key = Key, pid = Pid}] = ets:tab2list(?CACHE), + + erlang:monitor(process, whereis(ddoc_cache_lru)), + + % Pause the LRU so we can queue multiple messages + erlang:suspend_process(whereis(ddoc_cache_lru)), + + gen_server:cast(ddoc_cache_lru, {do_evict, <<"dbname">>}), + whereis(ddoc_cache_lru) ! 
{'EXIT', Pid, normal}, + + % Resume the LRU and ensure that it doesn't die + erlang:resume_process(whereis(ddoc_cache_lru)), + + meck:wait(ddoc_cache_ev, event, [evicted, <<"dbname">>], 1000), + + % Make sure it can handle another message + OtherKey = {ddoc_cache_entry_custom, {<<"otherdb">>, ?MODULE}}, + ?assertEqual({ok, <<"otherdb">>}, ddoc_cache_lru:open(OtherKey)), + + % And verify our monitor doesn't fire + timer:sleep(500), + ?assertEqual({messages, []}, process_info(self(), messages)). + + +cache_size() -> + ets:info(?CACHE, memory) * erlang:system_info(wordsize). diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl new file mode 100644 index 000000000..637a6e872 --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl @@ -0,0 +1,79 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_no_cache_test). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +ddoc(DDocId) -> + {ok, #doc{ + id = DDocId, + revs = {1, [<<"deadbeefdeadbeef">>]}, + body = {[ + {<<"ohai">>, null} + ]} + }}. + + +not_found(_DDocId) -> + {not_found, missing}. + + +return_error(_DDocId) -> + {error, timeout}. + + +start(Resp) -> + Ctx = ddoc_cache_tutil:start_couch(), + meck:new(fabric), + meck:expect(fabric, open_doc, fun(_, DDocId, _) -> + Resp(DDocId) + end), + Ctx. + + +stop(Ctx) -> + meck:unload(), + ddoc_cache_tutil:stop_couch(Ctx). + + +no_cache_open_ok_test() -> + Ctx = start(fun ddoc/1), + try + Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>), + ?assertEqual(ddoc(<<"bar">>), Resp) + after + stop(Ctx) + end. + + +no_cache_open_not_found_test() -> + Ctx = start(fun not_found/1), + try + Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>), + ?assertEqual(not_found(<<"bar">>), Resp) + after + stop(Ctx) + end. + + +no_cache_open_error_test() -> + Ctx = start(fun return_error/1), + try + Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>), + ?assertEqual(return_error(<<"bar">>), Resp) + after + stop(Ctx) + end. diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl new file mode 100644 index 000000000..c7379d26a --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl @@ -0,0 +1,46 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_open_error_test). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). 
+-include("ddoc_cache_test.hrl"). + + +start_couch() -> + Ctx = ddoc_cache_tutil:start_couch(), + meck:expect(fabric, open_doc, fun(_, ?FOOBAR, _) -> + erlang:error(test_kaboom) + end), + Ctx. + + +stop_couch(Ctx) -> + meck:unload(), + ddoc_cache_tutil:stop_couch(Ctx). + + +check_open_error_test_() -> + { + setup, + fun start_couch/0, + fun stop_couch/1, + ddoc_cache_tutil:with([ + {"handle_open_error", fun handle_open_error/1} + ]) + }. + + +handle_open_error({DbName, _}) -> + ?assertError(test_kaboom, ddoc_cache:open_doc(DbName, ?FOOBAR)). diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl new file mode 100644 index 000000000..73d644f71 --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl @@ -0,0 +1,107 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_open_test). + +-export([ + dbname/1, + ddocid/1, + recover/1, + insert/2 +]). + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include("ddoc_cache_test.hrl"). + + +%% behaviour callbacks +dbname(DbName) -> + DbName. + + +ddocid(_) -> + no_ddocid. + + +recover({deleted, _DbName}) -> + erlang:error(database_does_not_exist); +recover(DbName) -> + ddoc_cache_entry_validation_funs:recover(DbName). + + +insert(_, _) -> + ok. + + +start_couch() -> + Ctx = ddoc_cache_tutil:start_couch(), + meck:new(ddoc_cache_entry_validation_funs, [passthrough]), + meck:expect(ddoc_cache_entry_validation_funs, recover, + ['_'], meck:passthrough()), + Ctx. + + +stop_couch(Ctx) -> + meck:unload(), + ddoc_cache_tutil:stop_couch(Ctx). + + +check_open_error_test_() -> + { + setup, + fun start_couch/0, + fun stop_couch/1, + ddoc_cache_tutil:with([ + {"should_return_database_does_not_exist", + fun should_return_database_does_not_exist/1}, + {"should_not_call_recover_when_database_does_not_exist", + fun should_not_call_recover_when_database_does_not_exist/1}, + {"should_call_recover_when_needed", + fun should_call_recover_when_needed/1}, + {"should_call_recover_when_needed", + fun should_not_crash_lru_process/1} + ]) + }. + + +should_return_database_does_not_exist({DbName, _}) -> + ?assertError( + database_does_not_exist, + ddoc_cache_lru:open({?MODULE, {deleted, DbName}})). + + +should_not_call_recover_when_database_does_not_exist({DbName, _}) -> + meck:reset(ddoc_cache_entry_validation_funs), + ?assertError( + database_does_not_exist, + ddoc_cache_lru:open({?MODULE, {deleted, DbName}})), + ?assertError( + timeout, + meck:wait(1, ddoc_cache_entry_validation_funs, recover, '_', 100)). + + +should_call_recover_when_needed({DbName, _}) -> + meck:reset(ddoc_cache_entry_validation_funs), + ddoc_cache_lru:open({?MODULE, DbName}), + ?assertEqual( + ok, + meck:wait(1, ddoc_cache_entry_validation_funs, recover, '_', 500)). 
+ + +should_not_crash_lru_process({DbName, _}) -> + LRUPid = whereis(ddoc_cache_lru), + ?assert(is_process_alive(LRUPid)), + ?assertError( + database_does_not_exist, + ddoc_cache_lru:open({?MODULE, {deleted, DbName}})), + ?assert(is_process_alive(LRUPid)). diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl new file mode 100644 index 000000000..c3846360c --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl @@ -0,0 +1,33 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_opener_test). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include("ddoc_cache_test.hrl"). + + +empty_hull_test() -> + InitExpect = {ok, nil}, + TermExpect = ok, + CallExpect = {stop, {invalid_call, foo}, {invalid_call, foo}, baz}, + CastExpect = {stop, {invalid_cast, foo}, bar}, + InfoExpect = {stop, {invalid_info, foo}, bar}, + CCExpect = {ok, bar}, + ?assertEqual(InitExpect, ddoc_cache_opener:init(foo)), + ?assertEqual(TermExpect, ddoc_cache_opener:terminate(foo, bar)), + ?assertEqual(CallExpect, ddoc_cache_opener:handle_call(foo, bar, baz)), + ?assertEqual(CastExpect, ddoc_cache_opener:handle_cast(foo, bar)), + ?assertEqual(InfoExpect, ddoc_cache_opener:handle_info(foo, bar)), + ?assertEqual(CCExpect, ddoc_cache_opener:code_change(foo, bar, baz)). diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl new file mode 100644 index 000000000..24ae346d4 --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl @@ -0,0 +1,174 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_refresh_test). + + +-export([ + recover/1 +]). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include("ddoc_cache_test.hrl"). + + +recover(DbName) -> + {ok, {DbName, rand_string()}}. + + +start_couch() -> + Ctx = ddoc_cache_tutil:start_couch(), + meck:new(ddoc_cache_ev, [passthrough]), + Ctx. + + +stop_couch(Ctx) -> + meck:unload(), + ddoc_cache_tutil:stop_couch(Ctx). + + +check_refresh_test_() -> + { + setup, + fun start_couch/0, + fun stop_couch/1, + ddoc_cache_tutil:with([ + {"refresh_ddoc", fun refresh_ddoc/1}, + {"refresh_ddoc_rev", fun refresh_ddoc_rev/1}, + {"refresh_vdu", fun refresh_vdu/1}, + {"refresh_custom", fun refresh_custom/1}, + {"refresh_multiple", fun refresh_multiple/1}, + {"check_upgrade_clause", fun check_upgrade_clause/1} + ]) + }. 
+ + +refresh_ddoc({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR), + meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), + meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), + + ?assertEqual(2, ets:info(?CACHE, size)), + [#entry{key = Key, val = DDoc}, _] = lists:sort(ets:tab2list(?CACHE)), + NewDDoc = DDoc#doc{ + body = {[{<<"foo">>, <<"baz">>}]} + }, + {ok, {Depth, RevId}} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), + Expect = NewDDoc#doc{ + revs = {Depth, [RevId | element(2, DDoc#doc.revs)]} + }, + meck:wait(ddoc_cache_ev, event, [updated, {Key, Expect}], 1000), + ?assertMatch({ok, Expect}, ddoc_cache:open_doc(DbName, ?FOOBAR)), + ?assertEqual(2, ets:info(?CACHE, size)). + + +refresh_ddoc_rev({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR), + {ok, RevDDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev), + + meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), + meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), + + [_, #entry{key = Key, val = DDoc}] = lists:sort(ets:tab2list(?CACHE)), + NewDDoc = DDoc#doc{ + body = {[{<<"foo">>, <<"kazam">>}]} + }, + {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), + % We pass the rev explicitly so we assert that we're + % getting the same original response from the cache + meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000), + ?assertMatch({ok, RevDDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)), + ?assertEqual(2, ets:info(?CACHE, size)). + + +refresh_vdu({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + {ok, [_]} = ddoc_cache:open_validation_funs(DbName), + [#entry{key = Key}] = ets:tab2list(?CACHE), + {ok, DDoc} = fabric:open_doc(DbName, ?VDU, [?ADMIN_CTX]), + {ok, _} = fabric:update_doc(DbName, DDoc#doc{body = {[]}}, [?ADMIN_CTX]), + meck:wait(ddoc_cache_ev, event, [updated, {Key, []}], 1000), + ?assertMatch({ok, []}, ddoc_cache:open_validation_funs(DbName)), + ?assertEqual(1, ets:info(?CACHE, size)). + + +refresh_custom({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + {ok, Resp1} = ddoc_cache:open_custom(DbName, ?MODULE), + {ok, DDoc} = fabric:open_doc(DbName, ?VDU, [?CUSTOM]), + {ok, _} = fabric:update_doc(DbName, DDoc#doc{body = {[]}}, [?ADMIN_CTX]), + meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000), + ?assertNotEqual({ok, Resp1}, ddoc_cache:open_custom(DbName, ?MODULE)), + ?assertEqual(1, ets:info(?CACHE, size)). 
+
+
+refresh_multiple({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    Rev = ddoc_cache_tutil:get_rev(DbName, ?FOOBAR),
+    {ok, DDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    {ok, DDoc} = ddoc_cache:open_doc(DbName, ?FOOBAR, Rev),
+    ?assertEqual(2, ets:info(?CACHE, size)),
+    % Relying on the sort order of entry keys to make
+    % sure our entries line up for this test
+    [
+        #entry{key = NoRevKey, val = DDoc},
+        #entry{key = RevKey, val = DDoc}
+    ] = lists:sort(ets:tab2list(?CACHE)),
+    NewDDoc = DDoc#doc{
+        body = {[{<<"foo">>, <<"kalamazoo">>}]}
+    },
+    {ok, {Depth, RevId}} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
+    Updated = NewDDoc#doc{
+        revs = {Depth, [RevId | element(2, DDoc#doc.revs)]}
+    },
+    meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000),
+    meck:wait(ddoc_cache_ev, event, [updated, {NoRevKey, Updated}], 1000),
+    % We pass the rev explicitly so we assert that we're
+    % getting the same original response from the cache
+    ?assertEqual({ok, Updated}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
+    ?assertEqual({ok, DDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
+    ?assertEqual(2, ets:info(?CACHE, size)).
+
+
+check_upgrade_clause({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+    {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR),
+    [#entry{key = Key}] = ets:tab2list(?CACHE),
+    gen_server:cast(ddoc_cache_opener, {do_evict, DbName, [?FOOBAR]}),
+    meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000).
+
+
+rand_string() ->
+    Bin = crypto:strong_rand_bytes(8),
+    to_hex(Bin, []).
+
+
+to_hex(<<>>, Acc) ->
+    list_to_binary(lists:reverse(Acc));
+to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
+    to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
+
+
+hexdig(C) when C >= 0, C =< 9 ->
+    C + $0;
+hexdig(C) when C >= 10, C =< 15 ->
+    C + $A - 10.
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl
new file mode 100644
index 000000000..e40518529
--- /dev/null
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl
@@ -0,0 +1,224 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(ddoc_cache_remove_test).
+
+
+-export([
+    recover/1
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("ddoc_cache_test.hrl").
+
+
+recover(DbName) ->
+    {ok, #doc{body = {Body}}} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
+    case couch_util:get_value(<<"status">>, Body) of
+        <<"ok">> ->
+            {ok, yay};
+        <<"not_ok">> ->
+            {ruh, roh};
+        <<"error">> ->
+            erlang:error(thpppt)
+    end.
+
+
+start_couch() ->
+    Ctx = ddoc_cache_tutil:start_couch(),
+    meck:new(ddoc_cache_ev, [passthrough]),
+    Ctx.
+
+
+stop_couch(Ctx) ->
+    meck:unload(),
+    ddoc_cache_tutil:stop_couch(Ctx).
+ + +check_refresh_test_() -> + { + setup, + fun start_couch/0, + fun stop_couch/1, + ddoc_cache_tutil:with([ + {"remove_ddoc", fun remove_ddoc/1}, + {"remove_ddoc_rev", fun remove_ddoc_rev/1}, + {"remove_ddoc_rev_only", fun remove_ddoc_rev_only/1}, + {"remove_custom_not_ok", fun remove_custom_not_ok/1}, + {"remove_custom_error", fun remove_custom_error/1} + ]) + }. + + +remove_ddoc({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + ?assertEqual(0, ets:info(?CACHE, size)), + {ok, _} = ddoc_cache:open_doc(DbName, ?FOOBAR), + + meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), + meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), + + [#entry{val = DDoc}, #entry{val = DDoc}] = ets:tab2list(?CACHE), + {Depth, [RevId | _]} = DDoc#doc.revs, + NewDDoc = DDoc#doc{ + deleted = true, + body = {[]} + }, + {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), + + DDocIdKey = {ddoc_cache_entry_ddocid, {DbName, ?FOOBAR}}, + Rev = {Depth, RevId}, + DDocIdRevKey = {ddoc_cache_entry_ddocid_rev, {DbName, ?FOOBAR, Rev}}, + meck:wait(ddoc_cache_ev, event, [removed, DDocIdKey], 1000), + meck:wait(ddoc_cache_ev, event, [update_noop, DDocIdRevKey], 1000), + + ?assertMatch({not_found, deleted}, ddoc_cache:open_doc(DbName, ?FOOBAR)), + ?assertEqual(1, ets:info(?CACHE, size)). + + +remove_ddoc_rev({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU), + {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev), + + meck:wait(ddoc_cache_ev, event, [started, '_'], 1000), + meck:wait(ddoc_cache_ev, event, [default_started, '_'], 1000), + + % Notice the sort so that we know we're getting the + % revid version second. + [_, #entry{key = Key, val = DDoc, pid = Pid}] + = lists:sort(ets:tab2list(?CACHE)), + + NewDDoc = DDoc#doc{ + body = {[{<<"an">>, <<"update">>}]} + }, + {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), + meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000), + % Compact the database so that the old rev is removed + lists:foreach(fun(Shard) -> + do_compact(Shard#shard.name) + end, mem3:local_shards(DbName)), + % Trigger a refresh rather than wait for the timeout + ddoc_cache_entry:refresh(Pid), + meck:wait(ddoc_cache_ev, event, [removed, Key], 1000), + ?assertMatch( + {{not_found, missing}, _}, + ddoc_cache:open_doc(DbName, ?VDU, Rev) + ), + ?assertEqual(1, ets:info(?CACHE, size)). 
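remove_ddoc_rev above relies on do_compact/1 and wait_for_compaction/1, defined at the end of this module; the latter is a plain poll-and-sleep loop. Its generic shape, as a hypothetical helper (unbounded by itself, so the surrounding eunit timeout is what ultimately caps it):

    % Hypothetical generalization of wait_for_compaction/1 below:
    % re-check every 100 ms until CheckFun() returns true.
    wait_until(CheckFun) ->
        case CheckFun() of
            true -> ok;
            false -> timer:sleep(100), wait_until(CheckFun)
        end.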
+ + +remove_ddoc_rev_only({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + Rev = ddoc_cache_tutil:get_rev(DbName, ?VDU), + {ok, _} = ddoc_cache:open_doc(DbName, ?VDU), + {ok, _} = ddoc_cache:open_doc(DbName, ?VDU, Rev), + % Relying on the sort order of keys to keep + % these lined up for testing + [ + #entry{key = NoRevKey, val = DDoc, pid = NoRevPid}, + #entry{key = RevKey, val = DDoc, pid = RevPid} + ] = lists:sort(ets:tab2list(?CACHE)), + NewDDoc = DDoc#doc{ + body = {[{<<"new">>, <<"awesomeness">>}]} + }, + {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), + meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000), + meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000), + % Compact the database so that the old rev is removed + lists:foreach(fun(Shard) -> + do_compact(Shard#shard.name) + end, mem3:local_shards(DbName)), + % Trigger a refresh rather than wait for the timeout + ddoc_cache_entry:refresh(NoRevPid), + ddoc_cache_entry:refresh(RevPid), + meck:wait(ddoc_cache_ev, event, [update_noop, NoRevKey], 1000), + meck:wait(ddoc_cache_ev, event, [removed, RevKey], 1000), + ?assertMatch({ok, _}, ddoc_cache:open_doc(DbName, ?VDU)), + ?assertMatch( + {{not_found, missing}, _}, + ddoc_cache:open_doc(DbName, ?VDU, Rev) + ), + ?assertEqual(1, ets:info(?CACHE, size)). + +remove_custom_not_ok({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + init_custom_ddoc(DbName), + {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE), + [#entry{key = Key}] = ets:tab2list(?CACHE), + {ok, DDoc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]), + NewDDoc = DDoc#doc{ + body = {[{<<"status">>, <<"not_ok">>}]} + }, + {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), + meck:wait(ddoc_cache_ev, event, [removed, Key], 1000), + ?assertEqual({ruh, roh}, ddoc_cache:open_custom(DbName, ?MODULE)), + ?assertEqual(0, ets:info(?CACHE, size)). + + +remove_custom_error({DbName, _}) -> + ddoc_cache_tutil:clear(), + meck:reset(ddoc_cache_ev), + init_custom_ddoc(DbName), + {ok, _} = ddoc_cache:open_custom(DbName, ?MODULE), + [#entry{key = Key}] = ets:tab2list(?CACHE), + {ok, DDoc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]), + NewDDoc = DDoc#doc{ + body = {[{<<"status">>, <<"error">>}]} + }, + {ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]), + meck:wait(ddoc_cache_ev, event, [removed, Key], 1000), + ?assertError(thpppt, ddoc_cache:open_custom(DbName, ?MODULE)), + ?assertEqual(0, ets:info(?CACHE, size)). + + +init_custom_ddoc(DbName) -> + Body = {[{<<"status">>, <<"ok">>}]}, + {ok, Doc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]), + NewDoc = Doc#doc{body = Body}, + {ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]). + + +do_compact(ShardName) -> + {ok, Db} = couch_db:open_int(ShardName, []), + try + {ok, Pid} = couch_db:start_compact(Db), + Ref = erlang:monitor(process, Pid), + receive + {'DOWN', Ref, _, _, _} -> + ok + end + after + couch_db:close(Db) + end, + wait_for_compaction(ShardName). + + +wait_for_compaction(ShardName) -> + {ok, Db} = couch_db:open_int(ShardName, []), + CompactRunning = try + {ok, Info} = couch_db:get_db_info(Db), + couch_util:get_value(compact_running, Info) + after + couch_db:close(Db) + end, + if not CompactRunning -> ok; true -> + timer:sleep(100), + wait_for_compaction(ShardName) + end. 
\ No newline at end of file diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_test.hrl b/src/ddoc_cache/test/eunit/ddoc_cache_test.hrl new file mode 100644 index 000000000..73f7bc217 --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_test.hrl @@ -0,0 +1,26 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +-define(CACHE, ddoc_cache_entries). +-define(LRU, ddoc_cache_lru). +-define(OPENERS, ddoc_cache_openers). + +-define(FOOBAR, <<"_design/foobar">>). +-define(VDU, <<"_design/vdu">>). +-define(CUSTOM, <<"_design/custom">>). + +-record(entry, { + key, + val, + pid +}). diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl b/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl new file mode 100644 index 000000000..b34d4b163 --- /dev/null +++ b/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl @@ -0,0 +1,111 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(ddoc_cache_tutil). + + +-export([ + start_couch/0, + start_couch/1, + stop_couch/1, + clear/0, + get_rev/2, + ddocs/0, + purge_modules/0, + with/1 +]). + + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch/include/couch_eunit.hrl"). + + +start_couch() -> + start_couch([{write_ddocs, true}]). + + +start_couch(Options) -> + WriteDDocs = couch_util:get_value(write_ddocs, Options, true), + purge_modules(), + Ctx = test_util:start_couch(?CONFIG_CHAIN, [chttpd, ddoc_cache]), + TmpDb = ?tempdb(), + ok = fabric:create_db(TmpDb, [{q, "1"}, {n, "1"}]), + if not WriteDDocs -> ok; true -> + {ok, _} = fabric:update_docs(TmpDb, ddocs(), [?ADMIN_CTX]) + end, + {TmpDb, Ctx}. + + +stop_couch({_TmpDb, Ctx}) -> + test_util:stop_couch(Ctx). + + +clear() -> + application:stop(ddoc_cache), + application:start(ddoc_cache). + + +get_rev(DbName, DDocId) -> + {_, Ref} = erlang:spawn_monitor(fun() -> + {ok, #doc{revs = Revs}} = fabric:open_doc(DbName, DDocId, [?ADMIN_CTX]), + {Depth, [RevId | _]} = Revs, + exit({Depth, RevId}) + end), + receive + {'DOWN', Ref, _, _, Rev} -> Rev + end. + + +ddocs() -> + FooBar = #doc{ + id = <<"_design/foobar">>, + body = {[ + {<<"foo">>, <<"bar">>} + ]} + }, + VDU = #doc{ + id = <<"_design/vdu">>, + body = {[ + {<<"validate_doc_update">>, <<"function(doc) {return;}">>} + ]} + }, + Custom = #doc{ + id = <<"_design/custom">>, + body = {[ + {<<"status">>, <<"ok">>}, + {<<"custom">>, <<"hotrod">>} + ]} + }, + [FooBar, VDU, Custom]. 
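get_rev/2 above smuggles its result out through the exit reason of a short-lived monitored process, which keeps the fabric call's process state out of the test process. The bare pattern, as a hypothetical helper:

    % Hypothetical helper: run Fun in its own process and return its
    % result via the 'DOWN' message's exit reason.
    run_isolated(Fun) ->
        {_, Ref} = erlang:spawn_monitor(fun() -> exit(Fun()) end),
        receive
            {'DOWN', Ref, process, _, Result} -> Result
        end.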
+ + +purge_modules() -> + case application:get_key(ddoc_cache, modules) of + {ok, Mods} -> + lists:foreach(fun(Mod) -> + case code:which(Mod) of + cover_compiled -> + ok; + _ -> + code:delete(Mod), + code:purge(Mod) + end + end, Mods); + undefined -> + ok + end. + +%% eunit implementation of {with, Tests} doesn't detect test name correctly +with(Tests) -> + fun(ArgsTuple) -> + [{Name, ?_test(Fun(ArgsTuple))} || {Name, Fun} <- Tests] + end. diff --git a/src/fabric/test/eunit/fabric_rpc_purge_tests.erl b/src/fabric/test/eunit/fabric_rpc_purge_tests.erl new file mode 100644 index 000000000..4eafb2bc4 --- /dev/null +++ b/src/fabric/test/eunit/fabric_rpc_purge_tests.erl @@ -0,0 +1,307 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(fabric_rpc_purge_tests). + + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + + +-define(TDEF(A), {A, fun A/1}). + +% TODO: Add tests: +% - filter some updates +% - allow for an update that was filtered by a node +% - ignore lagging nodes + +main_test_() -> + { + setup, + spawn, + fun setup_all/0, + fun teardown_all/1, + [ + { + foreach, + fun setup_no_purge/0, + fun teardown_no_purge/1, + lists:map(fun wrap/1, [ + ?TDEF(t_no_purge_no_filter) + ]) + }, + { + foreach, + fun setup_single_purge/0, + fun teardown_single_purge/1, + lists:map(fun wrap/1, [ + ?TDEF(t_filter), + ?TDEF(t_filter_unknown_node), + ?TDEF(t_filter_local_node), + ?TDEF(t_no_filter_old_node), + ?TDEF(t_no_filter_different_node), + ?TDEF(t_no_filter_after_repl) + ]) + }, + { + foreach, + fun setup_multi_purge/0, + fun teardown_multi_purge/1, + lists:map(fun wrap/1, [ + ?TDEF(t_filter), + ?TDEF(t_filter_unknown_node), + ?TDEF(t_filter_local_node), + ?TDEF(t_no_filter_old_node), + ?TDEF(t_no_filter_different_node), + ?TDEF(t_no_filter_after_repl) + ]) + } + ] + }. + + +setup_all() -> + test_util:start_couch(). + + +teardown_all(Ctx) -> + test_util:stop_couch(Ctx). + + +setup_no_purge() -> + {ok, Db} = create_db(), + populate_db(Db), + couch_db:name(Db). + + +teardown_no_purge(DbName) -> + ok = couch_server:delete(DbName, []). + + +setup_single_purge() -> + DbName = setup_no_purge(), + DocId = <<"0003">>, + {ok, OldDoc} = open_doc(DbName, DocId), + purge_doc(DbName, DocId), + {DbName, DocId, OldDoc, 1}. + + +teardown_single_purge({DbName, _, _, _}) -> + teardown_no_purge(DbName). + + +setup_multi_purge() -> + DbName = setup_no_purge(), + DocId = <<"0003">>, + {ok, OldDoc} = open_doc(DbName, DocId), + lists:foreach(fun(I) -> + PDocId = iolist_to_binary(io_lib:format("~4..0b", [I])), + purge_doc(DbName, PDocId) + end, lists:seq(1, 5)), + {DbName, DocId, OldDoc, 3}. + + +teardown_multi_purge(Ctx) -> + teardown_single_purge(Ctx). + + +t_no_purge_no_filter(DbName) -> + DocId = <<"0003">>, + + {ok, OldDoc} = open_doc(DbName, DocId), + NewDoc = create_update(OldDoc, 2), + + rpc_update_doc(DbName, NewDoc), + + {ok, CurrDoc} = open_doc(DbName, DocId), + ?assert(CurrDoc /= OldDoc), + ?assert(CurrDoc == NewDoc). 
+ + +t_filter({DbName, DocId, OldDoc, _PSeq}) -> + ?assertEqual({not_found, missing}, open_doc(DbName, DocId)), + create_purge_checkpoint(DbName, 0), + + rpc_update_doc(DbName, OldDoc), + + ?assertEqual({not_found, missing}, open_doc(DbName, DocId)). + + +t_filter_unknown_node({DbName, DocId, OldDoc, _PSeq}) -> + % Unknown nodes are assumed to start at PurgeSeq = 0 + ?assertEqual({not_found, missing}, open_doc(DbName, DocId)), + create_purge_checkpoint(DbName, 0), + + {Pos, [Rev | _]} = OldDoc#doc.revs, + RROpt = {read_repair, [{'blargh@127.0.0.1', [{Pos, Rev}]}]}, + rpc_update_doc(DbName, OldDoc, [RROpt]), + + ?assertEqual({not_found, missing}, open_doc(DbName, DocId)). + + +t_no_filter_old_node({DbName, DocId, OldDoc, PSeq}) -> + ?assertEqual({not_found, missing}, open_doc(DbName, DocId)), + create_purge_checkpoint(DbName, PSeq), + + % The random UUID is to generate a badarg exception when + % we try and convert it to an existing atom. + create_purge_checkpoint(DbName, 0, couch_uuids:random()), + + rpc_update_doc(DbName, OldDoc), + + ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)). + + +t_no_filter_different_node({DbName, DocId, OldDoc, PSeq}) -> + ?assertEqual({not_found, missing}, open_doc(DbName, DocId)), + create_purge_checkpoint(DbName, PSeq), + + % Create a valid purge for a different node + TgtNode = list_to_binary(atom_to_list('notfoo@127.0.0.1')), + create_purge_checkpoint(DbName, 0, TgtNode), + + rpc_update_doc(DbName, OldDoc), + + ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)). + + +t_filter_local_node({DbName, DocId, OldDoc, PSeq}) -> + ?assertEqual({not_found, missing}, open_doc(DbName, DocId)), + create_purge_checkpoint(DbName, PSeq), + + % Create a valid purge for a different node + TgtNode = list_to_binary(atom_to_list('notfoo@127.0.0.1')), + create_purge_checkpoint(DbName, 0, TgtNode), + + % Add a local node rev to the list of node revs. It should + % be filtered out + {Pos, [Rev | _]} = OldDoc#doc.revs, + RROpts = [{read_repair, [ + {tgt_node(), [{Pos, Rev}]}, + {node(), [{1, <<"123">>}]} + ]}], + rpc_update_doc(DbName, OldDoc, RROpts), + + ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)). + + +t_no_filter_after_repl({DbName, DocId, OldDoc, PSeq}) -> + ?assertEqual({not_found, missing}, open_doc(DbName, DocId)), + create_purge_checkpoint(DbName, PSeq), + + rpc_update_doc(DbName, OldDoc), + + ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)). + + +wrap({Name, Fun}) -> + fun(Arg) -> + {timeout, 60, {atom_to_list(Name), fun() -> + process_flag(trap_exit, true), + Fun(Arg) + end}} + end. + + +create_db() -> + DbName = ?tempdb(), + couch_db:create(DbName, [?ADMIN_CTX]). + + +populate_db(Db) -> + Docs = lists:map(fun(Idx) -> + DocId = lists:flatten(io_lib:format("~4..0b", [Idx])), + #doc{ + id = list_to_binary(DocId), + body = {[{<<"int">>, Idx}, {<<"vsn">>, 2}]} + } + end, lists:seq(1, 100)), + {ok, _} = couch_db:update_docs(Db, Docs). + + +open_doc(DbName, DocId) -> + couch_util:with_db(DbName, fun(Db) -> + couch_db:open_doc(Db, DocId, []) + end). + + +create_update(Doc, NewVsn) -> + #doc{ + id = DocId, + revs = {Pos, [Rev | _] = Revs}, + body = {Props} + } = Doc, + NewProps = lists:keyreplace(<<"vsn">>, 1, Props, {<<"vsn">>, NewVsn}), + NewRev = crypto:hash(md5, term_to_binary({DocId, Rev, {NewProps}})), + Doc#doc{ + revs = {Pos + 1, [NewRev | Revs]}, + body = {NewProps} + }. 
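rpc_update_doc/3, defined just below, fakes a rexi caller by seeding rexi_from in the process dictionary; the {Ref, Reply} message it then waits for is exactly what rexi's reply path sends back. A minimal sketch of the same trick (hypothetical wrapper, not part of the patch):

    % Hypothetical wrapper around the rexi_from trick used below.
    call_as_rexi(Fun) ->
        Ref = erlang:make_ref(),
        put(rexi_from, {self(), Ref}),
        Fun(),
        receive
            {Ref, Reply} -> Reply
        after 1000 ->
            erlang:error(rexi_reply_timeout)
        end.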
+ + +purge_doc(DbName, DocId) -> + {ok, Doc} = open_doc(DbName, DocId), + {Pos, [Rev | _]} = Doc#doc.revs, + PInfo = {couch_uuids:random(), DocId, [{Pos, Rev}]}, + Resp = couch_util:with_db(DbName, fun(Db) -> + couch_db:purge_docs(Db, [PInfo], []) + end), + ?assertEqual({ok, [{ok, [{Pos, Rev}]}]}, Resp). + + +create_purge_checkpoint(DbName, PurgeSeq) -> + create_purge_checkpoint(DbName, PurgeSeq, tgt_node_bin()). + + +create_purge_checkpoint(DbName, PurgeSeq, TgtNode) when is_binary(TgtNode) -> + Resp = couch_util:with_db(DbName, fun(Db) -> + SrcUUID = couch_db:get_uuid(Db), + TgtUUID = couch_uuids:random(), + CPDoc = #doc{ + id = mem3_rep:make_purge_id(SrcUUID, TgtUUID), + body = {[ + {<<"target_node">>, TgtNode}, + {<<"purge_seq">>, PurgeSeq} + ]} + }, + couch_db:update_docs(Db, [CPDoc], []) + end), + ?assertMatch({ok, [_]}, Resp). + + +rpc_update_doc(DbName, Doc) -> + {Pos, [Rev | _]} = Doc#doc.revs, + RROpt = {read_repair, [{tgt_node(), [{Pos, Rev}]}]}, + rpc_update_doc(DbName, Doc, [RROpt]). + + +rpc_update_doc(DbName, Doc, Opts) -> + Ref = erlang:make_ref(), + put(rexi_from, {self(), Ref}), + fabric_rpc:update_docs(DbName, [Doc], Opts), + Reply = test_util:wait(fun() -> + receive + {Ref, Reply} -> + Reply + after 0 -> + wait + end + end), + ?assertEqual({ok, []}, Reply). + + +tgt_node() -> + 'foo@127.0.0.1'. + + +tgt_node_bin() -> + iolist_to_binary(atom_to_list(tgt_node())). diff --git a/src/fabric/test/fabric_rpc_purge_tests.erl b/src/fabric/test/fabric_rpc_purge_tests.erl deleted file mode 100644 index 4eafb2bc4..000000000 --- a/src/fabric/test/fabric_rpc_purge_tests.erl +++ /dev/null @@ -1,307 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(fabric_rpc_purge_tests). - - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - - --define(TDEF(A), {A, fun A/1}). - -% TODO: Add tests: -% - filter some updates -% - allow for an update that was filtered by a node -% - ignore lagging nodes - -main_test_() -> - { - setup, - spawn, - fun setup_all/0, - fun teardown_all/1, - [ - { - foreach, - fun setup_no_purge/0, - fun teardown_no_purge/1, - lists:map(fun wrap/1, [ - ?TDEF(t_no_purge_no_filter) - ]) - }, - { - foreach, - fun setup_single_purge/0, - fun teardown_single_purge/1, - lists:map(fun wrap/1, [ - ?TDEF(t_filter), - ?TDEF(t_filter_unknown_node), - ?TDEF(t_filter_local_node), - ?TDEF(t_no_filter_old_node), - ?TDEF(t_no_filter_different_node), - ?TDEF(t_no_filter_after_repl) - ]) - }, - { - foreach, - fun setup_multi_purge/0, - fun teardown_multi_purge/1, - lists:map(fun wrap/1, [ - ?TDEF(t_filter), - ?TDEF(t_filter_unknown_node), - ?TDEF(t_filter_local_node), - ?TDEF(t_no_filter_old_node), - ?TDEF(t_no_filter_different_node), - ?TDEF(t_no_filter_after_repl) - ]) - } - ] - }. - - -setup_all() -> - test_util:start_couch(). - - -teardown_all(Ctx) -> - test_util:stop_couch(Ctx). - - -setup_no_purge() -> - {ok, Db} = create_db(), - populate_db(Db), - couch_db:name(Db). 
- - -teardown_no_purge(DbName) -> - ok = couch_server:delete(DbName, []). - - -setup_single_purge() -> - DbName = setup_no_purge(), - DocId = <<"0003">>, - {ok, OldDoc} = open_doc(DbName, DocId), - purge_doc(DbName, DocId), - {DbName, DocId, OldDoc, 1}. - - -teardown_single_purge({DbName, _, _, _}) -> - teardown_no_purge(DbName). - - -setup_multi_purge() -> - DbName = setup_no_purge(), - DocId = <<"0003">>, - {ok, OldDoc} = open_doc(DbName, DocId), - lists:foreach(fun(I) -> - PDocId = iolist_to_binary(io_lib:format("~4..0b", [I])), - purge_doc(DbName, PDocId) - end, lists:seq(1, 5)), - {DbName, DocId, OldDoc, 3}. - - -teardown_multi_purge(Ctx) -> - teardown_single_purge(Ctx). - - -t_no_purge_no_filter(DbName) -> - DocId = <<"0003">>, - - {ok, OldDoc} = open_doc(DbName, DocId), - NewDoc = create_update(OldDoc, 2), - - rpc_update_doc(DbName, NewDoc), - - {ok, CurrDoc} = open_doc(DbName, DocId), - ?assert(CurrDoc /= OldDoc), - ?assert(CurrDoc == NewDoc). - - -t_filter({DbName, DocId, OldDoc, _PSeq}) -> - ?assertEqual({not_found, missing}, open_doc(DbName, DocId)), - create_purge_checkpoint(DbName, 0), - - rpc_update_doc(DbName, OldDoc), - - ?assertEqual({not_found, missing}, open_doc(DbName, DocId)). - - -t_filter_unknown_node({DbName, DocId, OldDoc, _PSeq}) -> - % Unknown nodes are assumed to start at PurgeSeq = 0 - ?assertEqual({not_found, missing}, open_doc(DbName, DocId)), - create_purge_checkpoint(DbName, 0), - - {Pos, [Rev | _]} = OldDoc#doc.revs, - RROpt = {read_repair, [{'blargh@127.0.0.1', [{Pos, Rev}]}]}, - rpc_update_doc(DbName, OldDoc, [RROpt]), - - ?assertEqual({not_found, missing}, open_doc(DbName, DocId)). - - -t_no_filter_old_node({DbName, DocId, OldDoc, PSeq}) -> - ?assertEqual({not_found, missing}, open_doc(DbName, DocId)), - create_purge_checkpoint(DbName, PSeq), - - % The random UUID is to generate a badarg exception when - % we try and convert it to an existing atom. - create_purge_checkpoint(DbName, 0, couch_uuids:random()), - - rpc_update_doc(DbName, OldDoc), - - ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)). - - -t_no_filter_different_node({DbName, DocId, OldDoc, PSeq}) -> - ?assertEqual({not_found, missing}, open_doc(DbName, DocId)), - create_purge_checkpoint(DbName, PSeq), - - % Create a valid purge for a different node - TgtNode = list_to_binary(atom_to_list('notfoo@127.0.0.1')), - create_purge_checkpoint(DbName, 0, TgtNode), - - rpc_update_doc(DbName, OldDoc), - - ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)). - - -t_filter_local_node({DbName, DocId, OldDoc, PSeq}) -> - ?assertEqual({not_found, missing}, open_doc(DbName, DocId)), - create_purge_checkpoint(DbName, PSeq), - - % Create a valid purge for a different node - TgtNode = list_to_binary(atom_to_list('notfoo@127.0.0.1')), - create_purge_checkpoint(DbName, 0, TgtNode), - - % Add a local node rev to the list of node revs. It should - % be filtered out - {Pos, [Rev | _]} = OldDoc#doc.revs, - RROpts = [{read_repair, [ - {tgt_node(), [{Pos, Rev}]}, - {node(), [{1, <<"123">>}]} - ]}], - rpc_update_doc(DbName, OldDoc, RROpts), - - ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)). - - -t_no_filter_after_repl({DbName, DocId, OldDoc, PSeq}) -> - ?assertEqual({not_found, missing}, open_doc(DbName, DocId)), - create_purge_checkpoint(DbName, PSeq), - - rpc_update_doc(DbName, OldDoc), - - ?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)). - - -wrap({Name, Fun}) -> - fun(Arg) -> - {timeout, 60, {atom_to_list(Name), fun() -> - process_flag(trap_exit, true), - Fun(Arg) - end}} - end. 
- - -create_db() -> - DbName = ?tempdb(), - couch_db:create(DbName, [?ADMIN_CTX]). - - -populate_db(Db) -> - Docs = lists:map(fun(Idx) -> - DocId = lists:flatten(io_lib:format("~4..0b", [Idx])), - #doc{ - id = list_to_binary(DocId), - body = {[{<<"int">>, Idx}, {<<"vsn">>, 2}]} - } - end, lists:seq(1, 100)), - {ok, _} = couch_db:update_docs(Db, Docs). - - -open_doc(DbName, DocId) -> - couch_util:with_db(DbName, fun(Db) -> - couch_db:open_doc(Db, DocId, []) - end). - - -create_update(Doc, NewVsn) -> - #doc{ - id = DocId, - revs = {Pos, [Rev | _] = Revs}, - body = {Props} - } = Doc, - NewProps = lists:keyreplace(<<"vsn">>, 1, Props, {<<"vsn">>, NewVsn}), - NewRev = crypto:hash(md5, term_to_binary({DocId, Rev, {NewProps}})), - Doc#doc{ - revs = {Pos + 1, [NewRev | Revs]}, - body = {NewProps} - }. - - -purge_doc(DbName, DocId) -> - {ok, Doc} = open_doc(DbName, DocId), - {Pos, [Rev | _]} = Doc#doc.revs, - PInfo = {couch_uuids:random(), DocId, [{Pos, Rev}]}, - Resp = couch_util:with_db(DbName, fun(Db) -> - couch_db:purge_docs(Db, [PInfo], []) - end), - ?assertEqual({ok, [{ok, [{Pos, Rev}]}]}, Resp). - - -create_purge_checkpoint(DbName, PurgeSeq) -> - create_purge_checkpoint(DbName, PurgeSeq, tgt_node_bin()). - - -create_purge_checkpoint(DbName, PurgeSeq, TgtNode) when is_binary(TgtNode) -> - Resp = couch_util:with_db(DbName, fun(Db) -> - SrcUUID = couch_db:get_uuid(Db), - TgtUUID = couch_uuids:random(), - CPDoc = #doc{ - id = mem3_rep:make_purge_id(SrcUUID, TgtUUID), - body = {[ - {<<"target_node">>, TgtNode}, - {<<"purge_seq">>, PurgeSeq} - ]} - }, - couch_db:update_docs(Db, [CPDoc], []) - end), - ?assertMatch({ok, [_]}, Resp). - - -rpc_update_doc(DbName, Doc) -> - {Pos, [Rev | _]} = Doc#doc.revs, - RROpt = {read_repair, [{tgt_node(), [{Pos, Rev}]}]}, - rpc_update_doc(DbName, Doc, [RROpt]). - - -rpc_update_doc(DbName, Doc, Opts) -> - Ref = erlang:make_ref(), - put(rexi_from, {self(), Ref}), - fabric_rpc:update_docs(DbName, [Doc], Opts), - Reply = test_util:wait(fun() -> - receive - {Ref, Reply} -> - Reply - after 0 -> - wait - end - end), - ?assertEqual({ok, []}, Reply). - - -tgt_node() -> - 'foo@127.0.0.1'. - - -tgt_node_bin() -> - iolist_to_binary(atom_to_list(tgt_node())). diff --git a/src/global_changes/test/eunit/global_changes_hooks_tests.erl b/src/global_changes/test/eunit/global_changes_hooks_tests.erl new file mode 100644 index 000000000..23fa2c87f --- /dev/null +++ b/src/global_changes/test/eunit/global_changes_hooks_tests.erl @@ -0,0 +1,156 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(global_changes_hooks_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-export([allowed_owner/2]). + +-define(t2l(V), lists:flatten(io_lib:format("~p", [V]))). + +start() -> + Ctx = test_util:start_couch([chttpd, global_changes]), + DbName = ?tempdb(), + ok = fabric:create_db(DbName, [?ADMIN_CTX]), + application:set_env(global_changes, dbname, DbName), + {Ctx, DbName}. 
+ +stop({Ctx, DbName}) -> + ok = fabric:delete_db(DbName, [?ADMIN_CTX]), + test_util:stop_couch(Ctx), + ok. + +setup(default) -> + add_admin("admin", <<"pass">>), + config:delete("couch_httpd_auth", "authentication_redirect", false), + config:set("couch_httpd_auth", "require_valid_user", "false", false), + get_host(); +setup(A) -> + Host = setup(default), + ok = config:set("global_changes", "allowed_owner", + ?t2l({?MODULE, allowed_owner, A}), false), + Host. + +teardown(_) -> + delete_admin("admin"), + config:delete("global_changes", "allowed_owner", false), + ok. + +allowed_owner(_Req, "throw") -> + throw({unauthorized, <<"Exception thrown.">>}); +allowed_owner(_Req, "pass") -> + "super". + +allowed_owner_hook_test_() -> + { + "Check allowed_owner hook", + { + setup, + fun start/0, fun stop/1, + [ + disabled_allowed_owner_integration_point(), + enabled_allowed_owner_integration_point() + ] + } + }. + +disabled_allowed_owner_integration_point() -> + { + "disabled allowed_owner integration point", + { + foreach, + fun() -> setup(default) end, fun teardown/1, + [ + fun should_not_fail_for_admin/1, + fun should_fail_for_non_admin/1 + ] + } + }. + +enabled_allowed_owner_integration_point() -> + { + "enabled allowed_owner integration point", + [ + { + foreach, + fun() -> setup("throw") end, fun teardown/1, + [fun should_throw/1] + }, + { + foreach, + fun() -> setup("pass") end, fun teardown/1, + [fun should_pass/1] + } + ] + }. + +should_not_fail_for_admin(Host) -> + ?_test(begin + Headers = [{basic_auth, {"admin", "pass"}}], + {Status, [Error, Reason]} = + request(Host, Headers, [<<"error">>, <<"reason">>]), + ?assertEqual(200, Status), + ?assertEqual(undefined, Error), + ?assertEqual(undefined, Reason) + end). + +should_fail_for_non_admin(Host) -> + ?_test(begin + Headers = [], + {Status, [Error, Reason]} = + request(Host, Headers, [<<"error">>, <<"reason">>]), + ?assertEqual(401, Status), + ?assertEqual(<<"unauthorized">>, Error), + ?assertEqual(<<"You are not a server admin.">>, Reason) + end). + +should_pass(Host) -> + ?_test(begin + Headers = [{basic_auth, {"admin", "pass"}}], + {Status, [Error, Reason]} = + request(Host, Headers, [<<"error">>, <<"reason">>]), + ?assertEqual(200, Status), + ?assertEqual(undefined, Error), + ?assertEqual(undefined, Reason) + end). + +should_throw(Host) -> + ?_test(begin + Headers = [{basic_auth, {"admin", "pass"}}], + {Status, [Error, Reason]} = + request(Host, Headers, [<<"error">>, <<"reason">>]), + ?assertEqual(401, Status), + ?assertEqual(<<"unauthorized">>, Error), + ?assertEqual(<<"Exception thrown.">>, Reason) + end). + +request(Host, Headers, ToDecode) -> + Url = Host ++ "/_db_updates", + {ok, Status, _Headers, BinBody} = test_request:get(Url, Headers), + {Body} = jiffy:decode(BinBody), + Values = [couch_util:get_value(Key, Body) || Key <- ToDecode], + {Status, Values}. + +add_admin(User, Pass) -> + Hashed = couch_passwords:hash_admin_password(Pass), + config:set("admins", User, ?b2l(Hashed), false). + +delete_admin(User) -> + config:delete("admins", User, false). + +get_host() -> + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)), + Host = "http://" ++ Addr ++ ":" ++ Port, + Host. 
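The ?t2l macro above serializes the {Module, Function, Args} triple to a string with ~p before storing it in config. Reading such a string back into a term is commonly done with erl_scan/erl_parse; a hypothetical helper along those lines (the actual parsing code in global_changes is not shown in this patch):

    % Hypothetical round-trip for a ?t2l-style config string.
    string_to_term(Str) ->
        {ok, Tokens, _} = erl_scan:string(Str ++ "."),
        {ok, Term} = erl_parse:parse_term(Tokens),
        Term.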
diff --git a/src/global_changes/test/global_changes_hooks_tests.erl b/src/global_changes/test/global_changes_hooks_tests.erl deleted file mode 100644 index 23fa2c87f..000000000 --- a/src/global_changes/test/global_changes_hooks_tests.erl +++ /dev/null @@ -1,156 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(global_changes_hooks_tests). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). - --export([allowed_owner/2]). - --define(t2l(V), lists:flatten(io_lib:format("~p", [V]))). - -start() -> - Ctx = test_util:start_couch([chttpd, global_changes]), - DbName = ?tempdb(), - ok = fabric:create_db(DbName, [?ADMIN_CTX]), - application:set_env(global_changes, dbname, DbName), - {Ctx, DbName}. - -stop({Ctx, DbName}) -> - ok = fabric:delete_db(DbName, [?ADMIN_CTX]), - test_util:stop_couch(Ctx), - ok. - -setup(default) -> - add_admin("admin", <<"pass">>), - config:delete("couch_httpd_auth", "authentication_redirect", false), - config:set("couch_httpd_auth", "require_valid_user", "false", false), - get_host(); -setup(A) -> - Host = setup(default), - ok = config:set("global_changes", "allowed_owner", - ?t2l({?MODULE, allowed_owner, A}), false), - Host. - -teardown(_) -> - delete_admin("admin"), - config:delete("global_changes", "allowed_owner", false), - ok. - -allowed_owner(_Req, "throw") -> - throw({unauthorized, <<"Exception thrown.">>}); -allowed_owner(_Req, "pass") -> - "super". - -allowed_owner_hook_test_() -> - { - "Check allowed_owner hook", - { - setup, - fun start/0, fun stop/1, - [ - disabled_allowed_owner_integration_point(), - enabled_allowed_owner_integration_point() - ] - } - }. - -disabled_allowed_owner_integration_point() -> - { - "disabled allowed_owner integration point", - { - foreach, - fun() -> setup(default) end, fun teardown/1, - [ - fun should_not_fail_for_admin/1, - fun should_fail_for_non_admin/1 - ] - } - }. - -enabled_allowed_owner_integration_point() -> - { - "enabled allowed_owner integration point", - [ - { - foreach, - fun() -> setup("throw") end, fun teardown/1, - [fun should_throw/1] - }, - { - foreach, - fun() -> setup("pass") end, fun teardown/1, - [fun should_pass/1] - } - ] - }. - -should_not_fail_for_admin(Host) -> - ?_test(begin - Headers = [{basic_auth, {"admin", "pass"}}], - {Status, [Error, Reason]} = - request(Host, Headers, [<<"error">>, <<"reason">>]), - ?assertEqual(200, Status), - ?assertEqual(undefined, Error), - ?assertEqual(undefined, Reason) - end). - -should_fail_for_non_admin(Host) -> - ?_test(begin - Headers = [], - {Status, [Error, Reason]} = - request(Host, Headers, [<<"error">>, <<"reason">>]), - ?assertEqual(401, Status), - ?assertEqual(<<"unauthorized">>, Error), - ?assertEqual(<<"You are not a server admin.">>, Reason) - end). 
- -should_pass(Host) -> - ?_test(begin - Headers = [{basic_auth, {"admin", "pass"}}], - {Status, [Error, Reason]} = - request(Host, Headers, [<<"error">>, <<"reason">>]), - ?assertEqual(200, Status), - ?assertEqual(undefined, Error), - ?assertEqual(undefined, Reason) - end). - -should_throw(Host) -> - ?_test(begin - Headers = [{basic_auth, {"admin", "pass"}}], - {Status, [Error, Reason]} = - request(Host, Headers, [<<"error">>, <<"reason">>]), - ?assertEqual(401, Status), - ?assertEqual(<<"unauthorized">>, Error), - ?assertEqual(<<"Exception thrown.">>, Reason) - end). - -request(Host, Headers, ToDecode) -> - Url = Host ++ "/_db_updates", - {ok, Status, _Headers, BinBody} = test_request:get(Url, Headers), - {Body} = jiffy:decode(BinBody), - Values = [couch_util:get_value(Key, Body) || Key <- ToDecode], - {Status, Values}. - -add_admin(User, Pass) -> - Hashed = couch_passwords:hash_admin_password(Pass), - config:set("admins", User, ?b2l(Hashed), false). - -delete_admin(User) -> - config:delete("admins", User, false). - -get_host() -> - Addr = config:get("httpd", "bind_address", "127.0.0.1"), - Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)), - Host = "http://" ++ Addr ++ ":" ++ Port, - Host. diff --git a/src/mem3/test/eunit/mem3_cluster_test.erl b/src/mem3/test/eunit/mem3_cluster_test.erl new file mode 100644 index 000000000..4610d64bd --- /dev/null +++ b/src/mem3/test/eunit/mem3_cluster_test.erl @@ -0,0 +1,133 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(mem3_cluster_test). + +-behavior(mem3_cluster). + +-include_lib("eunit/include/eunit.hrl"). + +-export([ + cluster_unstable/1, + cluster_stable/1 +]). + + +% Mem3 cluster callbacks + +cluster_unstable(Server) -> + Server ! cluster_unstable, + Server. + +cluster_stable(Server) -> + Server ! cluster_stable, + Server. + + +mem3_cluster_test_test_() -> + { + foreach, + fun setup/0, + fun teardown/1, + [ + t_cluster_stable_during_startup_period(), + t_cluster_unstable_delivered_on_nodeup(), + t_cluster_unstable_delivered_on_nodedown(), + t_wait_period_is_reset_after_last_change() + ] + }. + + +t_cluster_stable_during_startup_period() -> + ?_test(begin + {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2), + register(?MODULE, Pid), + receive + cluster_stable -> + ?assert(true) + after 1500 -> + throw(timeout) + end, + unlink(Pid), + exit(Pid, kill) + end). + + +t_cluster_unstable_delivered_on_nodeup() -> + ?_test(begin + {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2), + register(?MODULE, Pid), + Pid ! {nodeup, node()}, + receive + cluster_unstable -> + ?assert(true) + after 1000 -> + throw(timeout) + end, + unlink(Pid), + exit(Pid, kill) + end). + + +t_cluster_unstable_delivered_on_nodedown() -> + ?_test(begin + {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2), + register(?MODULE, Pid), + Pid ! {nodedown, node()}, + receive + cluster_unstable -> + ?assert(true) + after 1000 -> + throw(timeout) + end, + unlink(Pid), + exit(Pid, kill) + end). 
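Each of the tests above repeats the same receive ... after shape: block for an expected message and fail on a deadline. Factored out, it would look roughly like this (hypothetical helper; the tests inline it instead):

    % Hypothetical helper: wait for exactly Msg, or error out after
    % TimeoutMs milliseconds.
    assert_receives(Msg, TimeoutMs) ->
        receive
            Msg -> ok
        after TimeoutMs ->
            erlang:error({timeout_waiting_for, Msg})
        end.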
+
+
+t_wait_period_is_reset_after_last_change() ->
+    ?_test(begin
+        {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 1),
+        register(?MODULE, Pid),
+        timer:sleep(800),
+        Pid ! {nodeup, node()}, % after 800 msec send a nodeup
+        receive
+            cluster_stable ->
+                ?assert(false)
+        after 400 ->
+            ?assert(true) % stability check should have been reset
+        end,
+        timer:sleep(1000),
+        receive
+            cluster_stable ->
+                ?assert(true)
+        after 0 ->
+            ?assert(false) % cluster_stable arrives after enough quiet time
+        end,
+        unlink(Pid),
+        exit(Pid, kill)
+    end).
+
+
+% Test helper functions
+
+setup() ->
+    ok.
+
+teardown(_) ->
+    case whereis(?MODULE) of
+        undefined ->
+            ok;
+        Pid when is_pid(Pid) ->
+            unlink(Pid),
+            exit(Pid, kill)
+    end.
diff --git a/src/mem3/test/eunit/mem3_hash_test.erl b/src/mem3/test/eunit/mem3_hash_test.erl
new file mode 100644
index 000000000..7a40c5366
--- /dev/null
+++ b/src/mem3/test/eunit/mem3_hash_test.erl
@@ -0,0 +1,23 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_hash_test).
+
+-include_lib("eunit/include/eunit.hrl").
+
+hash_test() ->
+    ?assertEqual(1624516141,mem3_hash:crc32(0)),
+    ?assertEqual(3816901808,mem3_hash:crc32("0")),
+    ?assertEqual(3523407757,mem3_hash:crc32(<<0>>)),
+    ?assertEqual(4108050209,mem3_hash:crc32(<<"0">>)),
+    ?assertEqual(3094724072,mem3_hash:crc32(zero)),
+    ok.
diff --git a/src/mem3/test/eunit/mem3_rep_test.erl b/src/mem3/test/eunit/mem3_rep_test.erl
new file mode 100644
index 000000000..4a46e7b93
--- /dev/null
+++ b/src/mem3/test/eunit/mem3_rep_test.erl
@@ -0,0 +1,321 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_rep_test).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+-include_lib("mem3/include/mem3.hrl").
+
+
+-define(ID, <<"_id">>).
+-define(TIMEOUT, 60). % seconds
+
+setup() ->
+    {AllSrc, AllTgt} = {?tempdb(), ?tempdb()},
+    {PartSrc, PartTgt} = {?tempdb(), ?tempdb()},
+    create_db(AllSrc, [{q, 1}, {n, 1}]),
+    create_db(AllTgt, [{q, 2}, {n, 1}]),
+    PartProps = [{partitioned, true}, {hash, [couch_partition, hash, []]}],
+    create_db(PartSrc, [{q, 1}, {n, 1}, {props, PartProps}]),
+    create_db(PartTgt, [{q, 2}, {n, 1}, {props, PartProps}]),
+    #{allsrc => AllSrc, alltgt => AllTgt, partsrc => PartSrc, parttgt => PartTgt}.
+
+
+teardown(#{} = Dbs) ->
+    maps:map(fun(_, Db) -> delete_db(Db) end, Dbs).
+
+
+start_couch() ->
+    test_util:start_couch([mem3, fabric]).
+
+
+stop_couch(Ctx) ->
+    test_util:stop_couch(Ctx).
+ + +mem3_reshard_db_test_() -> + { + "mem3 rep db tests", + { + setup, + fun start_couch/0, fun stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun replicate_basics/1, + fun replicate_small_batches/1, + fun replicate_low_batch_count/1, + fun replicate_with_partitions/1 + ] + } + } + }. + + +replicate_basics(#{allsrc := AllSrc, alltgt := AllTgt}) -> + {timeout, ?TIMEOUT, ?_test(begin + DocSpec = #{docs => 10, delete => [5, 9]}, + add_test_docs(AllSrc, DocSpec), + SDocs = get_all_docs(AllSrc), + + [Src] = lists:sort(mem3:local_shards(AllSrc)), + [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)), + #shard{range = R1} = Tgt1, + #shard{range = R2} = Tgt2, + TMap = #{R1 => Tgt1, R2 => Tgt2}, + Opts = [{batch_size, 1000}, {batch_count, all}], + ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)), + + ?assertEqual(SDocs, get_all_docs(AllTgt)) + end)}. + + +replicate_small_batches(#{allsrc := AllSrc, alltgt := AllTgt}) -> + {timeout, ?TIMEOUT, ?_test(begin + DocSpec = #{docs => 10, delete => [5, 9]}, + add_test_docs(AllSrc, DocSpec), + SDocs = get_all_docs(AllSrc), + + [Src] = lists:sort(mem3:local_shards(AllSrc)), + [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)), + #shard{range = R1} = Tgt1, + #shard{range = R2} = Tgt2, + TMap = #{R1 => Tgt1, R2 => Tgt2}, + Opts = [{batch_size, 2}, {batch_count, all}], + ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)), + + ?assertEqual(SDocs, get_all_docs(AllTgt)) + end)}. + + +replicate_low_batch_count(#{allsrc := AllSrc, alltgt := AllTgt}) -> + {timeout, ?TIMEOUT, ?_test(begin + DocSpec = #{docs => 10, delete => [5, 9]}, + add_test_docs(AllSrc, DocSpec), + SDocs = get_all_docs(AllSrc), + + [Src] = lists:sort(mem3:local_shards(AllSrc)), + [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)), + #shard{range = R1} = Tgt1, + #shard{range = R2} = Tgt2, + TMap = #{R1 => Tgt1, R2 => Tgt2}, + + Opts1 = [{batch_size, 2}, {batch_count, 1}], + ?assertMatch({ok, 8}, mem3_rep:go(Src, TMap, Opts1)), + + Opts2 = [{batch_size, 1}, {batch_count, 2}], + ?assertMatch({ok, 6}, mem3_rep:go(Src, TMap, Opts2)), + + Opts3 = [{batch_size, 1000}, {batch_count, all}], + ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts3)), + + ?assertEqual(SDocs, get_all_docs(AllTgt)) + end)}. + + +replicate_with_partitions(#{partsrc := PartSrc, parttgt := PartTgt}) -> + {timeout, ?TIMEOUT, ?_test(begin + DocSpec = #{ + pdocs => #{ + <<"PX">> => 15, + <<"PY">> => 19 + } + }, + add_test_docs(PartSrc, DocSpec), + SDocs = get_all_docs(PartSrc), + PXSrc = get_partition_info(PartSrc, <<"PX">>), + PYSrc = get_partition_info(PartSrc, <<"PY">>), + + [Src] = lists:sort(mem3:local_shards(PartSrc)), + [Tgt1, Tgt2] = lists:sort(mem3:local_shards(PartTgt)), + #shard{range = R1} = Tgt1, + #shard{range = R2} = Tgt2, + TMap = #{R1 => Tgt1, R2 => Tgt2}, + Opts = [{batch_size, 1000}, {batch_count, all}], + ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)), + + ?assertEqual(PXSrc, get_partition_info(PartTgt, <<"PX">>)), + ?assertEqual(PYSrc, get_partition_info(PartTgt, <<"PY">>)), + ?assertEqual(SDocs, get_all_docs(PartTgt)) + end)}. + + +get_partition_info(DbName, Partition) -> + with_proc(fun() -> + {ok, PInfo} = fabric:get_partition_info(DbName, Partition), + maps:with([ + <<"doc_count">>, <<"doc_del_count">>, <<"partition">> + ], to_map(PInfo)) + end). + + +get_all_docs(DbName) -> + get_all_docs(DbName, #mrargs{}). 
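get_all_docs/2, which follows, drives fabric:all_docs/4 with a fold callback; the callback sees {meta, _} once, one {row, Props} per document, then complete, returning {ok, Acc} at each step. A stripped-down counter over the same protocol (hypothetical sketch; assumes the couch_mrview include for #mrargs{}, as in this module):

    % Hypothetical sketch of the fabric:all_docs/4 fold protocol.
    count_docs(DbName) ->
        Cb = fun
            ({meta, _}, N) -> {ok, N};
            ({row, _}, N) -> {ok, N + 1};
            (complete, N) -> {ok, N}
        end,
        {ok, Total} = fabric:all_docs(DbName, Cb, 0, #mrargs{}),
        Total.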
+
+
+get_all_docs(DbName, #mrargs{} = QArgs0) ->
+    GL = erlang:group_leader(),
+    with_proc(fun() ->
+        Cb = fun
+            ({row, Props}, Acc) ->
+                Doc = to_map(couch_util:get_value(doc, Props)),
+                #{?ID := Id} = Doc,
+                {ok, Acc#{Id => Doc}};
+            ({meta, _}, Acc) -> {ok, Acc};
+            (complete, Acc) -> {ok, Acc}
+        end,
+        QArgs = QArgs0#mrargs{include_docs = true},
+        {ok, Docs} = fabric:all_docs(DbName, Cb, #{}, QArgs),
+        Docs
+    end, GL).
+
+
+to_map([_ | _] = Props) ->
+    to_map({Props});
+
+to_map({[_ | _]} = EJson) ->
+    jiffy:decode(jiffy:encode(EJson), [return_maps]).
+
+
+create_db(DbName, Opts) ->
+    GL = erlang:group_leader(),
+    with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
+
+
+delete_db(DbName) ->
+    GL = erlang:group_leader(),
+    with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
+
+
+with_proc(Fun) ->
+    with_proc(Fun, undefined, 30000).
+
+
+with_proc(Fun, GroupLeader) ->
+    with_proc(Fun, GroupLeader, 30000).
+
+
+with_proc(Fun, GroupLeader, Timeout) ->
+    {Pid, Ref} = spawn_monitor(fun() ->
+        case GroupLeader of
+            undefined -> ok;
+            _ -> erlang:group_leader(GroupLeader, self())
+        end,
+        exit({with_proc_res, Fun()})
+    end),
+    receive
+        {'DOWN', Ref, process, Pid, {with_proc_res, Res}} ->
+            Res;
+        {'DOWN', Ref, process, Pid, Error} ->
+            error(Error)
+    after Timeout ->
+        erlang:demonitor(Ref, [flush]),
+        exit(Pid, kill),
+        error({with_proc_timeout, Fun, Timeout})
+    end.
+
+
+add_test_docs(DbName, #{} = DocSpec) ->
+    Docs = docs(maps:get(docs, DocSpec, []))
+        ++ pdocs(maps:get(pdocs, DocSpec, #{})),
+    Res = update_docs(DbName, Docs),
+    Docs1 = lists:map(fun({Doc, {ok, {RevPos, Rev}}}) ->
+        Doc#doc{revs = {RevPos, [Rev]}}
+    end, lists:zip(Docs, Res)),
+    case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
+        [] -> ok;
+        [_ | _] = Deleted -> update_docs(DbName, Deleted)
+    end,
+    ok.
+
+
+update_docs(DbName, Docs) ->
+    with_proc(fun() ->
+        case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
+            {accepted, Res} -> Res;
+            {ok, Res} -> Res
+        end
+    end).
+
+
+delete_docs([S, E], Docs) when E >= S ->
+    ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
+    lists:filtermap(fun(#doc{id = Id} = Doc) ->
+        case lists:member(Id, ToDelete) of
+            true -> {true, Doc#doc{deleted = true}};
+            false -> false
+        end
+    end, Docs);
+delete_docs(_, _) ->
+    [].
+
+
+pdocs(#{} = PMap) ->
+    maps:fold(fun(Part, DocSpec, DocsAcc) ->
+        docs(DocSpec, <<Part/binary, ":">>) ++ DocsAcc
+    end, [], PMap).
+
+
+docs(DocSpec) ->
+    docs(DocSpec, <<"">>).
+
+
+docs(N, Prefix) when is_integer(N), N > 0 ->
+    docs([0, N - 1], Prefix);
+docs([S, E], Prefix) when E >= S ->
+    [doc(Prefix, I) || I <- lists:seq(S, E)];
+docs(_, _) ->
+    [].
+
+
+doc(Pref, Id) ->
+    Body = bodyprops(),
+    doc(Pref, Id, Body, 42).
+
+
+doc(Pref, Id, BodyProps, AttSize) ->
+    #doc{
+        id = doc_id(Pref, Id),
+        body = {BodyProps},
+        atts = atts(AttSize)
+    }.
+
+
+doc_id(Pref, Id) ->
+    IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
+    <<Pref/binary, IdBin/binary>>.
+
+
+bodyprops() ->
+    [
+        {<<"g">>, {[
+            {<<"type">>, <<"Polygon">>},
+            {<<"coordinates">>, [[[-71.0, 48.4], [-70.0, 48.4], [-71.0, 48.4]]]}
+        ]}}
+    ].
+
+
+atts(0) ->
+    [];
+
+atts(Size) when is_integer(Size), Size >= 1 ->
+    Data = << <<"x">> || _ <- lists:seq(1, Size) >>,
+    [couch_att:new([
+        {name, <<"att">>},
+        {type, <<"app/binary">>},
+        {att_len, Size},
+        {data, Data}
+    ])].
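For reference, the binary comprehension in atts/1 above builds Size bytes of "x"; binary:copy/2 from the standard library yields the same data (hypothetical one-liner, not part of the patch):

    % Equivalent to << <<"x">> || _ <- lists:seq(1, Size) >>.
    att_data(Size) ->
        binary:copy(<<"x">>, Size).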
diff --git a/src/mem3/test/eunit/mem3_reshard_api_test.erl b/src/mem3/test/eunit/mem3_reshard_api_test.erl new file mode 100644 index 000000000..c4df24ad3 --- /dev/null +++ b/src/mem3/test/eunit/mem3_reshard_api_test.erl @@ -0,0 +1,847 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(mem3_reshard_api_test). + + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("mem3/src/mem3_reshard.hrl"). + + +-define(USER, "mem3_reshard_api_test_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(JSON, {"Content-Type", "application/json"}). +-define(RESHARD, "_reshard/"). +-define(JOBS, "_reshard/jobs/"). +-define(STATE, "_reshard/state"). +-define(ID, <<"id">>). +-define(OK, <<"ok">>). +-define(TIMEOUT, 60). % seconds + + +setup() -> + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + Url = lists:concat(["http://", Addr, ":", Port, "/"]), + {Db1, Db2, Db3} = {?tempdb(), ?tempdb(), ?tempdb()}, + create_db(Url, Db1, "?q=1&n=1"), + create_db(Url, Db2, "?q=1&n=1"), + create_db(Url, Db3, "?q=2&n=1"), + {Url, {Db1, Db2, Db3}}. + + +teardown({Url, {Db1, Db2, Db3}}) -> + mem3_reshard:reset_state(), + application:unset_env(mem3, reshard_disabled), + delete_db(Url, Db1), + delete_db(Url, Db2), + delete_db(Url, Db3), + ok = config:delete("reshard", "max_jobs", _Persist=false), + ok = config:delete("reshard", "require_node_param", _Persist=false), + ok = config:delete("reshard", "require_range_param", _Persist=false), + ok = config:delete("admins", ?USER, _Persist=false), + meck:unload(). + + +start_couch() -> + test_util:start_couch([mem3, chttpd]). + + +stop_couch(Ctx) -> + test_util:stop_couch(Ctx). 
+ + +mem3_reshard_api_test_() -> + { + "mem3 shard split api tests", + { + setup, + fun start_couch/0, fun stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun basics/1, + fun create_job_basic/1, + fun create_two_jobs/1, + fun create_multiple_jobs_from_one_post/1, + fun start_stop_cluster_basic/1, + fun test_disabled/1, + fun start_stop_cluster_with_a_job/1, + fun individual_job_start_stop/1, + fun individual_job_stop_when_cluster_stopped/1, + fun create_job_with_invalid_arguments/1, + fun create_job_with_db/1, + fun create_job_with_shard_name/1, + fun completed_job_handling/1, + fun handle_db_deletion_in_initial_copy/1, + fun handle_db_deletion_in_topoff1/1, + fun handle_db_deletion_in_copy_local_docs/1, + fun handle_db_deletion_in_build_indices/1, + fun handle_db_deletion_in_update_shard_map/1, + fun handle_db_deletion_in_wait_source_close/1, + fun recover_in_initial_copy/1, + fun recover_in_topoff1/1, + fun recover_in_copy_local_docs/1, + fun recover_in_build_indices/1, + fun recover_in_update_shard_map/1, + fun recover_in_wait_source_close/1, + fun recover_in_topoff3/1, + fun recover_in_source_delete/1, + fun check_max_jobs/1, + fun check_node_and_range_required_params/1, + fun cleanup_completed_jobs/1 + ] + } + } + }. + + +basics({Top, _}) -> + {timeout, ?TIMEOUT, ?_test(begin + % GET /_reshard + ?assertMatch({200, #{ + <<"state">> := <<"running">>, + <<"state_reason">> := null, + <<"completed">> := 0, + <<"failed">> := 0, + <<"running">> := 0, + <<"stopped">> := 0, + <<"total">> := 0 + }}, req(get, Top ++ ?RESHARD)), + + % GET _reshard/state + ?assertMatch({200, #{<<"state">> := <<"running">>}}, + req(get, Top ++ ?STATE)), + + % GET _reshard/jobs + ?assertMatch({200, #{ + <<"jobs">> := [], + <<"offset">> := 0, + <<"total_rows">> := 0 + }}, req(get, Top ++ ?JOBS)), + + % Some invalid paths and methods + ?assertMatch({404, _}, req(get, Top ++ ?RESHARD ++ "/invalidpath")), + ?assertMatch({405, _}, req(put, Top ++ ?RESHARD, #{dont => thinkso})), + ?assertMatch({405, _}, req(post, Top ++ ?RESHARD, #{nope => nope})) + end)}. 
+
+
+create_job_basic({Top, {Db1, _, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        % POST /_reshard/jobs
+        {C1, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}),
+        ?assertEqual(201, C1),
+        ?assertMatch([#{?OK := true, ?ID := J, <<"shard">> := S}]
+            when is_binary(J) andalso is_binary(S), R1),
+        [#{?ID := Id, <<"shard">> := Shard}] = R1,
+
+        % GET /_reshard/jobs
+        ?assertMatch({200, #{
+            <<"jobs">> := [#{?ID := Id, <<"type">> := <<"split">>}],
+            <<"offset">> := 0,
+            <<"total_rows">> := 1
+        }}, req(get, Top ++ ?JOBS)),
+
+        % GET /_reshard/jobs/$jobid
+        {C2, R2} = req(get, Top ++ ?JOBS ++ ?b2l(Id)),
+        ?assertEqual(200, C2),
+        ThisNode = atom_to_binary(node(), utf8),
+        ?assertMatch(#{?ID := Id}, R2),
+        ?assertMatch(#{<<"type">> := <<"split">>}, R2),
+        ?assertMatch(#{<<"source">> := Shard}, R2),
+        ?assertMatch(#{<<"history">> := History} when length(History) > 1, R2),
+        ?assertMatch(#{<<"node">> := ThisNode}, R2),
+        ?assertMatch(#{<<"split_state">> := SSt} when is_binary(SSt), R2),
+        ?assertMatch(#{<<"job_state">> := JSt} when is_binary(JSt), R2),
+        ?assertMatch(#{<<"state_info">> := #{}}, R2),
+        ?assertMatch(#{<<"target">> := Target} when length(Target) == 2, R2),
+
+        % GET /_reshard/jobs/$jobid/state
+        ?assertMatch({200, #{<<"state">> := S, <<"reason">> := R}}
+            when is_binary(S) andalso (is_binary(R) orelse R =:= null),
+            req(get, Top ++ ?JOBS ++ ?b2l(Id) ++ "/state")),
+
+        % GET /_reshard
+        ?assertMatch({200, #{<<"state">> := <<"running">>, <<"total">> := 1}},
+            req(get, Top ++ ?RESHARD)),
+
+        % DELETE /_reshard/jobs/$jobid
+        ?assertMatch({200, #{?OK := true}},
+            req(delete, Top ++ ?JOBS ++ ?b2l(Id))),
+
+        % GET /_reshard/jobs
+        ?assertMatch({200, #{<<"jobs">> := [], <<"total_rows">> := 0}},
+            req(get, Top ++ ?JOBS)),
+
+        % GET /_reshard/jobs/$jobid should be a 404
+        ?assertMatch({404, #{}}, req(get, Top ++ ?JOBS ++ ?b2l(Id))),
+
+        % DELETE /_reshard/jobs/$jobid should be a 404 as well
+        ?assertMatch({404, #{}}, req(delete, Top ++ ?JOBS ++ ?b2l(Id)))
+    end)}.
+
+
+create_two_jobs({Top, {Db1, Db2, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        Jobs = Top ++ ?JOBS,
+
+        ?assertMatch({201, [#{?OK := true}]},
+            req(post, Jobs, #{type => split, db => Db1})),
+        ?assertMatch({201, [#{?OK := true}]},
+            req(post, Jobs, #{type => split, db => Db2})),
+
+        ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD)),
+
+        ?assertMatch({200, #{
+            <<"jobs">> := [#{?ID := Id1}, #{?ID := Id2}],
+            <<"offset">> := 0,
+            <<"total_rows">> := 2
+        }} when Id1 =/= Id2, req(get, Jobs)),
+
+        {200, #{<<"jobs">> := [#{?ID := Id1}, #{?ID := Id2}]}} = req(get, Jobs),
+
+        {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id1)),
+        ?assertMatch({200, #{<<"total">> := 1}}, req(get, Top ++ ?RESHARD)),
+        {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id2)),
+        ?assertMatch({200, #{<<"total">> := 0}}, req(get, Top ++ ?RESHARD))
+    end)}.
+
+
+create_multiple_jobs_from_one_post({Top, {_, _, Db3}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        Jobs = Top ++ ?JOBS,
+        {C1, R1} = req(post, Jobs, #{type => split, db => Db3}),
+        ?assertMatch({201, [#{?OK := true}, #{?OK := true}]}, {C1, R1}),
+        ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD))
+    end)}.
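Since req/2,3 (defined at the end of this module) decodes response bodies with jiffy's return_maps option, an HTTP status and a JSON body assert in a single match. A hedged sketch of the idiom, reusing this module's helpers and macros:

    create_and_inspect(Top, Db) ->
        % POST returns {HttpCode, DecodedBody}; maps pattern-match directly
        {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, #{type => split, db => Db}),
        {200, #{<<"job_state">> := JobSt}} = req(get, Top ++ ?JOBS ++ ?b2l(Id)),
        {Id, JobSt}.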
+
+
+start_stop_cluster_basic({Top, _}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        Url = Top ++ ?STATE,
+
+        ?assertMatch({200, #{
+            <<"state">> := <<"running">>,
+            <<"reason">> := null
+        }}, req(get, Url)),
+
+        ?assertMatch({200, _}, req(put, Url, #{state => stopped})),
+        ?assertMatch({200, #{
+            <<"state">> := <<"stopped">>,
+            <<"reason">> := R
+        }} when is_binary(R), req(get, Url)),
+
+        ?assertMatch({200, _}, req(put, Url, #{state => running})),
+
+        % Make sure the reason shows in the state GET request
+        Reason = <<"somereason">>,
+        ?assertMatch({200, _}, req(put, Url, #{state => stopped,
+            reason => Reason})),
+        ?assertMatch({200, #{<<"state">> := <<"stopped">>,
+            <<"reason">> := Reason}}, req(get, Url)),
+
+        % Top level summary also shows the reason
+        ?assertMatch({200, #{
+            <<"state">> := <<"stopped">>,
+            <<"state_reason">> := Reason
+        }}, req(get, Top ++ ?RESHARD)),
+        ?assertMatch({200, _}, req(put, Url, #{state => running})),
+        ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, Url))
+    end)}.
+
+
+test_disabled({Top, _}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        application:set_env(mem3, reshard_disabled, true),
+        ?assertMatch({501, _}, req(get, Top ++ ?RESHARD)),
+        ?assertMatch({501, _}, req(put, Top ++ ?STATE, #{state => running})),
+
+        application:unset_env(mem3, reshard_disabled),
+        ?assertMatch({200, _}, req(get, Top ++ ?RESHARD)),
+        ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running}))
+    end)}.
+
+
+start_stop_cluster_with_a_job({Top, {Db1, _, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        Url = Top ++ ?STATE,
+
+        ?assertMatch({200, _}, req(put, Url, #{state => stopped})),
+        ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, Url)),
+
+        % Can add jobs with the global state stopped; they just won't be running
+        {201, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}),
+        ?assertMatch([#{?OK := true}], R1),
+        [#{?ID := Id1}] = R1,
+        {200, J1} = req(get, Top ++ ?JOBS ++ ?b2l(Id1)),
+        ?assertMatch(#{?ID := Id1, <<"job_state">> := <<"stopped">>}, J1),
+        % Check summary stats
+        ?assertMatch({200, #{
+            <<"state">> := <<"stopped">>,
+            <<"running">> := 0,
+            <<"stopped">> := 1,
+            <<"total">> := 1
+        }}, req(get, Top ++ ?RESHARD)),
+
+        % Can delete the job when stopped
+        {200, #{?OK := true}} = req(delete, Top ++ ?JOBS ++ ?b2l(Id1)),
+        ?assertMatch({200, #{
+            <<"state">> := <<"stopped">>,
+            <<"running">> := 0,
+            <<"stopped">> := 0,
+            <<"total">> := 0
+        }}, req(get, Top ++ ?RESHARD)),
+
+        % Add the same job again
+        {201, [#{?ID := Id2}]} = req(post, Top ++ ?JOBS, #{type => split,
+            db => Db1}),
+        ?assertMatch({200, #{?ID := Id2, <<"job_state">> := <<"stopped">>}},
+            req(get, Top ++ ?JOBS ++ ?b2l(Id2))),
+
+        % Job should start after resharding is started on the cluster
+        ?assertMatch({200, _}, req(put, Url, #{state => running})),
+        ?assertMatch({200, #{?ID := Id2, <<"job_state">> := JSt}}
+            when JSt =/= <<"stopped">>, req(get, Top ++ ?JOBS ++ ?b2l(Id2)))
+    end)}.
+
+
+individual_job_start_stop({Top, {Db1, _, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        intercept_state(topoff1),
+
+        Body = #{type => split, db => Db1},
+        {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
+
+        JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
+        StUrl = JobUrl ++ "/state",
+
+        % Wait for the job to start running and intercept it in topoff1 state
+        receive {JobPid, topoff1} -> ok end,
+        % Tell the intercept to never finish checkpointing so the job is left
+        % hanging forever in the running state
+        JobPid ! cancel,
+        ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
+
+        {200, _} = req(put, StUrl, #{state => stopped}),
+        wait_state(StUrl, <<"stopped">>),
+
+        % Stop/start resharding globally and the job should still stay stopped
+        ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
+        ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
+        ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
+
+        % Start the job again
+        ?assertMatch({200, _}, req(put, StUrl, #{state => running})),
+        % Wait for the job to start running and intercept it in topoff1 state
+        receive {JobPid2, topoff1} -> ok end,
+        ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
+        % Let it continue running and it should complete eventually
+        JobPid2 ! continue,
+        wait_state(StUrl, <<"completed">>)
+    end)}.
+
+
+individual_job_stop_when_cluster_stopped({Top, {Db1, _, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        intercept_state(topoff1),
+
+        Body = #{type => split, db => Db1},
+        {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
+
+        JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
+        StUrl = JobUrl ++ "/state",
+
+        % Wait for the job to start running and intercept it in topoff1
+        receive {JobPid, topoff1} -> ok end,
+        % Tell the intercept to never finish checkpointing so the job is left
+        % hanging forever in the running state
+        JobPid ! cancel,
+        ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
+
+        % Stop resharding globally
+        ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
+        wait_state(StUrl, <<"stopped">>),
+
+        % Stop the job specifically
+        {200, _} = req(put, StUrl, #{state => stopped}),
+        % Job stays stopped
+        ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
+
+        % Set the cluster to running again
+        ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
+
+        % The job should stay stopped
+        ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
+
+        % It should be possible to resume the job and it should complete
+        ?assertMatch({200, _}, req(put, StUrl, #{state => running})),
+
+        % Wait for the job to start running and intercept it in topoff1 state
+        receive {JobPid2, topoff1} -> ok end,
+        ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
+
+        % Let it continue running and it should complete eventually
+        JobPid2 ! continue,
+        wait_state(StUrl, <<"completed">>)
+    end)}.
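The notify-and-block handshake these two tests rely on reduces to a self-contained model; this sketch is illustrative only and mirrors what intercept_state/1 (defined further below) arranges via meck:

    handshake_demo() ->
        TestPid = self(),
        JobPid = spawn(fun() ->
            TestPid ! {self(), topoff1},                 % job announces its state
            receive continue -> ok; cancel -> ok end     % then blocks on the test
        end),
        receive {JobPid, topoff1} -> ok end,             % test intercepts the job
        JobPid ! continue,                               % or cancel to park it
        ok.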
+ + +create_job_with_invalid_arguments({Top, {Db1, _, _}}) -> + {timeout, ?TIMEOUT, ?_test(begin + Jobs = Top ++ ?JOBS, + + % Nothing in the body + ?assertMatch({400, _}, req(post, Jobs, #{})), + + % Missing type + ?assertMatch({400, _}, req(post, Jobs, #{db => Db1})), + + % Have type but no db and no shard + ?assertMatch({400, _}, req(post, Jobs, #{type => split})), + + % Have type and db but db is invalid + ?assertMatch({400, _}, req(post, Jobs, #{db => <<"baddb">>, + type => split})), + + % Have type and shard but shard is not an existing database + ?assertMatch({404, _}, req(post, Jobs, #{type => split, + shard => <<"shards/80000000-ffffffff/baddb.1549492084">>})), + + % Bad range values, too large, different types, inverted + ?assertMatch({400, _}, req(post, Jobs, #{db => Db1, range => 42, + type => split})), + ?assertMatch({400, _}, req(post, Jobs, #{db => Db1, + range => <<"x">>, type => split})), + ?assertMatch({400, _}, req(post, Jobs, #{db => Db1, + range => <<"ffffffff-80000000">>, type => split})), + ?assertMatch({400, _}, req(post, Jobs, #{db => Db1, + range => <<"00000000-fffffffff">>, type => split})), + + % Can't have both db and shard + ?assertMatch({400, _}, req(post, Jobs, #{type => split, db => Db1, + shard => <<"blah">>})) + end)}. + + +create_job_with_db({Top, {Db1, _, _}}) -> + {timeout, ?TIMEOUT, ?_test(begin + Jobs = Top ++ ?JOBS, + Body1 = #{type => split, db => Db1}, + + % Node with db + N = atom_to_binary(node(), utf8), + {C1, R1} = req(post, Jobs, Body1#{node => N}), + ?assertMatch({201, [#{?OK := true}]}, {C1, R1}), + wait_to_complete_then_cleanup(Top, R1), + + % Range and db + {C2, R2} = req(post, Jobs, Body1#{range => <<"00000000-7fffffff">>}), + ?assertMatch({201, [#{?OK := true}]}, {C2, R2}), + wait_to_complete_then_cleanup(Top, R2), + + % Node, range and db + Range = <<"80000000-ffffffff">>, + {C3, R3} = req(post, Jobs, Body1#{range => Range, node => N}), + ?assertMatch({201, [#{?OK := true}]}, {C3, R3}), + wait_to_complete_then_cleanup(Top, R3), + + ?assertMatch([ + [16#00000000, 16#3fffffff], + [16#40000000, 16#7fffffff], + [16#80000000, 16#bfffffff], + [16#c0000000, 16#ffffffff] + ], [mem3:range(S) || S <- lists:sort(mem3:shards(Db1))]) + end)}. + + +create_job_with_shard_name({Top, {_, _, Db3}}) -> + {timeout, ?TIMEOUT, ?_test(begin + Jobs = Top ++ ?JOBS, + [S1, S2] = [mem3:name(S) || S <- lists:sort(mem3:shards(Db3))], + + % Shard only + {C1, R1} = req(post, Jobs, #{type => split, shard => S1}), + ?assertMatch({201, [#{?OK := true}]}, {C1, R1}), + wait_to_complete_then_cleanup(Top, R1), + + % Shard with a node + N = atom_to_binary(node(), utf8), + {C2, R2} = req(post, Jobs, #{type => split, shard => S2, node => N}), + ?assertMatch({201, [#{?OK := true}]}, {C2, R2}), + wait_to_complete_then_cleanup(Top, R2), + + ?assertMatch([ + [16#00000000, 16#3fffffff], + [16#40000000, 16#7fffffff], + [16#80000000, 16#bfffffff], + [16#c0000000, 16#ffffffff] + ], [mem3:range(S) || S <- lists:sort(mem3:shards(Db3))]) + end)}. 
+ + +completed_job_handling({Top, {Db1, _, _}}) -> + {timeout, ?TIMEOUT, ?_test(begin + Jobs = Top ++ ?JOBS, + + % Run job to completion + {C1, R1} = req(post, Jobs, #{type => split, db => Db1}), + ?assertMatch({201, [#{?OK := true}]}, {C1, R1}), + [#{?ID := Id}] = R1, + wait_to_complete(Top, R1), + + % Check top level stats + ?assertMatch({200, #{ + <<"state">> := <<"running">>, + <<"state_reason">> := null, + <<"completed">> := 1, + <<"failed">> := 0, + <<"running">> := 0, + <<"stopped">> := 0, + <<"total">> := 1 + }}, req(get, Top ++ ?RESHARD)), + + % Job state itself + JobUrl = Jobs ++ ?b2l(Id), + ?assertMatch({200, #{ + <<"split_state">> := <<"completed">>, + <<"job_state">> := <<"completed">> + }}, req(get, JobUrl)), + + % Job's state endpoint + StUrl = Jobs ++ ?b2l(Id) ++ "/state", + ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)), + + % Try to stop it and it should stay completed + {200, _} = req(put, StUrl, #{state => stopped}), + ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)), + + % Try to resume it and it should stay completed + {200, _} = req(put, StUrl, #{state => running}), + ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)), + + % Stop resharding globally and job should still stay completed + ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})), + ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)), + + % Start resharding and job stays completed + ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})), + ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)), + + ?assertMatch({200, #{?OK := true}}, req(delete, JobUrl)) + end)}. + + +handle_db_deletion_in_topoff1({Top, {Db1, _, _}}) -> + {timeout, ?TIMEOUT, ?_test(begin + JobId = delete_source_in_state(Top, Db1, topoff1), + wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) + end)}. + + +handle_db_deletion_in_initial_copy({Top, {Db1, _, _}}) -> + {timeout, ?TIMEOUT, ?_test(begin + JobId = delete_source_in_state(Top, Db1, initial_copy), + wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) + end)}. + + +handle_db_deletion_in_copy_local_docs({Top, {Db1, _, _}}) -> + {timeout, ?TIMEOUT, ?_test(begin + JobId = delete_source_in_state(Top, Db1, copy_local_docs), + wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) + end)}. + + +handle_db_deletion_in_build_indices({Top, {Db1, _, _}}) -> + {timeout, ?TIMEOUT, ?_test(begin + JobId = delete_source_in_state(Top, Db1, build_indices), + wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) + end)}. + + +handle_db_deletion_in_update_shard_map({Top, {Db1, _, _}}) -> + {timeout, ?TIMEOUT, ?_test(begin + JobId = delete_source_in_state(Top, Db1, update_shardmap), + wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) + end)}. + + +handle_db_deletion_in_wait_source_close({Top, {Db1, _, _}}) -> + {timeout, ?TIMEOUT, ?_test(begin + JobId = delete_source_in_state(Top, Db1, wait_source_close), + wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) + end)}. + + +recover_in_topoff1({Top, {Db1, _, _}}) -> + {timeout, ?TIMEOUT, ?_test(begin + JobId = recover_in_state(Top, Db1, topoff1), + wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) + end)}. + + +recover_in_initial_copy({Top, {Db1, _, _}}) -> + {timeout, ?TIMEOUT, ?_test(begin + JobId = recover_in_state(Top, Db1, initial_copy), + wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) + end)}. 
+
+
+recover_in_copy_local_docs({Top, {Db1, _, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        JobId = recover_in_state(Top, Db1, copy_local_docs),
+        wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+    end)}.
+
+
+recover_in_build_indices({Top, {Db1, _, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        JobId = recover_in_state(Top, Db1, build_indices),
+        wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+    end)}.
+
+
+recover_in_update_shard_map({Top, {Db1, _, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        JobId = recover_in_state(Top, Db1, update_shardmap),
+        wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+    end)}.
+
+
+recover_in_wait_source_close({Top, {Db1, _, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        JobId = recover_in_state(Top, Db1, wait_source_close),
+        wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+    end)}.
+
+
+recover_in_topoff3({Top, {Db1, _, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        JobId = recover_in_state(Top, Db1, topoff3),
+        wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+    end)}.
+
+
+recover_in_source_delete({Top, {Db1, _, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        JobId = recover_in_state(Top, Db1, source_delete),
+        wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+    end)}.
+
+
+check_max_jobs({Top, {Db1, Db2, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        Jobs = Top ++ ?JOBS,
+
+        config:set("reshard", "max_jobs", "0", _Persist=false),
+        {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
+        ?assertMatch({500, [#{<<"error">> := <<"max_jobs_exceeded">>}]}, {C1, R1}),
+
+        config:set("reshard", "max_jobs", "1", _Persist=false),
+        {201, R2} = req(post, Jobs, #{type => split, db => Db1}),
+        wait_to_complete(Top, R2),
+
+        % Stop resharding so that jobs are not started anymore, and ensure
+        % max_jobs is enforced even when jobs are stopped
+        ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
+
+        {C3, R3} = req(post, Jobs, #{type => split, db => Db2}),
+        ?assertMatch({500, [#{<<"error">> := <<"max_jobs_exceeded">>}]},
+            {C3, R3}),
+
+        % Allow the job to be created by raising max_jobs
+        config:set("reshard", "max_jobs", "2", _Persist=false),
+
+        {C4, R4} = req(post, Jobs, #{type => split, db => Db2}),
+        ?assertEqual(201, C4),
+
+        % Lower max_jobs after the job is created but while it's not running
+        config:set("reshard", "max_jobs", "1", _Persist=false),
+
+        % Start resharding again
+        ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
+
+        % Jobs that have already been created are not removed if max_jobs is
+        % lowered, so make sure the job completes
+        wait_to_complete(Top, R4)
+    end)}.
+
+
+check_node_and_range_required_params({Top, {Db1, _, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        Jobs = Top ++ ?JOBS,
+
+        Node = atom_to_binary(node(), utf8),
+        Range = <<"00000000-ffffffff">>,
+
+        config:set("reshard", "require_node_param", "true", _Persist=false),
+        {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
+        NodeRequiredErr = <<"`node` prameter is required">>,
+        ?assertEqual({400, #{<<"error">> => <<"bad_request">>,
+            <<"reason">> => NodeRequiredErr}}, {C1, R1}),
+
+        config:set("reshard", "require_range_param", "true", _Persist=false),
+        {C2, R2} = req(post, Jobs, #{type => split, db => Db1, node => Node}),
+        RangeRequiredErr = <<"`range` prameter is required">>,
+        ?assertEqual({400, #{<<"error">> => <<"bad_request">>,
+            <<"reason">> => RangeRequiredErr}}, {C2, R2}),
+
+        Body = #{type => split, db => Db1, range => Range, node => Node},
+        {C3, R3} = req(post, Jobs, Body),
+        ?assertMatch({201, [#{?OK := true}]}, {C3, R3}),
+        wait_to_complete_then_cleanup(Top, R3)
+    end)}.
+
+
+cleanup_completed_jobs({Top, {Db1, _, _}}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        Body = #{type => split, db => Db1},
+        {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
+        JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
+        wait_state(JobUrl ++ "/state", <<"completed">>),
+        delete_db(Top, Db1),
+        wait_for_http_code(JobUrl, 404)
+    end)}.
+
+
+% Test helper functions
+
+wait_to_complete_then_cleanup(Top, Jobs) ->
+    JobsUrl = Top ++ ?JOBS,
+    lists:foreach(fun(#{?ID := Id}) ->
+        wait_state(JobsUrl ++ ?b2l(Id) ++ "/state", <<"completed">>),
+        {200, _} = req(delete, JobsUrl ++ ?b2l(Id))
+    end, Jobs).
+
+
+wait_to_complete(Top, Jobs) ->
+    JobsUrl = Top ++ ?JOBS,
+    lists:foreach(fun(#{?ID := Id}) ->
+        wait_state(JobsUrl ++ ?b2l(Id) ++ "/state", <<"completed">>)
+    end, Jobs).
+
+
+intercept_state(State) ->
+    TestPid = self(),
+    meck:new(mem3_reshard_job, [passthrough]),
+    meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) ->
+        case Job#job.split_state of
+            State ->
+                TestPid ! {self(), State},
+                receive
+                    continue -> meck:passthrough([Job]);
+                    cancel -> ok
+                end;
+            _ ->
+                meck:passthrough([Job])
+        end
+    end).
+
+
+cancel_intercept() ->
+    meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) ->
+        meck:passthrough([Job])
+    end).
+
+
+wait_state(Url, State) ->
+    test_util:wait(fun() ->
+        case req(get, Url) of
+            {200, #{<<"state">> := State}} -> ok;
+            {200, #{}} -> timer:sleep(100), wait
+        end
+    end, 30000).
+
+
+wait_for_http_code(Url, Code) when is_integer(Code) ->
+    test_util:wait(fun() ->
+        case req(get, Url) of
+            {Code, _} -> ok;
+            {_, _} -> timer:sleep(100), wait
+        end
+    end, 30000).
+
+
+delete_source_in_state(Top, Db, State) when is_atom(State), is_binary(Db) ->
+    intercept_state(State),
+    Body = #{type => split, db => Db},
+    {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
+    receive {JobPid, State} -> ok end,
+    sync_delete_db(Top, Db),
+    JobPid ! continue,
+    Id.
+
+
+recover_in_state(Top, Db, State) when is_atom(State) ->
+    intercept_state(State),
+    Body = #{type => split, db => Db},
+    {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
+    receive {JobPid, State} -> ok end,
+    % The job is now stuck in running; we prevented it from executing
+    % the given state
+    JobPid ! cancel,
+    % Now restart resharding
+    ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
+    cancel_intercept(),
+    ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
+    Id.
+ + +create_db(Top, Db, QArgs) when is_binary(Db) -> + Url = Top ++ binary_to_list(Db) ++ QArgs, + {ok, Status, _, _} = test_request:put(Url, [?JSON, ?AUTH], "{}"), + ?assert(Status =:= 201 orelse Status =:= 202). + + +delete_db(Top, Db) when is_binary(Db) -> + Url = Top ++ binary_to_list(Db), + case test_request:get(Url, [?AUTH]) of + {ok, 404, _, _} -> + not_found; + {ok, 200, _, _} -> + {ok, 200, _, _} = test_request:delete(Url, [?AUTH]), + ok + end. + + +sync_delete_db(Top, Db) when is_binary(Db) -> + delete_db(Top, Db), + try + Shards = mem3:local_shards(Db), + ShardNames = [mem3:name(S) || S <- Shards], + [couch_server:delete(N, [?ADMIN_CTX]) || N <- ShardNames], + ok + catch + error:database_does_not_exist -> + ok + end. + + +req(Method, Url) -> + Headers = [?AUTH], + {ok, Code, _, Res} = test_request:request(Method, Url, Headers), + {Code, jiffy:decode(Res, [return_maps])}. + + +req(Method, Url, #{} = Body) -> + req(Method, Url, jiffy:encode(Body)); + +req(Method, Url, Body) -> + Headers = [?JSON, ?AUTH], + {ok, Code, _, Res} = test_request:request(Method, Url, Headers, Body), + {Code, jiffy:decode(Res, [return_maps])}. diff --git a/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl b/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl new file mode 100644 index 000000000..4b9e2a34a --- /dev/null +++ b/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl @@ -0,0 +1,389 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(mem3_reshard_changes_feed_test). + + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("mem3/src/mem3_reshard.hrl"). + +-define(TIMEOUT, 60). % seconds + +-define(assertChanges(Expected, Received), + begin + ((fun() -> + ExpectedIDs = lists:sort([I || #{id := I} <- Expected]), + ReceivedIDs = lists:sort([I || #{id := I} <- Received]), + ?assertEqual(ExpectedIDs, ReceivedIDs) + end)()) + end). + + +setup() -> + Db1 = ?tempdb(), + create_db(Db1, [{q, 1}, {n, 1}]), + #{db1 => Db1}. + + +teardown(#{} = Dbs) -> + mem3_reshard:reset_state(), + maps:map(fun(_, Db) -> delete_db(Db) end, Dbs). + + +start_couch() -> + test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]). + + +stop_couch(Ctx) -> + test_util:stop_couch(Ctx). + + +mem3_reshard_changes_feed_test_() -> + { + "mem3 shard split changes feed tests", + { + setup, + fun start_couch/0, fun stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun normal_feed_should_work_after_split/1, + fun continuous_feed_should_work_during_split/1 + ] + } + } + }. 
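Note that ?assertChanges above compares only the sorted ids, so arrival order and extra keys such as seq are ignored. A small usage sketch (illustrative, not part of the patch):

    assert_changes_demo() ->
        Expected = [#{id => <<"a">>}, #{id => <<"b">>}],
        Received = [#{id => <<"b">>, seq => 42}, #{id => <<"a">>, seq => 7}],
        % passes: both sides reduce to the sorted id list [<<"a">>, <<"b">>]
        ?assertChanges(Expected, Received).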
+
+
+normal_feed_should_work_after_split(#{db1 := Db}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        DocSpec = #{
+            docs => [1, 10],
+            delete => [5, 6]
+        },
+        add_test_docs(Db, DocSpec),
+
+        % gather pre-split changes
+        BaseArgs = #changes_args{feed = "normal", dir = fwd, since = 0},
+        {ok, OldChanges, OldEndSeq} = get_changes_feed(Db, BaseArgs),
+
+        % Split the shard
+        split_and_wait(Db),
+
+        % verify the changes list is consistent for all the old seqs
+        lists:foldl(fun(#{seq := Seq} = C, ExpectedChanges) ->
+            Args = BaseArgs#changes_args{since = Seq},
+            {ok, Changes, _EndSeq} = get_changes_feed(Db, Args),
+            ?assertChanges(ExpectedChanges, Changes),
+            [C | ExpectedChanges]
+        end, [], OldChanges),
+
+        % confirm that the old LastSeq is respected
+        Args1 = BaseArgs#changes_args{since = OldEndSeq},
+        {ok, Changes1, EndSeq1} = get_changes_feed(Db, Args1),
+        ?assertChanges([], Changes1),
+
+        % confirm that the new LastSeq is also respected
+        Args2 = BaseArgs#changes_args{since = EndSeq1},
+        {ok, Changes2, EndSeq2} = get_changes_feed(Db, Args2),
+        ?assertChanges([], Changes2),
+        ?assertEqual(EndSeq2, EndSeq1),
+
+        % confirm we didn't lose any changes and have a consistent last seq
+        {ok, Changes3, EndSeq3} = get_changes_feed(Db, BaseArgs),
+        ?assertChanges(OldChanges, Changes3),
+
+        % add some docs
+        add_test_docs(Db, #{docs => [11, 15]}),
+        Args4 = BaseArgs#changes_args{since = EndSeq3},
+        {ok, Changes4, EndSeq4} = get_changes_feed(Db, Args4),
+        AddedChanges = [#{id => ID} || #doc{id = ID} <- docs([11, 15])],
+        ?assertChanges(AddedChanges, Changes4),
+
+        % confirm include_docs and deleted work
+        Args5 = BaseArgs#changes_args{include_docs = true},
+        {ok, Changes5, EndSeq5} = get_changes_feed(Db, Args5),
+        ?assertEqual(EndSeq4, EndSeq5),
+        [SampleChange] = [C || #{id := ID} = C <- Changes5, ID == <<"00005">>],
+        ?assertMatch(#{deleted := true}, SampleChange),
+        ?assertMatch(#{doc := {Body}} when is_list(Body), SampleChange),
+
+        % update and delete some pre and post split docs
+        AllDocs = [couch_doc:from_json_obj(Doc) || #{doc := Doc} <- Changes5],
+        UpdateDocs = lists:filtermap(fun
+            (#doc{id = <<"00002">>}) -> true;
+            (#doc{id = <<"00012">>}) -> true;
+            (#doc{id = <<"00004">>} = Doc) -> {true, Doc#doc{deleted = true}};
+            (#doc{id = <<"00014">>} = Doc) -> {true, Doc#doc{deleted = true}};
+            (_) -> false
+        end, AllDocs),
+        update_docs(Db, UpdateDocs),
+
+        Args6 = BaseArgs#changes_args{since = EndSeq5},
+        {ok, Changes6, EndSeq6} = get_changes_feed(Db, Args6),
+        UpdatedChanges = [#{id => ID} || #doc{id = ID} <- UpdateDocs],
+        ?assertChanges(UpdatedChanges, Changes6),
+        [#{seq := Seq6} | _] = Changes6,
+        ?assertEqual(EndSeq6, Seq6),
+
+        Args7 = Args6#changes_args{dir = rev, limit = 4},
+        {ok, Changes7, EndSeq7} = get_changes_feed(Db, Args7),
+        ?assertEqual(4, length(Changes7)),
+        [#{seq := Seq7} | _] = Changes7,
+        ?assertEqual(EndSeq7, Seq7)
+    end)}.
+
+
+continuous_feed_should_work_during_split(#{db1 := Db}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        {UpdaterPid, UpdaterRef} = spawn_monitor(fun() ->
+            Updater = fun U({State, I}) ->
+                receive
+                    {get_state, {Pid, Ref}} ->
+                        Pid ! {state, Ref, {State, I}},
+                        U({State, I});
+                    add ->
+                        DocSpec = #{docs => [I, I]},
+                        add_test_docs(Db, DocSpec),
+                        U({State, I + 1});
+                    split ->
+                        spawn_monitor(fun() -> split_and_wait(Db) end),
+                        U({"in_process", I});
+                    stop ->
+                        receive {'DOWN', _, process, _, _} -> ok end,
+                        ok
+                end
+            end,
+            Updater({"before", 1})
+        end),
+
+        Callback = fun
+            (start, Acc) ->
+                {ok, Acc};
+            (waiting_for_updates, Acc) ->
+                Ref = make_ref(),
+                UpdaterPid ! {get_state, {self(), Ref}},
+                receive {state, Ref, {State, _}} -> ok end,
+                case {State, length(Acc)} of
+                    {"before", N} when N < 5 ->
+                        UpdaterPid ! add,
+                        {ok, Acc};
+                    {"before", _} ->
+                        UpdaterPid ! split,
+                        {ok, Acc};
+                    {"in_process", N} when N < 10 ->
+                        UpdaterPid ! add,
+                        {ok, Acc};
+                    {"in_process", _} ->
+                        {ok, Acc}
+                end;
+            (timeout, Acc) ->
+                {ok, Acc};
+            ({change, {Change}}, Acc) ->
+                CM = maps:from_list(Change),
+                {ok, [CM | Acc]};
+            ({stop, EndSeq, _Pending}, Acc) ->
+                % Notice the updater is still running
+                {stop, EndSeq, Acc}
+        end,
+
+        BaseArgs = #changes_args{
+            feed = "continuous",
+            heartbeat = 100,
+            timeout = 1000
+        },
+        StopResult = get_changes_feed(Db, BaseArgs, Callback),
+
+        % Changes feed stopped when the source shard was deleted
+        ?assertMatch({stop, _, _}, StopResult),
+        {stop, StopEndSeq, StopChanges} = StopResult,
+
+        % Add 5 extra docs to the db right after the changes feed was stopped
+        [UpdaterPid ! add || _ <- lists:seq(1, 5)],
+
+        % Get the number of documents that the updater had added
+        Ref = make_ref(),
+        UpdaterPid ! {get_state, {self(), Ref}},
+        DocCount = receive {state, Ref, {_, I}} -> I - 1 end,
+
+        UpdaterPid ! stop,
+        receive
+            {'DOWN', UpdaterRef, process, UpdaterPid, normal} ->
+                ok;
+            {'DOWN', UpdaterRef, process, UpdaterPid, Error} ->
+                erlang:error({test_context_failed, [
+                    {module, ?MODULE},
+                    {line, ?LINE},
+                    {value, Error},
+                    {reason, "Updater died"}]})
+        end,
+
+        AfterArgs = #changes_args{feed = "normal", since = StopEndSeq},
+        {ok, AfterChanges, _} = get_changes_feed(Db, AfterArgs),
+        DocIDs = [Id || #{id := Id} <- StopChanges ++ AfterChanges],
+        ExpectedDocIDs = [doc_id(<<>>, N) || N <- lists:seq(1, DocCount)],
+        ?assertEqual(ExpectedDocIDs, lists:usort(DocIDs))
+    end)}.
+
+
+split_and_wait(Db) ->
+    [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
+    {ok, JobId} = mem3_reshard:start_split_job(Shard),
+    wait_state(JobId, completed),
+    ResultShards = lists:sort(mem3:local_shards(Db)),
+    ?assertEqual(2, length(ResultShards)).
+
+
+wait_state(JobId, State) ->
+    test_util:wait(fun() ->
+        case mem3_reshard:job(JobId) of
+            {ok, {Props}} ->
+                case couch_util:get_value(job_state, Props) of
+                    State -> ok;
+                    _ -> timer:sleep(100), wait
+                end;
+            {error, not_found} -> timer:sleep(100), wait
+        end
+    end, 30000).
+
+
+get_changes_feed(Db, Args) ->
+    get_changes_feed(Db, Args, fun changes_callback/2).
+
+
+get_changes_feed(Db, Args, Callback) ->
+    with_proc(fun() ->
+        fabric:changes(Db, Callback, [], Args)
+    end).
+
+
+changes_callback(start, Acc) ->
+    {ok, Acc};
+changes_callback({change, {Change}}, Acc) ->
+    CM = maps:from_list(Change),
+    {ok, [CM | Acc]};
+changes_callback({stop, EndSeq, _Pending}, Acc) ->
+    {ok, Acc, EndSeq}.
+
+
+%% common helpers from here
+
+
+create_db(DbName, Opts) ->
+    GL = erlang:group_leader(),
+    with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
+
+
+delete_db(DbName) ->
+    GL = erlang:group_leader(),
+    with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
+
+
+with_proc(Fun) ->
+    with_proc(Fun, undefined, 30000).
+
+
+with_proc(Fun, GroupLeader) ->
+    with_proc(Fun, GroupLeader, 30000).
+
+
+with_proc(Fun, GroupLeader, Timeout) ->
+    {Pid, Ref} = spawn_monitor(fun() ->
+        case GroupLeader of
+            undefined -> ok;
+            _ -> erlang:group_leader(GroupLeader, self())
+        end,
+        exit({with_proc_res, Fun()})
+    end),
+    receive
+        {'DOWN', Ref, process, Pid, {with_proc_res, Res}} ->
+            Res;
+        {'DOWN', Ref, process, Pid, Error} ->
+            error(Error)
+    after Timeout ->
+        erlang:demonitor(Ref, [flush]),
+        exit(Pid, kill),
+        error({with_proc_timeout, Fun, Timeout})
+    end.
+
+
+add_test_docs(DbName, #{} = DocSpec) ->
+    Docs = docs(maps:get(docs, DocSpec, [])),
+    Res = update_docs(DbName, Docs),
+    Docs1 = lists:map(fun({Doc, {ok, {RevPos, Rev}}}) ->
+        Doc#doc{revs = {RevPos, [Rev]}}
+    end, lists:zip(Docs, Res)),
+    case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
+        [] -> ok;
+        [_ | _] = Deleted -> update_docs(DbName, Deleted)
+    end,
+    ok.
+
+
+update_docs(DbName, Docs) ->
+    with_proc(fun() ->
+        case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
+            {accepted, Res} -> Res;
+            {ok, Res} -> Res
+        end
+    end).
+
+
+delete_docs([S, E], Docs) when E >= S ->
+    ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
+    lists:filtermap(fun(#doc{id = Id} = Doc) ->
+        case lists:member(Id, ToDelete) of
+            true -> {true, Doc#doc{deleted = true}};
+            false -> false
+        end
+    end, Docs);
+delete_docs(_, _) ->
+    [].
+
+
+docs([S, E]) when E >= S ->
+    [doc(<<"">>, I) || I <- lists:seq(S, E)];
+docs(_) ->
+    [].
+
+
+doc(Pref, Id) ->
+    Body = [{<<"a">>, <<"b">>}],
+    doc(Pref, Id, Body, 42).
+
+
+doc(Pref, Id, BodyProps, AttSize) ->
+    #doc{
+        id = doc_id(Pref, Id),
+        body = {BodyProps},
+        atts = atts(AttSize)
+    }.
+
+
+doc_id(Pref, Id) ->
+    IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
+    <<Pref/binary, IdBin/binary>>.
+
+
+atts(0) ->
+    [];
+
+atts(Size) when is_integer(Size), Size >= 1 ->
+    Data = << <<"x">> || _ <- lists:seq(1, Size) >>,
+    [couch_att:new([
+        {name, <<"att">>},
+        {type, <<"app/binary">>},
+        {att_len, Size},
+        {data, Data}
+    ])].
diff --git a/src/mem3/test/eunit/mem3_reshard_test.erl b/src/mem3/test/eunit/mem3_reshard_test.erl
new file mode 100644
index 000000000..ab6202115
--- /dev/null
+++ b/src/mem3/test/eunit/mem3_reshard_test.erl
@@ -0,0 +1,805 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_reshard_test).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("mem3/src/mem3_reshard.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl"). % for all_docs function
+
+-define(ID, <<"_id">>).
+-define(TIMEOUT, 60).
+
+setup() ->
+    HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
+    case HaveDreyfus of false -> ok; true ->
+        mock_dreyfus_indices()
+    end,
+
+    HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
+    case HaveHastings of false -> ok; true ->
+        mock_hastings_indices()
+    end,
+    {Db1, Db2} = {?tempdb(), ?tempdb()},
+    create_db(Db1, [{q, 1}, {n, 1}]),
+    PartProps = [{partitioned, true}, {hash, [couch_partition, hash, []]}],
+    create_db(Db2, [{q, 1}, {n, 1}, {props, PartProps}]),
+    config:set("reshard", "retry_interval_sec", "0", _Persist=false),
+    #{db1 => Db1, db2 => Db2}.
+
+
+teardown(#{} = Dbs) ->
+    mem3_reshard:reset_state(),
+    maps:map(fun(_, Db) -> delete_db(Db) end, Dbs),
+    config:delete("reshard", "retry_interval_sec", _Persist=false),
+    meck:unload().
+
+
+start_couch() ->
+    test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]).
+
+
+stop_couch(Ctx) ->
+    test_util:stop_couch(Ctx).
+
+
+mem3_reshard_db_test_() ->
+    {
+        "mem3 shard split db tests",
+        {
+            setup,
+            fun start_couch/0, fun stop_couch/1,
+            {
+                foreach,
+                fun setup/0, fun teardown/1,
+                [
+                    fun split_one_shard/1,
+                    fun update_docs_before_topoff1/1,
+                    fun indices_are_built/1,
+                    fun split_partitioned_db/1,
+                    fun split_twice/1,
+                    fun couch_events_are_emitted/1,
+                    fun retries_work/1,
+                    fun target_reset_in_initial_copy/1,
+                    fun split_an_incomplete_shard_map/1
+                ]
+            }
+        }
+    }.
+
+
+% This is a basic test to check that shard splitting preserves documents and
+% db meta props like revs limits and security.
+split_one_shard(#{db1 := Db}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        DocSpec = #{docs => 10, delete => [5, 9], mrview => 1, local => 1},
+        add_test_docs(Db, DocSpec),
+
+        % Save documents before the split
+        Docs0 = get_all_docs(Db),
+        Local0 = get_local_docs(Db),
+
+        % Set some custom metadata properties
+        set_revs_limit(Db, 942),
+        set_purge_infos_limit(Db, 943),
+        SecObj = {[{<<"foo">>, <<"bar">>}]},
+        set_security(Db, SecObj),
+
+        % DbInfo is saved after setting metadata bits
+        % as those could bump the update sequence
+        DbInfo0 = get_db_info(Db),
+
+        % Split the one shard
+        [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
+        {ok, JobId} = mem3_reshard:start_split_job(Shard),
+        wait_state(JobId, completed),
+
+        % Perform some basic checks that the shard was split
+        Shards1 = lists:sort(mem3:local_shards(Db)),
+        ?assertEqual(2, length(Shards1)),
+        [#shard{range = R1}, #shard{range = R2}] = Shards1,
+        ?assertEqual([16#00000000, 16#7fffffff], R1),
+        ?assertEqual([16#80000000, 16#ffffffff], R2),
+
+        % Check metadata bits after the split
+        ?assertEqual(942, get_revs_limit(Db)),
+        ?assertEqual(943, get_purge_infos_limit(Db)),
+        ?assertEqual(SecObj, get_security(Db)),
+
+        DbInfo1 = get_db_info(Db),
+        Docs1 = get_all_docs(Db),
+        Local1 = get_local_docs(Db),
+
+        % When comparing db infos, ignore update sequences; they won't be the
+        % same since there are more shards involved after the split
+        ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
+
+        % Update seq prefix number is a sum of all shard update sequences
+        #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
+        #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
+        ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
+
+        % Finally compare that the documents are still there after the split
+        ?assertEqual(Docs0, Docs1),
+
+        % Don't forget about the local docs, but don't include internal
+        % checkpoints as some of those are munged and transformed during
+        % the split
+        ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
+    end)}.
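The update_seq assertions above lean on seq_to_num/1 (defined near the end of this file), which keeps only the numeric prefix of a packed sequence. A tiny worked example with made-up values:

    seq_math_demo() ->
        % <<"12-g1AAAA">> is a fabricated packed seq; only the "12" matters
        12 = seq_to_num(<<"12-g1AAAA">>),
        % after a 1 -> 2 split each target matches the source's seq,
        % so the cluster-wide prefix doubles: 12 + 12 = 24
        24 = 12 * 2,
        ok.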
+
+
+% This test checks that documents added while the shard is being split are
+% not lost. The topoff1 state happens before indices are built.
+update_docs_before_topoff1(#{db1 := Db}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        add_test_docs(Db, #{docs => 10}),
+
+        intercept_state(topoff1),
+
+        [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
+        {ok, JobId} = mem3_reshard:start_split_job(Shard),
+
+        receive {JobPid, topoff1} -> ok end,
+        add_test_docs(Db, #{docs => [10, 19], local => 1}),
+        Docs0 = get_all_docs(Db),
+        Local0 = get_local_docs(Db),
+        DbInfo0 = get_db_info(Db),
+        JobPid ! continue,
+
+        wait_state(JobId, completed),
+
+        % Perform some basic checks that the shard was split
+        Shards1 = lists:sort(mem3:local_shards(Db)),
+        ?assertEqual(2, length(Shards1)),
+
+        DbInfo1 = get_db_info(Db),
+        Docs1 = get_all_docs(Db),
+        Local1 = get_local_docs(Db),
+
+        ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
+
+        % Update sequence after initial copy with 10 docs would be 10 on each
+        % target shard (to match the source) and the total update sequence
+        % would have been 20. But then 10 more docs were added (3 might have
+        % ended up on one target and 7 on another) so the final update sequence
+        % would then be 20 + 10 = 30.
+        ?assertMatch(#{<<"update_seq">> := 30}, update_seq_to_num(DbInfo1)),
+
+        ?assertEqual(Docs0, Docs1),
+        ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
+    end)}.
+
+
+% This tests that indices are built during shard splitting.
+indices_are_built(#{db1 := Db}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
+        HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
+
+        add_test_docs(Db, #{docs => 10, mrview => 2, search => 2, geo => 2}),
+        [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
+        {ok, JobId} = mem3_reshard:start_split_job(Shard),
+        wait_state(JobId, completed),
+        Shards1 = lists:sort(mem3:local_shards(Db)),
+        ?assertEqual(2, length(Shards1)),
+        MRViewGroupInfo = get_group_info(Db, <<"_design/mrview00000">>),
+        ?assertMatch(#{<<"update_seq">> := 32}, MRViewGroupInfo),
+
+        case HaveDreyfus of false -> ok; true ->
+            % 4 because there are 2 indices and 2 target shards
+            ?assertEqual(4, meck:num_calls(dreyfus_index, await, 2))
+        end,
+
+        case HaveHastings of false -> ok; true ->
+            % 4 because there are 2 indices and 2 target shards
+            ?assertEqual(4, meck:num_calls(hastings_index, await, 2))
+        end
+    end)}.
+
+
+mock_dreyfus_indices() ->
+    meck:expect(dreyfus_index, design_doc_to_indexes, fun(Doc) ->
+        #doc{body = {BodyProps}} = Doc,
+        case couch_util:get_value(<<"indexes">>, BodyProps) of
+            undefined ->
+                [];
+            {[_]} ->
+                [{dreyfus, <<"db">>, dreyfus_index1}]
+        end
+    end),
+    meck:expect(dreyfus_index_manager, get_index, fun(_, _) -> {ok, pid} end),
+    meck:expect(dreyfus_index, await, fun(_, _) -> ok end).
+
+
+mock_hastings_indices() ->
+    meck:expect(hastings_index, design_doc_to_indexes, fun(Doc) ->
+        #doc{body = {BodyProps}} = Doc,
+        case couch_util:get_value(<<"st_indexes">>, BodyProps) of
+            undefined ->
+                [];
+            {[_]} ->
+                [{hastings, <<"db">>, hastings_index1}]
+        end
+    end),
+    meck:expect(hastings_index_manager, get_index, fun(_, _) -> {ok, pid} end),
+    meck:expect(hastings_index, await, fun(_, _) -> ok end).
+
+% Split partitioned database
+split_partitioned_db(#{db2 := Db}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        DocSpec = #{
+            pdocs => #{
+                <<"PX">> => 5,
+                <<"PY">> => 5
+            },
+            mrview => 1,
+            local => 1
+        },
+        add_test_docs(Db, DocSpec),
+
+        % Save documents before the split
+        Docs0 = get_all_docs(Db),
+        Local0 = get_local_docs(Db),
+
+        % Set some custom metadata properties
+        set_revs_limit(Db, 942),
+        set_purge_infos_limit(Db, 943),
+        SecObj = {[{<<"foo">>, <<"bar">>}]},
+        set_security(Db, SecObj),
+
+        % DbInfo is saved after setting metadata bits
+        % as those could bump the update sequence
+        DbInfo0 = get_db_info(Db),
+        PX0 = get_partition_info(Db, <<"PX">>),
+        PY0 = get_partition_info(Db, <<"PY">>),
+
+        % Split the one shard
+        [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
+        {ok, JobId} = mem3_reshard:start_split_job(Shard),
+        wait_state(JobId, completed),
+
+        % Perform some basic checks that the shard was split
+        Shards1 = lists:sort(mem3:local_shards(Db)),
+        ?assertEqual(2, length(Shards1)),
+        [#shard{range = R1}, #shard{range = R2}] = Shards1,
+        ?assertEqual([16#00000000, 16#7fffffff], R1),
+        ?assertEqual([16#80000000, 16#ffffffff], R2),
+
+        % Check metadata bits after the split
+        ?assertEqual(942, get_revs_limit(Db)),
+        ?assertEqual(943, get_purge_infos_limit(Db)),
+        ?assertEqual(SecObj, get_security(Db)),
+
+        DbInfo1 = get_db_info(Db),
+        Docs1 = get_all_docs(Db),
+        Local1 = get_local_docs(Db),
+
+        % When comparing db infos, ignore update sequences; they won't be the
+        % same since there are more shards involved after the split
+        ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
+
+        % Update seq prefix number is a sum of all shard update sequences
+        #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
+        #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
+        ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
+
+        % Finally compare that documents are still there after the split
+        ?assertEqual(Docs0, Docs1),
+
+        ?assertEqual(PX0, get_partition_info(Db, <<"PX">>)),
+        ?assertEqual(PY0, get_partition_info(Db, <<"PY">>)),
+
+        % Don't forget about the local docs, but don't include internal
+        % checkpoints as some of those are munged and transformed during
+        % the split
+        ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
+    end)}.
+
+
+% Make sure a shard can be split again after it was split once. This guards
+% against too many documents ending up in one range, such that on the next
+% split they'd fail to fit into any of the new target ranges.
+split_twice(#{db1 := Db}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        DocSpec = #{docs => 100, delete => [80, 99], mrview => 2, local => 100},
+        add_test_docs(Db, DocSpec),
+
+        % Save documents before the split
+        Docs0 = get_all_docs(Db),
+        Local0 = get_local_docs(Db),
+
+        % Set some custom metadata properties
+        set_revs_limit(Db, 942),
+        set_purge_infos_limit(Db, 943),
+        SecObj = {[{<<"foo">>, <<"bar">>}]},
+        set_security(Db, SecObj),
+
+        % DbInfo is saved after setting metadata bits
+        % as those could bump the update sequence
+        DbInfo0 = get_db_info(Db),
+
+        % Split the one shard
+        [#shard{name=Shard1}] = lists:sort(mem3:local_shards(Db)),
+        {ok, JobId1} = mem3_reshard:start_split_job(Shard1),
+        wait_state(JobId1, completed),
+
+        % Perform some basic checks that the shard was split
+        Shards1 = lists:sort(mem3:local_shards(Db)),
+        ?assertEqual(2, length(Shards1)),
+        [#shard{range = R1}, #shard{range = R2}] = Shards1,
+        ?assertEqual([16#00000000, 16#7fffffff], R1),
+        ?assertEqual([16#80000000, 16#ffffffff], R2),
+
+        % Check metadata bits after the split
+        ?assertEqual(942, get_revs_limit(Db)),
+        ?assertEqual(943, get_purge_infos_limit(Db)),
+        ?assertEqual(SecObj, get_security(Db)),
+
+        DbInfo1 = get_db_info(Db),
+        Docs1 = get_all_docs(Db),
+        Local1 = get_local_docs(Db),
+
+        % When comparing db infos, ignore update sequences; they won't be the
+        % same since there are more shards involved after the split
+        ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
+
+        % Update seq prefix number is a sum of all shard update sequences
+        #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
+        #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
+        ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
+
+        ?assertEqual(Docs0, Docs1),
+        ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1)),
+
+        % Split the first range again
+        [#shard{name=Shard2}, _] = lists:sort(mem3:local_shards(Db)),
+        {ok, JobId2} = mem3_reshard:start_split_job(Shard2),
+        wait_state(JobId2, completed),
+
+        Shards2 = lists:sort(mem3:local_shards(Db)),
+        ?assertEqual(3, length(Shards2)),
+        [R3, R4, R5] = [R || #shard{range = R} <- Shards2],
+        ?assertEqual([16#00000000, 16#3fffffff], R3),
+        ?assertEqual([16#40000000, 16#7fffffff], R4),
+        ?assertEqual([16#80000000, 16#ffffffff], R5),
+
+        % Check metadata bits after the second split
+        ?assertEqual(942, get_revs_limit(Db)),
+        ?assertEqual(943, get_purge_infos_limit(Db)),
+        ?assertEqual(SecObj, get_security(Db)),
+
+        DbInfo2 = get_db_info(Db),
+        Docs2 = get_all_docs(Db),
+        Local2 = get_local_docs(Db),
+
+        ?assertEqual(without_seqs(DbInfo1), without_seqs(DbInfo2)),
+        % Update seq prefix number is a sum of all shard update sequences,
+        % but only 1 shard out of 2 was split
+        #{<<"update_seq">> := UpdateSeq2} = update_seq_to_num(DbInfo2),
+        ?assertEqual(trunc(UpdateSeq1 * 1.5), UpdateSeq2),
+        ?assertEqual(Docs1, Docs2),
+        ?assertEqual(without_meta_locals(Local1), without_meta_locals(Local2))
+    end)}.
+
+
+couch_events_are_emitted(#{db1 := Db}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        couch_event:register_all(self()),
+
+        % Split the one shard
+        [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
+        {ok, JobId} = mem3_reshard:start_split_job(Shard),
+        wait_state(JobId, completed),
+
+        % Perform some basic checks that the shard was split
+        Shards1 = lists:sort(mem3:local_shards(Db)),
+        ?assertEqual(2, length(Shards1)),
+        [#shard{range = R1}, #shard{range = R2}] = Shards1,
+        ?assertEqual([16#00000000, 16#7fffffff], R1),
+        ?assertEqual([16#80000000, 16#ffffffff], R2),
+
+        Flush = fun F(Events) ->
+            receive
+                {'$couch_event', DbName, Event} when Event =:= deleted
+                        orelse Event =:= updated ->
+                    case binary:match(DbName, Db) of
+                        nomatch -> F(Events);
+                        {_, _} -> F([Event | Events])
+                    end
+            after 0 ->
+                lists:reverse(Events)
+            end
+        end,
+        Events = Flush([]),
+        StartAtDeleted = lists:dropwhile(fun(E) -> E =/= deleted end, Events),
+        ?assertMatch([deleted, deleted, updated, updated | _], StartAtDeleted),
+        couch_event:unregister(self())
+    end)}.
+
+
+retries_work(#{db1 := Db}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        meck:expect(couch_db_split, split, fun(_, _, _) ->
+            error(kapow)
+        end),
+
+        [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
+        {ok, JobId} = mem3_reshard:start_split_job(Shard),
+
+        wait_state(JobId, failed),
+        ?assertEqual(3, meck:num_calls(couch_db_split, split, 3))
+    end)}.
+
+
+target_reset_in_initial_copy(#{db1 := Db}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        [#shard{} = Src] = lists:sort(mem3:local_shards(Db)),
+        Job = #job{
+            source = Src,
+            target = [#shard{name= <<"t1">>}, #shard{name = <<"t2">>}],
+            job_state = running,
+            split_state = initial_copy
+        },
+        BogusParent = spawn(fun() -> receive {ack, _, _} -> ok end end),
+        put('$ancestors', [BogusParent]), % make proc_lib:ack not blow up
+        meck:expect(mem3_reshard, checkpoint, 2, ok),
+        meck:expect(couch_db_split, cleanup_target, 2, ok),
+        meck:expect(couch_server, exists, fun
+            (<<"t1">>) -> true;
+            (<<"t2">>) -> true;
+            (DbName) -> meck:passthrough([DbName])
+        end),
+        JobPid = spawn(fun() -> mem3_reshard_job:init(Job) end),
+        meck:wait(2, couch_db_split, cleanup_target, ['_', '_'], 5000),
+        exit(JobPid, kill),
+        exit(BogusParent, kill),
+        ?assertEqual(2, meck:num_calls(couch_db_split, cleanup_target, 2))
+    end)}.
+
+
+split_an_incomplete_shard_map(#{db1 := Db}) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
+        meck:expect(mem3_util, calculate_max_n, 1, 0),
+        ?assertMatch({error, {not_enough_shard_copies, _}},
+            mem3_reshard:start_split_job(Shard))
+    end)}.
+
+
+intercept_state(State) ->
+    TestPid = self(),
+    meck:new(mem3_reshard_job, [passthrough]),
+    meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) ->
+        case Job#job.split_state of
+            State ->
+                TestPid ! {self(), State},
+                receive
+                    continue -> meck:passthrough([Job]);
+                    cancel -> ok
+                end;
+            _ ->
+                meck:passthrough([Job])
+        end
+    end).
+
+
+wait_state(JobId, State) ->
+    test_util:wait(fun() ->
+        case mem3_reshard:job(JobId) of
+            {ok, {Props}} ->
+                case couch_util:get_value(job_state, Props) of
+                    State -> ok;
+                    _ -> timer:sleep(100), wait
+                end;
+            {error, not_found} -> timer:sleep(100), wait
+        end
+    end, 30000).
+
+
+set_revs_limit(DbName, Limit) ->
+    with_proc(fun() -> fabric:set_revs_limit(DbName, Limit, [?ADMIN_CTX]) end).
+
+
+get_revs_limit(DbName) ->
+    with_proc(fun() -> fabric:get_revs_limit(DbName) end).
+ + +get_purge_infos_limit(DbName) -> + with_proc(fun() -> fabric:get_purge_infos_limit(DbName) end). + + +set_purge_infos_limit(DbName, Limit) -> + with_proc(fun() -> + fabric:set_purge_infos_limit(DbName, Limit, [?ADMIN_CTX]) + end). + + +set_security(DbName, SecObj) -> + with_proc(fun() -> fabric:set_security(DbName, SecObj) end). + + +get_security(DbName) -> + with_proc(fun() -> fabric:get_security(DbName, [?ADMIN_CTX]) end). + + +get_db_info(DbName) -> + with_proc(fun() -> + {ok, Info} = fabric:get_db_info(DbName), + maps:with([ + <<"db_name">>, <<"doc_count">>, <<"props">>, <<"doc_del_count">>, + <<"update_seq">>, <<"purge_seq">>, <<"disk_format_version">> + ], to_map(Info)) + end). + + +get_group_info(DbName, DesignId) -> + with_proc(fun() -> + {ok, GInfo} = fabric:get_view_group_info(DbName, DesignId), + maps:with([ + <<"language">>, <<"purge_seq">>, <<"signature">>, <<"update_seq">> + ], to_map(GInfo)) + end). + + +get_partition_info(DbName, Partition) -> + with_proc(fun() -> + {ok, PInfo} = fabric:get_partition_info(DbName, Partition), + maps:with([ + <<"db_name">>, <<"doc_count">>, <<"doc_del_count">>, <<"partition">> + ], to_map(PInfo)) + end). + + +get_all_docs(DbName) -> + get_all_docs(DbName, #mrargs{}). + + +get_all_docs(DbName, #mrargs{} = QArgs0) -> + GL = erlang:group_leader(), + with_proc(fun() -> + Cb = fun + ({row, Props}, Acc) -> + Doc = to_map(couch_util:get_value(doc, Props)), + #{?ID := Id} = Doc, + {ok, Acc#{Id => Doc}}; + ({meta, _}, Acc) -> {ok, Acc}; + (complete, Acc) -> {ok, Acc} + end, + QArgs = QArgs0#mrargs{include_docs = true}, + {ok, Docs} = fabric:all_docs(DbName, Cb, #{}, QArgs), + Docs + end, GL). + + +get_local_docs(DbName) -> + LocalNS = {namespace, <<"_local">>}, + maps:map(fun(_, Doc) -> + maps:without([<<"_rev">>], Doc) + end, get_all_docs(DbName, #mrargs{extra = [LocalNS]})). + + +without_seqs(#{} = InfoMap) -> + maps:without([<<"update_seq">>, <<"purge_seq">>], InfoMap). + + +without_meta_locals(#{} = Local) -> + maps:filter(fun + (<<"_local/purge-mrview-", _/binary>>, _) -> false; + (<<"_local/shard-sync-", _/binary>>, _) -> false; + (_, _) -> true + end, Local). + + +update_seq_to_num(#{} = InfoMap) -> + maps:map(fun + (<<"update_seq">>, Seq) -> seq_to_num(Seq); + (<<"purge_seq">>, PSeq) -> seq_to_num(PSeq); + (_, V) -> V + end, InfoMap). + + +seq_to_num(Seq) -> + [SeqNum, _] = binary:split(Seq, <<"-">>), + binary_to_integer(SeqNum). + + +to_map([_ | _] = Props) -> + to_map({Props}); + +to_map({[_ | _]} = EJson) -> + jiffy:decode(jiffy:encode(EJson), [return_maps]). + + +create_db(DbName, Opts) -> + GL = erlang:group_leader(), + with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL). + + +delete_db(DbName) -> + GL = erlang:group_leader(), + with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL). + + +with_proc(Fun) -> + with_proc(Fun, undefined, 30000). + + +with_proc(Fun, GroupLeader) -> + with_proc(Fun, GroupLeader, 30000). + + +with_proc(Fun, GroupLeader, Timeout) -> + {Pid, Ref} = spawn_monitor(fun() -> + case GroupLeader of + undefined -> ok; + _ -> erlang:group_leader(GroupLeader, self()) + end, + exit({with_proc_res, Fun()}) + end), + receive + {'DOWN', Ref, process, Pid, {with_proc_res, Res}} -> + Res; + {'DOWN', Ref, process, Pid, Error} -> + error(Error) + after Timeout -> + erlang:demonitor(Ref, [flush]), + exit(Pid, kill), + error({with_proc_timeout, Fun, Timeout}) + end. 
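A note on the with_proc/1,2,3 wrapper above: fabric calls run in a short-lived monitored process, so group-leader changes and crashes stay contained. A hedged usage sketch (the function and db name are illustrative, not part of the patch):

    doc_count(DbName) ->
        with_proc(fun() ->
            % a crash here surfaces as error(Reason) in the caller
            % instead of killing the test process itself
            {ok, Info} = fabric:get_db_info(DbName),
            maps:get(<<"doc_count">>, to_map(Info))
        end).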
+
+
+add_test_docs(DbName, #{} = DocSpec) ->
+    Docs = docs(maps:get(docs, DocSpec, []))
+        ++ pdocs(maps:get(pdocs, DocSpec, #{}))
+        ++ ddocs(mrview, maps:get(mrview, DocSpec, []))
+        ++ ddocs(search, maps:get(search, DocSpec, []))
+        ++ ddocs(geo, maps:get(geo, DocSpec, []))
+        ++ ldocs(maps:get(local, DocSpec, [])),
+    Res = update_docs(DbName, Docs),
+    Docs1 = lists:map(fun({Doc, {ok, {RevPos, Rev}}}) ->
+        Doc#doc{revs = {RevPos, [Rev]}}
+    end, lists:zip(Docs, Res)),
+    case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
+        [] -> ok;
+        [_ | _] = Deleted -> update_docs(DbName, Deleted)
+    end,
+    ok.
+
+
+update_docs(DbName, Docs) ->
+    with_proc(fun() ->
+        case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
+            {accepted, Res} -> Res;
+            {ok, Res} -> Res
+        end
+    end).
+
+
+delete_docs([S, E], Docs) when E >= S ->
+    ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
+    lists:filtermap(fun(#doc{id = Id} = Doc) ->
+        case lists:member(Id, ToDelete) of
+            true -> {true, Doc#doc{deleted = true}};
+            false -> false
+        end
+    end, Docs);
+delete_docs(_, _) ->
+    [].
+
+
+pdocs(#{} = PMap) ->
+    maps:fold(fun(Part, DocSpec, DocsAcc) ->
+        docs(DocSpec, <<Part/binary, ":">>) ++ DocsAcc
+    end, [], PMap).
+
+
+docs(DocSpec) ->
+    docs(DocSpec, <<"">>).
+
+
+docs(N, Prefix) when is_integer(N), N > 0 ->
+    docs([0, N - 1], Prefix);
+docs([S, E], Prefix) when E >= S ->
+    [doc(Prefix, I) || I <- lists:seq(S, E)];
+docs(_, _) ->
+    [].
+
+ddocs(Type, N) when is_integer(N), N > 0 ->
+    ddocs(Type, [0, N - 1]);
+ddocs(Type, [S, E]) when E >= S ->
+    Body = ddprop(Type),
+    BType = atom_to_binary(Type, utf8),
+    [doc(<<"_design/", BType/binary>>, I, Body, 0) || I <- lists:seq(S, E)];
+ddocs(_, _) ->
+    [].
+
+
+ldocs(N) when is_integer(N), N > 0 ->
+    ldocs([0, N - 1]);
+ldocs([S, E]) when E >= S ->
+    [doc(<<"_local/">>, I, bodyprops(), 0) || I <- lists:seq(S, E)];
+ldocs(_) ->
+    [].
+
+
+
+doc(Pref, Id) ->
+    Body = bodyprops(),
+    doc(Pref, Id, Body, 42).
+
+
+doc(Pref, Id, BodyProps, AttSize) ->
+    #doc{
+        id = doc_id(Pref, Id),
+        body = {BodyProps},
+        atts = atts(AttSize)
+    }.
+
+
+doc_id(Pref, Id) ->
+    IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
+    <<Pref/binary, IdBin/binary>>.
+
+
+ddprop(mrview) ->
+    [
+        {<<"views">>, {[
+            {<<"v1">>, {[
+                {<<"map">>, <<"function(d){emit(d);}">>}
+            ]}}
+        ]}}
+    ];
+
+ddprop(geo) ->
+    [
+        {<<"st_indexes">>, {[
+            {<<"area">>, {[
+                {<<"analyzer">>, <<"standard">>},
+                {<<"index">>, <<"function(d){if(d.g){st_index(d.g)}}">> }
+            ]}}
+        ]}}
+    ];
+
+ddprop(search) ->
+    [
+        {<<"indexes">>, {[
+            {<<"types">>, {[
+                {<<"index">>, <<"function(d){if(d.g){st_index(d.g.type)}}">>}
+            ]}}
+        ]}}
+    ].
+
+
+bodyprops() ->
+    [
+        {<<"g">>, {[
+            {<<"type">>, <<"Polygon">>},
+            {<<"coordinates">>, [[[-71.0, 48.4], [-70.0, 48.4], [-71.0, 48.4]]]}
+        ]}}
+    ].
+
+
+atts(0) ->
+    [];
+
+atts(Size) when is_integer(Size), Size >= 1 ->
+    Data = << <<"x">> || _ <- lists:seq(1, Size) >>,
+    [couch_att:new([
+        {name, <<"att">>},
+        {type, <<"app/binary">>},
+        {att_len, Size},
+        {data, Data}
+    ])].
diff --git a/src/mem3/test/eunit/mem3_ring_prop_tests.erl b/src/mem3/test/eunit/mem3_ring_prop_tests.erl
new file mode 100644
index 000000000..9f4f86f5f
--- /dev/null
+++ b/src/mem3/test/eunit/mem3_ring_prop_tests.erl
@@ -0,0 +1,144 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(mem3_ring_prop_tests). + + +-include_lib("triq/include/triq.hrl"). +-triq(eunit). + + +% Properties + +prop_get_ring_with_connected_intervals() -> + ?FORALL({Start, End}, oneof(ranges()), + ?FORALL(Intervals, g_connected_intervals(Start, End), + mem3_util:get_ring(Intervals, Start, End) =:= lists:sort(Intervals) + ) + ). + + +prop_get_ring_connected_plus_random_intervals() -> + ?FORALL({Intervals, Extra}, {g_connected_intervals(1, 100), + g_random_intervals(1, 100)}, + ?IMPLIES(sets:is_disjoint(endpoints(Intervals), endpoints(Extra)), + begin + AllInts = Intervals ++ Extra, + Ring = mem3_util:get_ring(AllInts, 1, 100), + Ring =:= lists:sort(Intervals) + end + ) + ). + + +prop_get_ring_connected_with_sub_intervals() -> + ?FORALL(Intervals, g_connected_intervals(1, 100), + ?FORALL(SubIntervals, g_subintervals(Intervals), + begin + AllInts = Intervals ++ SubIntervals, + Ring = mem3_util:get_ring(AllInts, 1, 100), + Ring =:= lists:sort(Intervals) + end + ) + ). + + +prop_get_ring_with_disconnected_intervals() -> + ?FORALL({Start, End}, oneof(ranges()), + ?FORALL(Intervals, g_disconnected_intervals(Start, End), + mem3_util:get_ring(Intervals, Start, End) =:= [] + ) + ). + + +% Generators + +ranges() -> + [{1, 10}, {0, 2 bsl 31 - 1}, {2 bsl 31 - 10, 2 bsl 31 - 1}]. + + +g_connected_intervals(Begin, End) -> + ?SIZED(Size, g_connected_intervals(Begin, End, 5 * Size)). + + +g_connected_intervals(Begin, End, Split) when Begin =< End -> + ?LET(N, choose(0, Split), + begin + if + N == 0 -> + [{Begin, End}]; + N > 0 -> + Ns = lists:seq(1, N - 1), + Bs = lists:usort([rand_range(Begin, End) || _ <- Ns]), + Es = [B - 1 || B <- Bs], + shuffle(lists:zip([Begin] ++ Bs, Es ++ [End])) + end + end). + + +g_non_trivial_connected_intervals(Begin, End, Split) -> + ?SUCHTHAT(Connected, g_connected_intervals(Begin, End, Split), + length(Connected) > 1). + + +g_disconnected_intervals(Begin, End) -> + ?SIZED(Size, g_disconnected_intervals(Begin, End, Size)). + + +g_disconnected_intervals(Begin, End, Split) when Begin =< End -> + ?LET(Connected, g_non_trivial_connected_intervals(Begin, End, Split), + begin + I = triq_rnd:uniform(length(Connected)) - 1, + {Before, [_ | After]} = lists:split(I, Connected), + Before ++ After + end). + + +g_subintervals(Intervals) -> + lists:foldl(fun(R, Acc) -> split_interval(R) ++ Acc end, [], Intervals). + + +split_interval({B, E}) when E - B >= 2 -> + E1 = rand_range(B, E) - 1, + B1 = E1 + 1, + [{B, E1}, {B1, E}]; + +split_interval(_Range) -> + []. + + +g_random_intervals(Start, End) -> + ?LET(N, choose(1, 10), + begin + [begin + B = rand_range(Start, End), + E = rand_range(B, End), + {B, E} + end || _ <- lists:seq(1, N)] + end). + + +rand_range(B, B) -> + B; + +rand_range(B, E) -> + B + triq_rnd:uniform(E - B). + + +shuffle(L) -> + Tagged = [{triq_rnd:uniform(), X} || X <- L], + [X || {_, X} <- lists:sort(Tagged)]. + + +endpoints(Ranges) -> + {Begins, Ends} = lists:unzip(Ranges), + sets:from_list(Begins ++ Ends). 
diff --git a/src/mem3/test/eunit/mem3_seeds_test.erl b/src/mem3/test/eunit/mem3_seeds_test.erl new file mode 100644 index 000000000..ba83b66be --- /dev/null +++ b/src/mem3/test/eunit/mem3_seeds_test.erl @@ -0,0 +1,69 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(mem3_seeds_test). + +-include_lib("couch/include/couch_eunit.hrl"). + +a_test_() -> + Tests = [ + {"empty seedlist should set status ok", fun empty_seedlist_status_ok/0}, + {"all seedlist nodes unreachable keeps status seeding", fun seedlist_misconfiguration/0}, + {"seedlist entries should be present in _nodes", fun check_nodelist/0} + ], + {setup, fun setup/0, fun teardown/1, Tests}. + +empty_seedlist_status_ok() -> + ok = application:start(mem3), + try + {ok, {Result}} = mem3_seeds:get_status(), + ?assertEqual({[]}, couch_util:get_value(seeds, Result)), + ?assertEqual(ok, couch_util:get_value(status, Result)) + after + cleanup() + end. + +seedlist_misconfiguration() -> + config:set("cluster", "seedlist", "couchdb@node1.example.com,couchdb@node2.example.com", false), + ok = application:start(mem3), + try + {ok, {Result}} = mem3_seeds:get_status(), + {Seeds} = couch_util:get_value(seeds, Result), + ?assertEqual(2, length(Seeds)), + ?assertMatch({_}, couch_util:get_value('couchdb@node1.example.com', Seeds)), + ?assertMatch({_}, couch_util:get_value('couchdb@node2.example.com', Seeds)), + ?assertEqual(seeding, couch_util:get_value(status, Result)) + after + cleanup() + end. + +check_nodelist() -> + config:set("cluster", "seedlist", "couchdb@node1.example.com,couchdb@node2.example.com", false), + ok = application:start(mem3), + try + Nodes = mem3:nodes(), + ?assert(lists:member('couchdb@node1.example.com', Nodes)), + ?assert(lists:member('couchdb@node2.example.com', Nodes)) + after + cleanup() + end. + +cleanup() -> + application:stop(mem3), + Filename = config:get("mem3", "nodes_db", "_nodes") ++ ".couch", + file:delete(filename:join([?BUILDDIR(), "tmp", "data", Filename])). + +setup() -> + test_util:start_couch([rexi]). + +teardown(Ctx) -> + test_util:stop_couch(Ctx). diff --git a/src/mem3/test/eunit/mem3_sync_security_test.erl b/src/mem3/test/eunit/mem3_sync_security_test.erl new file mode 100644 index 000000000..e67a72017 --- /dev/null +++ b/src/mem3/test/eunit/mem3_sync_security_test.erl @@ -0,0 +1,54 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(mem3_sync_security_test). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include("mem3.hrl"). 
+-include_lib("eunit/include/eunit.hrl"). + +-define(TIMEOUT, 5). % seconds + +go_test_() -> + { + "security property sync test", + { + setup, + fun start_couch/0, fun stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun sync_security_ok/1 + ] + } + } + }. + +start_couch() -> + test_util:start_couch([fabric, mem3]). + +stop_couch(Ctx) -> + test_util:stop_couch(Ctx). + +setup() -> + ok = meck:new(fabric, [passthrough]), + meck:expect(fabric, all_dbs, fun() -> + {ok, [<<"NoExistDb1">>, <<"NoExistDb2">>]} + end). + +teardown(_) -> + meck:unload(). + +sync_security_ok(_) -> + {timeout, ?TIMEOUT, ?_assertEqual(ok, mem3_sync_security:go())}. diff --git a/src/mem3/test/eunit/mem3_util_test.erl b/src/mem3/test/eunit/mem3_util_test.erl new file mode 100644 index 000000000..8b74c4b2b --- /dev/null +++ b/src/mem3/test/eunit/mem3_util_test.erl @@ -0,0 +1,130 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(mem3_util_test). + +-include("mem3.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +name_shard_test() -> + Shard1 = #shard{}, + ?assertError(function_clause, mem3_util:name_shard(Shard1, ".1234")), + + Shard2 = #shard{dbname = <<"testdb">>, range = [0,100]}, + #shard{name=Name2} = mem3_util:name_shard(Shard2, ".1234"), + ?assertEqual(<<"shards/00000000-00000064/testdb.1234">>, Name2), + + ok. + +create_partition_map_test() -> + {DbName1, N1, Q1, Nodes1} = {<<"testdb1">>, 3, 4, [a,b,c,d]}, + Map1 = mem3_util:create_partition_map(DbName1, N1, Q1, Nodes1), + ?assertEqual(12, length(Map1)), + + {DbName2, N2, Q2, Nodes2} = {<<"testdb2">>, 1, 1, [a,b,c,d]}, + [#shard{name=Name2,node=Node2}] = Map2 = + mem3_util:create_partition_map(DbName2, N2, Q2, Nodes2, ".1234"), + ?assertEqual(1, length(Map2)), + ?assertEqual(<<"shards/00000000-ffffffff/testdb2.1234">>, Name2), + ?assertEqual(a, Node2), + ok. 
+ +build_shards_test() -> + DocProps1 = + [{<<"changelog">>, + [[<<"add">>,<<"00000000-1fffffff">>, + <<"bigcouch@node.local">>], + [<<"add">>,<<"20000000-3fffffff">>, + <<"bigcouch@node.local">>], + [<<"add">>,<<"40000000-5fffffff">>, + <<"bigcouch@node.local">>], + [<<"add">>,<<"60000000-7fffffff">>, + <<"bigcouch@node.local">>], + [<<"add">>,<<"80000000-9fffffff">>, + <<"bigcouch@node.local">>], + [<<"add">>,<<"a0000000-bfffffff">>, + <<"bigcouch@node.local">>], + [<<"add">>,<<"c0000000-dfffffff">>, + <<"bigcouch@node.local">>], + [<<"add">>,<<"e0000000-ffffffff">>, + <<"bigcouch@node.local">>]]}, + {<<"by_node">>, + {[{<<"bigcouch@node.local">>, + [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>, + <<"40000000-5fffffff">>,<<"60000000-7fffffff">>, + <<"80000000-9fffffff">>,<<"a0000000-bfffffff">>, + <<"c0000000-dfffffff">>,<<"e0000000-ffffffff">>]}]}}, + {<<"by_range">>, + {[{<<"00000000-1fffffff">>,[<<"bigcouch@node.local">>]}, + {<<"20000000-3fffffff">>,[<<"bigcouch@node.local">>]}, + {<<"40000000-5fffffff">>,[<<"bigcouch@node.local">>]}, + {<<"60000000-7fffffff">>,[<<"bigcouch@node.local">>]}, + {<<"80000000-9fffffff">>,[<<"bigcouch@node.local">>]}, + {<<"a0000000-bfffffff">>,[<<"bigcouch@node.local">>]}, + {<<"c0000000-dfffffff">>,[<<"bigcouch@node.local">>]}, + {<<"e0000000-ffffffff">>,[<<"bigcouch@node.local">>]}]}}], + Shards1 = mem3_util:build_shards(<<"testdb1">>, DocProps1), + ExpectedShards1 = + [{shard,<<"shards/00000000-1fffffff/testdb1">>, + 'bigcouch@node.local',<<"testdb1">>, + [0,536870911], + undefined,[]}, + {shard,<<"shards/20000000-3fffffff/testdb1">>, + 'bigcouch@node.local',<<"testdb1">>, + [536870912,1073741823], + undefined,[]}, + {shard,<<"shards/40000000-5fffffff/testdb1">>, + 'bigcouch@node.local',<<"testdb1">>, + [1073741824,1610612735], + undefined,[]}, + {shard,<<"shards/60000000-7fffffff/testdb1">>, + 'bigcouch@node.local',<<"testdb1">>, + [1610612736,2147483647], + undefined,[]}, + {shard,<<"shards/80000000-9fffffff/testdb1">>, + 'bigcouch@node.local',<<"testdb1">>, + [2147483648,2684354559], + undefined,[]}, + {shard,<<"shards/a0000000-bfffffff/testdb1">>, + 'bigcouch@node.local',<<"testdb1">>, + [2684354560,3221225471], + undefined,[]}, + {shard,<<"shards/c0000000-dfffffff/testdb1">>, + 'bigcouch@node.local',<<"testdb1">>, + [3221225472,3758096383], + undefined,[]}, + {shard,<<"shards/e0000000-ffffffff/testdb1">>, + 'bigcouch@node.local',<<"testdb1">>, + [3758096384,4294967295], + undefined,[]}], + ?assertEqual(ExpectedShards1, Shards1), + ok. + + +%% n_val tests + +nval_test_() -> + { + setup, + fun() -> + meck:new([config, couch_log]), + meck:expect(couch_log, error, 2, ok), + meck:expect(config, get, 3, "5") + end, + fun(_) -> meck:unload() end, + [ + ?_assertEqual(2, mem3_util:n_val(2, 4)), + ?_assertEqual(1, mem3_util:n_val(-1, 4)), + ?_assertEqual(4, mem3_util:n_val(6, 4)), + ?_assertEqual(5, mem3_util:n_val(undefined, 6)) + ] + }. diff --git a/src/mem3/test/mem3_cluster_test.erl b/src/mem3/test/mem3_cluster_test.erl deleted file mode 100644 index 4610d64bd..000000000 --- a/src/mem3/test/mem3_cluster_test.erl +++ /dev/null @@ -1,133 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_cluster_test).
-
--behavior(mem3_cluster).
-
--include_lib("eunit/include/eunit.hrl").
-
--export([
-    cluster_unstable/1,
-    cluster_stable/1
-]).
-
-
-% Mem3 cluster callbacks
-
-cluster_unstable(Server) ->
-    Server ! cluster_unstable,
-    Server.
-
-cluster_stable(Server) ->
-    Server ! cluster_stable,
-    Server.
-
-
-mem3_cluster_test_test_() ->
-    {
-        foreach,
-        fun setup/0,
-        fun teardown/1,
-        [
-            t_cluster_stable_during_startup_period(),
-            t_cluster_unstable_delivered_on_nodeup(),
-            t_cluster_unstable_delivered_on_nodedown(),
-            t_wait_period_is_reset_after_last_change()
-        ]
-    }.
-
-
-t_cluster_stable_during_startup_period() ->
-    ?_test(begin
-        {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2),
-        register(?MODULE, Pid),
-        receive
-            cluster_stable ->
-                ?assert(true)
-        after 1500 ->
-            throw(timeout)
-        end,
-        unlink(Pid),
-        exit(Pid, kill)
-    end).
-
-
-t_cluster_unstable_delivered_on_nodeup() ->
-    ?_test(begin
-        {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2),
-        register(?MODULE, Pid),
-        Pid ! {nodeup, node()},
-        receive
-            cluster_unstable ->
-                ?assert(true)
-        after 1000 ->
-            throw(timeout)
-        end,
-        unlink(Pid),
-        exit(Pid, kill)
-    end).
-
-
-t_cluster_unstable_delivered_on_nodedown() ->
-    ?_test(begin
-        {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2),
-        register(?MODULE, Pid),
-        Pid ! {nodedown, node()},
-        receive
-            cluster_unstable ->
-                ?assert(true)
-        after 1000 ->
-            throw(timeout)
-        end,
-        unlink(Pid),
-        exit(Pid, kill)
-    end).
-
-
-t_wait_period_is_reset_after_last_change() ->
-    ?_test(begin
-        {ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 1),
-        register(?MODULE, Pid),
-        timer:sleep(800),
-        Pid ! {nodeup, node()}, % after 800 msec send a nodeup
-        receive
-            cluster_stable ->
-                ?assert(false)
-        after 400 ->
-            ?assert(true) % stability check should have been reset
-        end,
-        timer:sleep(1000),
-        receive
-            cluster_stable ->
-                ?assert(true)
-        after 0 ->
-            ?assert(false) % cluster_stable arrives after enough quiet time
-        end,
-        unlink(Pid),
-        exit(Pid, kill)
-    end).
-
-
-% Test helper functions
-
-setup() ->
-    ok.
-
-teardown(_) ->
-    case whereis(?MODULE) of
-        undefined ->
-            ok;
-        Pid when is_pid(Pid) ->
-            unlink(Pid),
-            exit(Pid, kill)
-    end.
diff --git a/src/mem3/test/mem3_hash_test.erl b/src/mem3/test/mem3_hash_test.erl
deleted file mode 100644
index 7a40c5366..000000000
--- a/src/mem3/test/mem3_hash_test.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_hash_test).
-
--include_lib("eunit/include/eunit.hrl").
- -hash_test() -> - ?assertEqual(1624516141,mem3_hash:crc32(0)), - ?assertEqual(3816901808,mem3_hash:crc32("0")), - ?assertEqual(3523407757,mem3_hash:crc32(<<0>>)), - ?assertEqual(4108050209,mem3_hash:crc32(<<"0">>)), - ?assertEqual(3094724072,mem3_hash:crc32(zero)), - ok. diff --git a/src/mem3/test/mem3_rep_test.erl b/src/mem3/test/mem3_rep_test.erl deleted file mode 100644 index 4a46e7b93..000000000 --- a/src/mem3/test/mem3_rep_test.erl +++ /dev/null @@ -1,321 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(mem3_rep_test). - - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("couch_mrview/include/couch_mrview.hrl"). --include_lib("mem3/include/mem3.hrl"). - - --define(ID, <<"_id">>). --define(TIMEOUT, 60). % seconds - -setup() -> - {AllSrc, AllTgt} = {?tempdb(), ?tempdb()}, - {PartSrc, PartTgt} = {?tempdb(), ?tempdb()}, - create_db(AllSrc, [{q, 1}, {n, 1}]), - create_db(AllTgt, [{q, 2}, {n, 1}]), - PartProps = [{partitioned, true}, {hash, [couch_partition, hash, []]}], - create_db(PartSrc, [{q, 1}, {n, 1}, {props, PartProps}]), - create_db(PartTgt, [{q, 2}, {n, 1}, {props, PartProps}]), - #{allsrc => AllSrc, alltgt => AllTgt, partsrc => PartSrc, parttgt => PartTgt}. - - -teardown(#{} = Dbs) -> - maps:map(fun(_, Db) -> delete_db(Db) end, Dbs). - - -start_couch() -> - test_util:start_couch([mem3, fabric]). - - -stop_couch(Ctx) -> - test_util:stop_couch(Ctx). - - -mem3_reshard_db_test_() -> - { - "mem3 rep db tests", - { - setup, - fun start_couch/0, fun stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun replicate_basics/1, - fun replicate_small_batches/1, - fun replicate_low_batch_count/1, - fun replicate_with_partitions/1 - ] - } - } - }. - - -replicate_basics(#{allsrc := AllSrc, alltgt := AllTgt}) -> - {timeout, ?TIMEOUT, ?_test(begin - DocSpec = #{docs => 10, delete => [5, 9]}, - add_test_docs(AllSrc, DocSpec), - SDocs = get_all_docs(AllSrc), - - [Src] = lists:sort(mem3:local_shards(AllSrc)), - [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)), - #shard{range = R1} = Tgt1, - #shard{range = R2} = Tgt2, - TMap = #{R1 => Tgt1, R2 => Tgt2}, - Opts = [{batch_size, 1000}, {batch_count, all}], - ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)), - - ?assertEqual(SDocs, get_all_docs(AllTgt)) - end)}. - - -replicate_small_batches(#{allsrc := AllSrc, alltgt := AllTgt}) -> - {timeout, ?TIMEOUT, ?_test(begin - DocSpec = #{docs => 10, delete => [5, 9]}, - add_test_docs(AllSrc, DocSpec), - SDocs = get_all_docs(AllSrc), - - [Src] = lists:sort(mem3:local_shards(AllSrc)), - [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)), - #shard{range = R1} = Tgt1, - #shard{range = R2} = Tgt2, - TMap = #{R1 => Tgt1, R2 => Tgt2}, - Opts = [{batch_size, 2}, {batch_count, all}], - ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)), - - ?assertEqual(SDocs, get_all_docs(AllTgt)) - end)}. 
- - -replicate_low_batch_count(#{allsrc := AllSrc, alltgt := AllTgt}) -> - {timeout, ?TIMEOUT, ?_test(begin - DocSpec = #{docs => 10, delete => [5, 9]}, - add_test_docs(AllSrc, DocSpec), - SDocs = get_all_docs(AllSrc), - - [Src] = lists:sort(mem3:local_shards(AllSrc)), - [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)), - #shard{range = R1} = Tgt1, - #shard{range = R2} = Tgt2, - TMap = #{R1 => Tgt1, R2 => Tgt2}, - - Opts1 = [{batch_size, 2}, {batch_count, 1}], - ?assertMatch({ok, 8}, mem3_rep:go(Src, TMap, Opts1)), - - Opts2 = [{batch_size, 1}, {batch_count, 2}], - ?assertMatch({ok, 6}, mem3_rep:go(Src, TMap, Opts2)), - - Opts3 = [{batch_size, 1000}, {batch_count, all}], - ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts3)), - - ?assertEqual(SDocs, get_all_docs(AllTgt)) - end)}. - - -replicate_with_partitions(#{partsrc := PartSrc, parttgt := PartTgt}) -> - {timeout, ?TIMEOUT, ?_test(begin - DocSpec = #{ - pdocs => #{ - <<"PX">> => 15, - <<"PY">> => 19 - } - }, - add_test_docs(PartSrc, DocSpec), - SDocs = get_all_docs(PartSrc), - PXSrc = get_partition_info(PartSrc, <<"PX">>), - PYSrc = get_partition_info(PartSrc, <<"PY">>), - - [Src] = lists:sort(mem3:local_shards(PartSrc)), - [Tgt1, Tgt2] = lists:sort(mem3:local_shards(PartTgt)), - #shard{range = R1} = Tgt1, - #shard{range = R2} = Tgt2, - TMap = #{R1 => Tgt1, R2 => Tgt2}, - Opts = [{batch_size, 1000}, {batch_count, all}], - ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)), - - ?assertEqual(PXSrc, get_partition_info(PartTgt, <<"PX">>)), - ?assertEqual(PYSrc, get_partition_info(PartTgt, <<"PY">>)), - ?assertEqual(SDocs, get_all_docs(PartTgt)) - end)}. - - -get_partition_info(DbName, Partition) -> - with_proc(fun() -> - {ok, PInfo} = fabric:get_partition_info(DbName, Partition), - maps:with([ - <<"doc_count">>, <<"doc_del_count">>, <<"partition">> - ], to_map(PInfo)) - end). - - -get_all_docs(DbName) -> - get_all_docs(DbName, #mrargs{}). - - -get_all_docs(DbName, #mrargs{} = QArgs0) -> - GL = erlang:group_leader(), - with_proc(fun() -> - Cb = fun - ({row, Props}, Acc) -> - Doc = to_map(couch_util:get_value(doc, Props)), - #{?ID := Id} = Doc, - {ok, Acc#{Id => Doc}}; - ({meta, _}, Acc) -> {ok, Acc}; - (complete, Acc) -> {ok, Acc} - end, - QArgs = QArgs0#mrargs{include_docs = true}, - {ok, Docs} = fabric:all_docs(DbName, Cb, #{}, QArgs), - Docs - end, GL). - - -to_map([_ | _] = Props) -> - to_map({Props}); - -to_map({[_ | _]} = EJson) -> - jiffy:decode(jiffy:encode(EJson), [return_maps]). - - -create_db(DbName, Opts) -> - GL = erlang:group_leader(), - with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL). - - -delete_db(DbName) -> - GL = erlang:group_leader(), - with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL). - - -with_proc(Fun) -> - with_proc(Fun, undefined, 30000). - - -with_proc(Fun, GroupLeader) -> - with_proc(Fun, GroupLeader, 30000). - - -with_proc(Fun, GroupLeader, Timeout) -> - {Pid, Ref} = spawn_monitor(fun() -> - case GroupLeader of - undefined -> ok; - _ -> erlang:group_leader(GroupLeader, self()) - end, - exit({with_proc_res, Fun()}) - end), - receive - {'DOWN', Ref, process, Pid, {with_proc_res, Res}} -> - Res; - {'DOWN', Ref, process, Pid, Error} -> - error(Error) - after Timeout -> - erlang:demonitor(Ref, [flush]), - exit(Pid, kill), - error({with_proc_timeout, Fun, Timeout}) - end. 
-
-
-add_test_docs(DbName, #{} = DocSpec) ->
-    Docs = docs(maps:get(docs, DocSpec, []))
-        ++ pdocs(maps:get(pdocs, DocSpec, #{})),
-    Res = update_docs(DbName, Docs),
-    Docs1 = lists:map(fun({Doc, {ok, {RevPos, Rev}}}) ->
-        Doc#doc{revs = {RevPos, [Rev]}}
-    end, lists:zip(Docs, Res)),
-    case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
-        [] -> ok;
-        [_ | _] = Deleted -> update_docs(DbName, Deleted)
-    end,
-    ok.
-
-
-update_docs(DbName, Docs) ->
-    with_proc(fun() ->
-        case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
-            {accepted, Res} -> Res;
-            {ok, Res} -> Res
-        end
-    end).
-
-
-delete_docs([S, E], Docs) when E >= S ->
-    ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
-    lists:filtermap(fun(#doc{id = Id} = Doc) ->
-        case lists:member(Id, ToDelete) of
-            true -> {true, Doc#doc{deleted = true}};
-            false -> false
-        end
-    end, Docs);
-delete_docs(_, _) ->
-    [].
-
-
-pdocs(#{} = PMap) ->
-    maps:fold(fun(Part, DocSpec, DocsAcc) ->
-        docs(DocSpec, <<Part/binary, ":">>) ++ DocsAcc
-    end, [], PMap).
-
-
-docs(DocSpec) ->
-    docs(DocSpec, <<"">>).
-
-
-docs(N, Prefix) when is_integer(N), N > 0 ->
-    docs([0, N - 1], Prefix);
-docs([S, E], Prefix) when E >= S ->
-    [doc(Prefix, I) || I <- lists:seq(S, E)];
-docs(_, _) ->
-    [].
-
-
-doc(Pref, Id) ->
-    Body = bodyprops(),
-    doc(Pref, Id, Body, 42).
-
-
-doc(Pref, Id, BodyProps, AttSize) ->
-    #doc{
-        id = doc_id(Pref, Id),
-        body = {BodyProps},
-        atts = atts(AttSize)
-    }.
-
-
-doc_id(Pref, Id) ->
-    IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
-    <<Pref/binary, IdBin/binary>>.
-
-
-bodyprops() ->
-    [
-        {<<"g">>, {[
-            {<<"type">>, <<"Polygon">>},
-            {<<"coordinates">>, [[[-71.0, 48.4], [-70.0, 48.4], [-71.0, 48.4]]]}
-        ]}}
-    ].
-
-
-atts(0) ->
-    [];
-
-atts(Size) when is_integer(Size), Size >= 1 ->
-    Data = << <<"x">> || _ <- lists:seq(1, Size) >>,
-    [couch_att:new([
-        {name, <<"att">>},
-        {type, <<"app/binary">>},
-        {att_len, Size},
-        {data, Data}
-    ])].
diff --git a/src/mem3/test/mem3_reshard_api_test.erl b/src/mem3/test/mem3_reshard_api_test.erl
deleted file mode 100644
index c4df24ad3..000000000
--- a/src/mem3/test/mem3_reshard_api_test.erl
+++ /dev/null
@@ -1,847 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_reshard_api_test).
-
-
--include_lib("couch/include/couch_eunit.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("mem3/src/mem3_reshard.hrl").
-
-
--define(USER, "mem3_reshard_api_test_admin").
--define(PASS, "pass").
--define(AUTH, {basic_auth, {?USER, ?PASS}}).
--define(JSON, {"Content-Type", "application/json"}).
--define(RESHARD, "_reshard/").
--define(JOBS, "_reshard/jobs/").
--define(STATE, "_reshard/state").
--define(ID, <<"id">>).
--define(OK, <<"ok">>).
--define(TIMEOUT, 60).
% seconds - - -setup() -> - Hashed = couch_passwords:hash_admin_password(?PASS), - ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), - Addr = config:get("chttpd", "bind_address", "127.0.0.1"), - Port = mochiweb_socket_server:get(chttpd, port), - Url = lists:concat(["http://", Addr, ":", Port, "/"]), - {Db1, Db2, Db3} = {?tempdb(), ?tempdb(), ?tempdb()}, - create_db(Url, Db1, "?q=1&n=1"), - create_db(Url, Db2, "?q=1&n=1"), - create_db(Url, Db3, "?q=2&n=1"), - {Url, {Db1, Db2, Db3}}. - - -teardown({Url, {Db1, Db2, Db3}}) -> - mem3_reshard:reset_state(), - application:unset_env(mem3, reshard_disabled), - delete_db(Url, Db1), - delete_db(Url, Db2), - delete_db(Url, Db3), - ok = config:delete("reshard", "max_jobs", _Persist=false), - ok = config:delete("reshard", "require_node_param", _Persist=false), - ok = config:delete("reshard", "require_range_param", _Persist=false), - ok = config:delete("admins", ?USER, _Persist=false), - meck:unload(). - - -start_couch() -> - test_util:start_couch([mem3, chttpd]). - - -stop_couch(Ctx) -> - test_util:stop_couch(Ctx). - - -mem3_reshard_api_test_() -> - { - "mem3 shard split api tests", - { - setup, - fun start_couch/0, fun stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun basics/1, - fun create_job_basic/1, - fun create_two_jobs/1, - fun create_multiple_jobs_from_one_post/1, - fun start_stop_cluster_basic/1, - fun test_disabled/1, - fun start_stop_cluster_with_a_job/1, - fun individual_job_start_stop/1, - fun individual_job_stop_when_cluster_stopped/1, - fun create_job_with_invalid_arguments/1, - fun create_job_with_db/1, - fun create_job_with_shard_name/1, - fun completed_job_handling/1, - fun handle_db_deletion_in_initial_copy/1, - fun handle_db_deletion_in_topoff1/1, - fun handle_db_deletion_in_copy_local_docs/1, - fun handle_db_deletion_in_build_indices/1, - fun handle_db_deletion_in_update_shard_map/1, - fun handle_db_deletion_in_wait_source_close/1, - fun recover_in_initial_copy/1, - fun recover_in_topoff1/1, - fun recover_in_copy_local_docs/1, - fun recover_in_build_indices/1, - fun recover_in_update_shard_map/1, - fun recover_in_wait_source_close/1, - fun recover_in_topoff3/1, - fun recover_in_source_delete/1, - fun check_max_jobs/1, - fun check_node_and_range_required_params/1, - fun cleanup_completed_jobs/1 - ] - } - } - }. - - -basics({Top, _}) -> - {timeout, ?TIMEOUT, ?_test(begin - % GET /_reshard - ?assertMatch({200, #{ - <<"state">> := <<"running">>, - <<"state_reason">> := null, - <<"completed">> := 0, - <<"failed">> := 0, - <<"running">> := 0, - <<"stopped">> := 0, - <<"total">> := 0 - }}, req(get, Top ++ ?RESHARD)), - - % GET _reshard/state - ?assertMatch({200, #{<<"state">> := <<"running">>}}, - req(get, Top ++ ?STATE)), - - % GET _reshard/jobs - ?assertMatch({200, #{ - <<"jobs">> := [], - <<"offset">> := 0, - <<"total_rows">> := 0 - }}, req(get, Top ++ ?JOBS)), - - % Some invalid paths and methods - ?assertMatch({404, _}, req(get, Top ++ ?RESHARD ++ "/invalidpath")), - ?assertMatch({405, _}, req(put, Top ++ ?RESHARD, #{dont => thinkso})), - ?assertMatch({405, _}, req(post, Top ++ ?RESHARD, #{nope => nope})) - end)}. 
- - -create_job_basic({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - % POST /_reshard/jobs - {C1, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}), - ?assertEqual(201, C1), - ?assertMatch([#{?OK := true, ?ID := J, <<"shard">> := S}] - when is_binary(J) andalso is_binary(S), R1), - [#{?ID := Id, <<"shard">> := Shard}] = R1, - - % GET /_reshard/jobs - ?assertMatch({200, #{ - <<"jobs">> := [#{?ID := Id, <<"type">> := <<"split">>}], - <<"offset">> := 0, - <<"total_rows">> := 1 - }}, req(get, Top ++ ?JOBS)), - - % GET /_reshard/job/$jobid - {C2, R2} = req(get, Top ++ ?JOBS ++ ?b2l(Id)), - ?assertEqual(200, C2), - ThisNode = atom_to_binary(node(), utf8), - ?assertMatch(#{?ID := Id}, R2), - ?assertMatch(#{<<"type">> := <<"split">>}, R2), - ?assertMatch(#{<<"source">> := Shard}, R2), - ?assertMatch(#{<<"history">> := History} when length(History) > 1, R2), - ?assertMatch(#{<<"node">> := ThisNode}, R2), - ?assertMatch(#{<<"split_state">> := SSt} when is_binary(SSt), R2), - ?assertMatch(#{<<"job_state">> := JSt} when is_binary(JSt), R2), - ?assertMatch(#{<<"state_info">> := #{}}, R2), - ?assertMatch(#{<<"target">> := Target} when length(Target) == 2, R2), - - % GET /_reshard/job/$jobid/state - ?assertMatch({200, #{<<"state">> := S, <<"reason">> := R}} - when is_binary(S) andalso (is_binary(R) orelse R =:= null), - req(get, Top ++ ?JOBS ++ ?b2l(Id) ++ "/state")), - - % GET /_reshard - ?assertMatch({200, #{<<"state">> := <<"running">>, <<"total">> := 1}}, - req(get, Top ++ ?RESHARD)), - - % DELETE /_reshard/jobs/$jobid - ?assertMatch({200, #{?OK := true}}, - req(delete, Top ++ ?JOBS ++ ?b2l(Id))), - - % GET _reshard/jobs - ?assertMatch({200, #{<<"jobs">> := [], <<"total_rows">> := 0}}, - req(get, Top ++ ?JOBS)), - - % GET /_reshard/job/$jobid should be a 404 - ?assertMatch({404, #{}}, req(get, Top ++ ?JOBS ++ ?b2l(Id))), - - % DELETE /_reshard/jobs/$jobid should be a 404 as well - ?assertMatch({404, #{}}, req(delete, Top ++ ?JOBS ++ ?b2l(Id))) - end)}. - - -create_two_jobs({Top, {Db1, Db2, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - Jobs = Top ++ ?JOBS, - - ?assertMatch({201, [#{?OK := true}]}, - req(post, Jobs, #{type => split, db => Db1})), - ?assertMatch({201, [#{?OK := true}]}, - req(post, Jobs, #{type => split, db => Db2})), - - ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD)), - - ?assertMatch({200, #{ - <<"jobs">> := [#{?ID := Id1}, #{?ID := Id2}], - <<"offset">> := 0, - <<"total_rows">> := 2 - }} when Id1 =/= Id2, req(get, Jobs)), - - {200, #{<<"jobs">> := [#{?ID := Id1}, #{?ID := Id2}]}} = req(get, Jobs), - - {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id1)), - ?assertMatch({200, #{<<"total">> := 1}}, req(get, Top ++ ?RESHARD)), - {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id2)), - ?assertMatch({200, #{<<"total">> := 0}}, req(get, Top ++ ?RESHARD)) - end)}. - - -create_multiple_jobs_from_one_post({Top, {_, _, Db3}}) -> - {timeout, ?TIMEOUT, ?_test(begin - Jobs = Top ++ ?JOBS, - {C1, R1} = req(post, Jobs, #{type => split, db => Db3}), - ?assertMatch({201, [#{?OK := true}, #{?OK := true}]}, {C1, R1}), - ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD)) - end)}. 
-
-
-start_stop_cluster_basic({Top, _}) ->
    {timeout, ?TIMEOUT, ?_test(begin
-        Url = Top ++ ?STATE,
-
-        ?assertMatch({200, #{
-            <<"state">> := <<"running">>,
-            <<"reason">> := null
-        }}, req(get, Url)),
-
-        ?assertMatch({200, _}, req(put, Url, #{state => stopped})),
-        ?assertMatch({200, #{
-            <<"state">> := <<"stopped">>,
-            <<"reason">> := R
-        }} when is_binary(R), req(get, Url)),
-
-        ?assertMatch({200, _}, req(put, Url, #{state => running})),
-
-        % Make sure the reason shows in the state GET request
-        Reason = <<"somereason">>,
-        ?assertMatch({200, _}, req(put, Url, #{state => stopped,
-            reason => Reason})),
-        ?assertMatch({200, #{<<"state">> := <<"stopped">>,
-            <<"reason">> := Reason}}, req(get, Url)),
-
-        % Top level summary also shows the reason
-        ?assertMatch({200, #{
-            <<"state">> := <<"stopped">>,
-            <<"state_reason">> := Reason
-        }}, req(get, Top ++ ?RESHARD)),
-        ?assertMatch({200, _}, req(put, Url, #{state => running})),
-        ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, Url))
-    end)}.
-
-
-test_disabled({Top, _}) ->
-    {timeout, ?TIMEOUT, ?_test(begin
-        application:set_env(mem3, reshard_disabled, true),
-        ?assertMatch({501, _}, req(get, Top ++ ?RESHARD)),
-        ?assertMatch({501, _}, req(put, Top ++ ?STATE, #{state => running})),
-
-        application:unset_env(mem3, reshard_disabled),
-        ?assertMatch({200, _}, req(get, Top ++ ?RESHARD)),
-        ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running}))
-    end)}.
-
-
-start_stop_cluster_with_a_job({Top, {Db1, _, _}}) ->
-    {timeout, ?TIMEOUT, ?_test(begin
-        Url = Top ++ ?STATE,
-
-        ?assertMatch({200, _}, req(put, Url, #{state => stopped})),
-        ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, Url)),
-
-        % Can add jobs with global state stopped, they just won't be running
-        {201, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}),
-        ?assertMatch([#{?OK := true}], R1),
-        [#{?ID := Id1}] = R1,
-        {200, J1} = req(get, Top ++ ?JOBS ++ ?b2l(Id1)),
-        ?assertMatch(#{?ID := Id1, <<"job_state">> := <<"stopped">>}, J1),
-        % Check summary stats
-        ?assertMatch({200, #{
-            <<"state">> := <<"stopped">>,
-            <<"running">> := 0,
-            <<"stopped">> := 1,
-            <<"total">> := 1
-        }}, req(get, Top ++ ?RESHARD)),
-
-        % Can delete the job when stopped
-        {200, #{?OK := true}} = req(delete, Top ++ ?JOBS ++ ?b2l(Id1)),
-        ?assertMatch({200, #{
-            <<"state">> := <<"stopped">>,
-            <<"running">> := 0,
-            <<"stopped">> := 0,
-            <<"total">> := 0
-        }}, req(get, Top ++ ?RESHARD)),
-
-        % Add same job again
-        {201, [#{?ID := Id2}]} = req(post, Top ++ ?JOBS, #{type => split,
-            db => Db1}),
-        ?assertMatch({200, #{?ID := Id2, <<"job_state">> := <<"stopped">>}},
-            req(get, Top ++ ?JOBS ++ ?b2l(Id2))),
-
-        % Job should start after resharding is started on the cluster
-        ?assertMatch({200, _}, req(put, Url, #{state => running})),
-        ?assertMatch({200, #{?ID := Id2, <<"job_state">> := JSt}}
-            when JSt =/= <<"stopped">>, req(get, Top ++ ?JOBS ++ ?b2l(Id2)))
-    end)}.
-
-
-individual_job_start_stop({Top, {Db1, _, _}}) ->
-    {timeout, ?TIMEOUT, ?_test(begin
-        intercept_state(topoff1),
-
-        Body = #{type => split, db => Db1},
-        {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
-
-        JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
-        StUrl = JobUrl ++ "/state",
-
-        % Wait for the job to start running and intercept it in topoff1 state
-        receive {JobPid, topoff1} -> ok end,
-        % Tell the intercept to never finish checkpointing so the job is left
-        % hanging forever in running state
-        JobPid ! cancel,
-        ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-
-        {200, _} = req(put, StUrl, #{state => stopped}),
-        wait_state(StUrl, <<"stopped">>),
-
-        % Stop/start resharding globally and the job should still stay stopped
-        ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
-        ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
-        ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
-
-        % Start the job again
-        ?assertMatch({200, _}, req(put, StUrl, #{state => running})),
-        % Wait for the job to start running and intercept it in topoff1 state
-        receive {JobPid2, topoff1} -> ok end,
-        ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-        % Let it continue running and it should complete eventually
-        JobPid2 ! continue,
-        wait_state(StUrl, <<"completed">>)
-    end)}.
-
-
-individual_job_stop_when_cluster_stopped({Top, {Db1, _, _}}) ->
-    {timeout, ?TIMEOUT, ?_test(begin
-        intercept_state(topoff1),
-
-        Body = #{type => split, db => Db1},
-        {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
-
-        JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
-        StUrl = JobUrl ++ "/state",
-
-        % Wait for the job to start running and intercept in topoff1
-        receive {JobPid, topoff1} -> ok end,
-        % Tell the intercept to never finish checkpointing so the job is left
-        % hanging forever in running state
-        JobPid ! cancel,
-        ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-
-        % Stop resharding globally
-        ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
-        wait_state(StUrl, <<"stopped">>),
-
-        % Stop the job specifically
-        {200, _} = req(put, StUrl, #{state => stopped}),
-        % Job stays stopped
-        ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
-
-        % Set cluster to running again
-        ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
-
-        % The job should stay stopped
-        ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
-
-        % It should be possible to resume the job and it should complete
-        ?assertMatch({200, _}, req(put, StUrl, #{state => running})),
-
-        % Wait for the job to start running and intercept in topoff1 state
-        receive {JobPid2, topoff1} -> ok end,
-        ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-
-        % Let it continue running and it should complete eventually
-        JobPid2 ! continue,
-        wait_state(StUrl, <<"completed">>)
-    end)}.
- - -create_job_with_invalid_arguments({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - Jobs = Top ++ ?JOBS, - - % Nothing in the body - ?assertMatch({400, _}, req(post, Jobs, #{})), - - % Missing type - ?assertMatch({400, _}, req(post, Jobs, #{db => Db1})), - - % Have type but no db and no shard - ?assertMatch({400, _}, req(post, Jobs, #{type => split})), - - % Have type and db but db is invalid - ?assertMatch({400, _}, req(post, Jobs, #{db => <<"baddb">>, - type => split})), - - % Have type and shard but shard is not an existing database - ?assertMatch({404, _}, req(post, Jobs, #{type => split, - shard => <<"shards/80000000-ffffffff/baddb.1549492084">>})), - - % Bad range values, too large, different types, inverted - ?assertMatch({400, _}, req(post, Jobs, #{db => Db1, range => 42, - type => split})), - ?assertMatch({400, _}, req(post, Jobs, #{db => Db1, - range => <<"x">>, type => split})), - ?assertMatch({400, _}, req(post, Jobs, #{db => Db1, - range => <<"ffffffff-80000000">>, type => split})), - ?assertMatch({400, _}, req(post, Jobs, #{db => Db1, - range => <<"00000000-fffffffff">>, type => split})), - - % Can't have both db and shard - ?assertMatch({400, _}, req(post, Jobs, #{type => split, db => Db1, - shard => <<"blah">>})) - end)}. - - -create_job_with_db({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - Jobs = Top ++ ?JOBS, - Body1 = #{type => split, db => Db1}, - - % Node with db - N = atom_to_binary(node(), utf8), - {C1, R1} = req(post, Jobs, Body1#{node => N}), - ?assertMatch({201, [#{?OK := true}]}, {C1, R1}), - wait_to_complete_then_cleanup(Top, R1), - - % Range and db - {C2, R2} = req(post, Jobs, Body1#{range => <<"00000000-7fffffff">>}), - ?assertMatch({201, [#{?OK := true}]}, {C2, R2}), - wait_to_complete_then_cleanup(Top, R2), - - % Node, range and db - Range = <<"80000000-ffffffff">>, - {C3, R3} = req(post, Jobs, Body1#{range => Range, node => N}), - ?assertMatch({201, [#{?OK := true}]}, {C3, R3}), - wait_to_complete_then_cleanup(Top, R3), - - ?assertMatch([ - [16#00000000, 16#3fffffff], - [16#40000000, 16#7fffffff], - [16#80000000, 16#bfffffff], - [16#c0000000, 16#ffffffff] - ], [mem3:range(S) || S <- lists:sort(mem3:shards(Db1))]) - end)}. - - -create_job_with_shard_name({Top, {_, _, Db3}}) -> - {timeout, ?TIMEOUT, ?_test(begin - Jobs = Top ++ ?JOBS, - [S1, S2] = [mem3:name(S) || S <- lists:sort(mem3:shards(Db3))], - - % Shard only - {C1, R1} = req(post, Jobs, #{type => split, shard => S1}), - ?assertMatch({201, [#{?OK := true}]}, {C1, R1}), - wait_to_complete_then_cleanup(Top, R1), - - % Shard with a node - N = atom_to_binary(node(), utf8), - {C2, R2} = req(post, Jobs, #{type => split, shard => S2, node => N}), - ?assertMatch({201, [#{?OK := true}]}, {C2, R2}), - wait_to_complete_then_cleanup(Top, R2), - - ?assertMatch([ - [16#00000000, 16#3fffffff], - [16#40000000, 16#7fffffff], - [16#80000000, 16#bfffffff], - [16#c0000000, 16#ffffffff] - ], [mem3:range(S) || S <- lists:sort(mem3:shards(Db3))]) - end)}. 
- - -completed_job_handling({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - Jobs = Top ++ ?JOBS, - - % Run job to completion - {C1, R1} = req(post, Jobs, #{type => split, db => Db1}), - ?assertMatch({201, [#{?OK := true}]}, {C1, R1}), - [#{?ID := Id}] = R1, - wait_to_complete(Top, R1), - - % Check top level stats - ?assertMatch({200, #{ - <<"state">> := <<"running">>, - <<"state_reason">> := null, - <<"completed">> := 1, - <<"failed">> := 0, - <<"running">> := 0, - <<"stopped">> := 0, - <<"total">> := 1 - }}, req(get, Top ++ ?RESHARD)), - - % Job state itself - JobUrl = Jobs ++ ?b2l(Id), - ?assertMatch({200, #{ - <<"split_state">> := <<"completed">>, - <<"job_state">> := <<"completed">> - }}, req(get, JobUrl)), - - % Job's state endpoint - StUrl = Jobs ++ ?b2l(Id) ++ "/state", - ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)), - - % Try to stop it and it should stay completed - {200, _} = req(put, StUrl, #{state => stopped}), - ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)), - - % Try to resume it and it should stay completed - {200, _} = req(put, StUrl, #{state => running}), - ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)), - - % Stop resharding globally and job should still stay completed - ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})), - ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)), - - % Start resharding and job stays completed - ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})), - ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)), - - ?assertMatch({200, #{?OK := true}}, req(delete, JobUrl)) - end)}. - - -handle_db_deletion_in_topoff1({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = delete_source_in_state(Top, Db1, topoff1), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) - end)}. - - -handle_db_deletion_in_initial_copy({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = delete_source_in_state(Top, Db1, initial_copy), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) - end)}. - - -handle_db_deletion_in_copy_local_docs({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = delete_source_in_state(Top, Db1, copy_local_docs), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) - end)}. - - -handle_db_deletion_in_build_indices({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = delete_source_in_state(Top, Db1, build_indices), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) - end)}. - - -handle_db_deletion_in_update_shard_map({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = delete_source_in_state(Top, Db1, update_shardmap), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) - end)}. - - -handle_db_deletion_in_wait_source_close({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = delete_source_in_state(Top, Db1, wait_source_close), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>) - end)}. - - -recover_in_topoff1({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = recover_in_state(Top, Db1, topoff1), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end)}. - - -recover_in_initial_copy({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = recover_in_state(Top, Db1, initial_copy), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end)}. 
- - -recover_in_copy_local_docs({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = recover_in_state(Top, Db1, copy_local_docs), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end)}. - - -recover_in_build_indices({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = recover_in_state(Top, Db1, build_indices), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end)}. - - -recover_in_update_shard_map({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = recover_in_state(Top, Db1, update_shardmap), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end)}. - - -recover_in_wait_source_close({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = recover_in_state(Top, Db1, wait_source_close), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end)}. - - -recover_in_topoff3({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = recover_in_state(Top, Db1, topoff3), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end)}. - - -recover_in_source_delete({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - JobId = recover_in_state(Top, Db1, source_delete), - wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>) - end)}. - - -check_max_jobs({Top, {Db1, Db2, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - Jobs = Top ++ ?JOBS, - - config:set("reshard", "max_jobs", "0", _Persist=false), - {C1, R1} = req(post, Jobs, #{type => split, db => Db1}), - ?assertMatch({500, [#{<<"error">> := <<"max_jobs_exceeded">>}]}, {C1, R1}), - - config:set("reshard", "max_jobs", "1", _Persist=false), - {201, R2} = req(post, Jobs, #{type => split, db => Db1}), - wait_to_complete(Top, R2), - - % Stop clustering so jobs are not started anymore and ensure max jobs - % is enforced even if jobs are stopped - ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})), - - {C3, R3} = req(post, Jobs, #{type => split, db => Db2}), - ?assertMatch({500, [#{<<"error">> := <<"max_jobs_exceeded">>}]}, - {C3, R3}), - - % Allow the job to be created by raising max_jobs - config:set("reshard", "max_jobs", "2", _Persist=false), - - {C4, R4} = req(post, Jobs, #{type => split, db => Db2}), - ?assertEqual(201, C4), - - % Lower max_jobs after job is created but it's not running - config:set("reshard", "max_jobs", "1", _Persist=false), - - % Start resharding again - ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})), - - % Jobs that have been created already are not removed if max jobs is lowered - % so make sure the job completes - wait_to_complete(Top, R4) - end)}. 
- - -check_node_and_range_required_params({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - Jobs = Top ++ ?JOBS, - - Node = atom_to_binary(node(), utf8), - Range = <<"00000000-ffffffff">>, - - config:set("reshard", "require_node_param", "true", _Persist=false), - {C1, R1} = req(post, Jobs, #{type => split, db => Db1}), - NodeRequiredErr = <<"`node` prameter is required">>, - ?assertEqual({400, #{<<"error">> => <<"bad_request">>, - <<"reason">> => NodeRequiredErr}}, {C1, R1}), - - config:set("reshard", "require_range_param", "true", _Persist=false), - {C2, R2} = req(post, Jobs, #{type => split, db => Db1, node => Node}), - RangeRequiredErr = <<"`range` prameter is required">>, - ?assertEqual({400, #{<<"error">> => <<"bad_request">>, - <<"reason">> => RangeRequiredErr}}, {C2, R2}), - - Body = #{type => split, db => Db1, range => Range, node => Node}, - {C3, R3} = req(post, Jobs, Body), - ?assertMatch({201, [#{?OK := true}]}, {C3, R3}), - wait_to_complete_then_cleanup(Top, R3) - end)}. - - -cleanup_completed_jobs({Top, {Db1, _, _}}) -> - {timeout, ?TIMEOUT, ?_test(begin - Body = #{type => split, db => Db1}, - {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body), - JobUrl = Top ++ ?JOBS ++ ?b2l(Id), - wait_state(JobUrl ++ "/state", <<"completed">>), - delete_db(Top, Db1), - wait_for_http_code(JobUrl, 404) - end)}. - - -% Test help functions - -wait_to_complete_then_cleanup(Top, Jobs) -> - JobsUrl = Top ++ ?JOBS, - lists:foreach(fun(#{?ID := Id}) -> - wait_state(JobsUrl ++ ?b2l(Id) ++ "/state", <<"completed">>), - {200, _} = req(delete, JobsUrl ++ ?b2l(Id)) - end, Jobs). - - -wait_to_complete(Top, Jobs) -> - JobsUrl = Top ++ ?JOBS, - lists:foreach(fun(#{?ID := Id}) -> - wait_state(JobsUrl ++ ?b2l(Id) ++ "/state", <<"completed">>) - end, Jobs). - - -intercept_state(State) -> - TestPid = self(), - meck:new(mem3_reshard_job, [passthrough]), - meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) -> - case Job#job.split_state of - State -> - TestPid ! {self(), State}, - receive - continue -> meck:passthrough([Job]); - cancel -> ok - end; - _ -> - meck:passthrough([Job]) - end - end). - - -cancel_intercept() -> - meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) -> - meck:passthrough([Job]) - end). - - -wait_state(Url, State) -> - test_util:wait(fun() -> - case req(get, Url) of - {200, #{<<"state">> := State}} -> ok; - {200, #{}} -> timer:sleep(100), wait - end - end, 30000). - - -wait_for_http_code(Url, Code) when is_integer(Code) -> - test_util:wait(fun() -> - case req(get, Url) of - {Code, _} -> ok; - {_, _} -> timer:sleep(100), wait - end - end, 30000). - - -delete_source_in_state(Top, Db, State) when is_atom(State), is_binary(Db) -> - intercept_state(State), - Body = #{type => split, db => Db}, - {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body), - receive {JobPid, State} -> ok end, - sync_delete_db(Top, Db), - JobPid ! continue, - Id. - - -recover_in_state(Top, Db, State) when is_atom(State) -> - intercept_state(State), - Body = #{type => split, db => Db}, - {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body), - receive {JobPid, State} -> ok end, - % Job is now stuck in running we prevented it from executing - % the given state - JobPid ! cancel, - % Now restart resharding - ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})), - cancel_intercept(), - ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})), - Id. 
- - -create_db(Top, Db, QArgs) when is_binary(Db) -> - Url = Top ++ binary_to_list(Db) ++ QArgs, - {ok, Status, _, _} = test_request:put(Url, [?JSON, ?AUTH], "{}"), - ?assert(Status =:= 201 orelse Status =:= 202). - - -delete_db(Top, Db) when is_binary(Db) -> - Url = Top ++ binary_to_list(Db), - case test_request:get(Url, [?AUTH]) of - {ok, 404, _, _} -> - not_found; - {ok, 200, _, _} -> - {ok, 200, _, _} = test_request:delete(Url, [?AUTH]), - ok - end. - - -sync_delete_db(Top, Db) when is_binary(Db) -> - delete_db(Top, Db), - try - Shards = mem3:local_shards(Db), - ShardNames = [mem3:name(S) || S <- Shards], - [couch_server:delete(N, [?ADMIN_CTX]) || N <- ShardNames], - ok - catch - error:database_does_not_exist -> - ok - end. - - -req(Method, Url) -> - Headers = [?AUTH], - {ok, Code, _, Res} = test_request:request(Method, Url, Headers), - {Code, jiffy:decode(Res, [return_maps])}. - - -req(Method, Url, #{} = Body) -> - req(Method, Url, jiffy:encode(Body)); - -req(Method, Url, Body) -> - Headers = [?JSON, ?AUTH], - {ok, Code, _, Res} = test_request:request(Method, Url, Headers, Body), - {Code, jiffy:decode(Res, [return_maps])}. diff --git a/src/mem3/test/mem3_reshard_changes_feed_test.erl b/src/mem3/test/mem3_reshard_changes_feed_test.erl deleted file mode 100644 index 4b9e2a34a..000000000 --- a/src/mem3/test/mem3_reshard_changes_feed_test.erl +++ /dev/null @@ -1,389 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(mem3_reshard_changes_feed_test). - - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("mem3/src/mem3_reshard.hrl"). - --define(TIMEOUT, 60). % seconds - --define(assertChanges(Expected, Received), - begin - ((fun() -> - ExpectedIDs = lists:sort([I || #{id := I} <- Expected]), - ReceivedIDs = lists:sort([I || #{id := I} <- Received]), - ?assertEqual(ExpectedIDs, ReceivedIDs) - end)()) - end). - - -setup() -> - Db1 = ?tempdb(), - create_db(Db1, [{q, 1}, {n, 1}]), - #{db1 => Db1}. - - -teardown(#{} = Dbs) -> - mem3_reshard:reset_state(), - maps:map(fun(_, Db) -> delete_db(Db) end, Dbs). - - -start_couch() -> - test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]). - - -stop_couch(Ctx) -> - test_util:stop_couch(Ctx). - - -mem3_reshard_changes_feed_test_() -> - { - "mem3 shard split changes feed tests", - { - setup, - fun start_couch/0, fun stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun normal_feed_should_work_after_split/1, - fun continuous_feed_should_work_during_split/1 - ] - } - } - }. 
- - -normal_feed_should_work_after_split(#{db1 := Db}) -> - {timeout, ?TIMEOUT, ?_test(begin - DocSpec = #{ - docs => [1, 10], - delete => [5, 6] - }, - add_test_docs(Db, DocSpec), - - % gather pre-shard changes - BaseArgs = #changes_args{feed = "normal", dir = fwd, since = 0}, - {ok, OldChanges, OldEndSeq} = get_changes_feed(Db, BaseArgs), - - % Split the shard - split_and_wait(Db), - - % verify changes list consistent for all the old seqs - lists:foldl(fun(#{seq := Seq} = C, ExpectedChanges) -> - Args = BaseArgs#changes_args{since = Seq}, - {ok, Changes, _EndSeq} = get_changes_feed(Db, Args), - ?assertChanges(ExpectedChanges, Changes), - [C | ExpectedChanges] - end, [], OldChanges), - - % confirm that old LastSeq respected - Args1 = BaseArgs#changes_args{since = OldEndSeq}, - {ok, Changes1, EndSeq1} = get_changes_feed(Db, Args1), - ?assertChanges([], Changes1), - - % confirm that new LastSeq also respected - Args2 = BaseArgs#changes_args{since = EndSeq1}, - {ok, Changes2, EndSeq2} = get_changes_feed(Db, Args2), - ?assertChanges([], Changes2), - ?assertEqual(EndSeq2, EndSeq1), - - % confirm we didn't lost any changes and have consistent last seq - {ok, Changes3, EndSeq3} = get_changes_feed(Db, BaseArgs), - ?assertChanges(OldChanges, Changes3), - - % add some docs - add_test_docs(Db, #{docs => [11, 15]}), - Args4 = BaseArgs#changes_args{since = EndSeq3}, - {ok, Changes4, EndSeq4} = get_changes_feed(Db, Args4), - AddedChanges = [#{id => ID} || #doc{id = ID} <- docs([11, 15])], - ?assertChanges(AddedChanges, Changes4), - - % confirm include_docs and deleted works - Args5 = BaseArgs#changes_args{include_docs = true}, - {ok, Changes5, EndSeq5} = get_changes_feed(Db, Args5), - ?assertEqual(EndSeq4, EndSeq5), - [SampleChange] = [C || #{id := ID} = C <- Changes5, ID == <<"00005">>], - ?assertMatch(#{deleted := true}, SampleChange), - ?assertMatch(#{doc := {Body}} when is_list(Body), SampleChange), - - % update and delete some pre and post split docs - AllDocs = [couch_doc:from_json_obj(Doc) || #{doc := Doc} <- Changes5], - UpdateDocs = lists:filtermap(fun - (#doc{id = <<"00002">>}) -> true; - (#doc{id = <<"00012">>}) -> true; - (#doc{id = <<"00004">>} = Doc) -> {true, Doc#doc{deleted = true}}; - (#doc{id = <<"00014">>} = Doc) -> {true, Doc#doc{deleted = true}}; - (_) -> false - end, AllDocs), - update_docs(Db, UpdateDocs), - - Args6 = BaseArgs#changes_args{since = EndSeq5}, - {ok, Changes6, EndSeq6} = get_changes_feed(Db, Args6), - UpdatedChanges = [#{id => ID} || #doc{id = ID} <- UpdateDocs], - ?assertChanges(UpdatedChanges, Changes6), - [#{seq := Seq6} | _] = Changes6, - ?assertEqual(EndSeq6, Seq6), - - Args7 = Args6#changes_args{dir = rev, limit = 4}, - {ok, Changes7, EndSeq7} = get_changes_feed(Db, Args7), - ?assertEqual(4, length(Changes7)), - [#{seq := Seq7} | _] = Changes7, - ?assertEqual(EndSeq7, Seq7) - end)}. - - -continuous_feed_should_work_during_split(#{db1 := Db}) -> - {timeout, ?TIMEOUT, ?_test(begin - {UpdaterPid, UpdaterRef} = spawn_monitor(fun() -> - Updater = fun U({State, I}) -> - receive - {get_state, {Pid, Ref}} -> - Pid ! {state, Ref, {State, I}}, - U({State, I}); - add -> - DocSpec = #{docs => [I, I]}, - add_test_docs(Db, DocSpec), - U({State, I + 1}); - split -> - spawn_monitor(fun() -> split_and_wait(Db) end), - U({"in_process", I}); - stop -> - receive {'DOWN', _, process, _, _} -> ok end, - ok - end - end, - Updater({"before", 1}) - end), - - Callback = fun - (start, Acc) -> - {ok, Acc}; - (waiting_for_updates, Acc) -> - Ref = make_ref(), - UpdaterPid ! 
{get_state, {self(), Ref}}, - receive {state, Ref, {State, _}} -> ok end, - case {State, length(Acc)} of - {"before", N} when N < 5 -> - UpdaterPid ! add, - {ok, Acc}; - {"before", _} -> - UpdaterPid ! split, - {ok, Acc}; - {"in_process", N} when N < 10 -> - UpdaterPid ! add, - {ok, Acc}; - {"in_process", _} -> - {ok, Acc} - end; - (timeout, Acc) -> - {ok, Acc}; - ({change, {Change}}, Acc) -> - CM = maps:from_list(Change), - {ok, [CM | Acc]}; - ({stop, EndSeq, _Pending}, Acc) -> - % Notice updater is still running - {stop, EndSeq, Acc} - end, - - BaseArgs = #changes_args{ - feed = "continuous", - heartbeat = 100, - timeout = 1000 - }, - StopResult = get_changes_feed(Db, BaseArgs, Callback), - - % Changes feed stopped when source shard was deleted - ?assertMatch({stop, _, _}, StopResult), - {stop, StopEndSeq, StopChanges} = StopResult, - - % Add 5 extra docs to the db right after changes feed was stopped - [UpdaterPid ! add || _ <- lists:seq(1, 5)], - - % The the number of documents that updater had added - Ref = make_ref(), - UpdaterPid ! {get_state, {self(), Ref}}, - DocCount = receive {state, Ref, {_, I}} -> I - 1 end, - - UpdaterPid ! stop, - receive - {'DOWN', UpdaterRef, process, UpdaterPid, normal} -> - ok; - {'DOWN', UpdaterRef, process, UpdaterPid, Error} -> - erlang:error({test_context_failed, [ - {module, ?MODULE}, - {line, ?LINE}, - {value, Error}, - {reason, "Updater died"}]}) - end, - - AfterArgs = #changes_args{feed = "normal", since = StopEndSeq}, - {ok, AfterChanges, _} = get_changes_feed(Db, AfterArgs), - DocIDs = [Id || #{id := Id} <- StopChanges ++ AfterChanges], - ExpectedDocIDs = [doc_id(<<>>, N) || N <- lists:seq(1, DocCount)], - ?assertEqual(ExpectedDocIDs, lists:usort(DocIDs)) - end)}. - - -split_and_wait(Db) -> - [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)), - {ok, JobId} = mem3_reshard:start_split_job(Shard), - wait_state(JobId, completed), - ResultShards = lists:sort(mem3:local_shards(Db)), - ?assertEqual(2, length(ResultShards)). - - -wait_state(JobId, State) -> - test_util:wait(fun() -> - case mem3_reshard:job(JobId) of - {ok, {Props}} -> - case couch_util:get_value(job_state, Props) of - State -> ok; - _ -> timer:sleep(100), wait - end; - {error, not_found} -> timer:sleep(100), wait - end - end, 30000). - - -get_changes_feed(Db, Args) -> - get_changes_feed(Db, Args, fun changes_callback/2). - - -get_changes_feed(Db, Args, Callback) -> - with_proc(fun() -> - fabric:changes(Db, Callback, [], Args) - end). - - -changes_callback(start, Acc) -> - {ok, Acc}; -changes_callback({change, {Change}}, Acc) -> - CM = maps:from_list(Change), - {ok, [CM | Acc]}; -changes_callback({stop, EndSeq, _Pending}, Acc) -> - {ok, Acc, EndSeq}. - - -%% common helpers from here - - -create_db(DbName, Opts) -> - GL = erlang:group_leader(), - with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL). - - -delete_db(DbName) -> - GL = erlang:group_leader(), - with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL). - - -with_proc(Fun) -> - with_proc(Fun, undefined, 30000). - - -with_proc(Fun, GroupLeader) -> - with_proc(Fun, GroupLeader, 30000). 
- - -with_proc(Fun, GroupLeader, Timeout) -> - {Pid, Ref} = spawn_monitor(fun() -> - case GroupLeader of - undefined -> ok; - _ -> erlang:group_leader(GroupLeader, self()) - end, - exit({with_proc_res, Fun()}) - end), - receive - {'DOWN', Ref, process, Pid, {with_proc_res, Res}} -> - Res; - {'DOWN', Ref, process, Pid, Error} -> - error(Error) - after Timeout -> - erlang:demonitor(Ref, [flush]), - exit(Pid, kill), - error({with_proc_timeout, Fun, Timeout}) - end. - - -add_test_docs(DbName, #{} = DocSpec) -> - Docs = docs(maps:get(docs, DocSpec, [])), - Res = update_docs(DbName, Docs), - Docs1 = lists:map(fun({Doc, {ok, {RevPos, Rev}}}) -> - Doc#doc{revs = {RevPos, [Rev]}} - end, lists:zip(Docs, Res)), - case delete_docs(maps:get(delete, DocSpec, []), Docs1) of - [] -> ok; - [_ | _] = Deleted -> update_docs(DbName, Deleted) - end, - ok. - - -update_docs(DbName, Docs) -> - with_proc(fun() -> - case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of - {accepted, Res} -> Res; - {ok, Res} -> Res - end - end). - - -delete_docs([S, E], Docs) when E >= S -> - ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)], - lists:filtermap(fun(#doc{id = Id} = Doc) -> - case lists:member(Id, ToDelete) of - true -> {true, Doc#doc{deleted = true}}; - false -> false - end - end, Docs); -delete_docs(_, _) -> - []. - - -docs([S, E]) when E >= S -> - [doc(<<"">>, I) || I <- lists:seq(S, E)]; -docs(_) -> - []. - - -doc(Pref, Id) -> - Body = [{<<"a">>, <<"b">>}], - doc(Pref, Id, Body, 42). - - -doc(Pref, Id, BodyProps, AttSize) -> - #doc{ - id = doc_id(Pref, Id), - body = {BodyProps}, - atts = atts(AttSize) - }. - - -doc_id(Pref, Id) -> - IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])), - <>. - - -atts(0) -> - []; - -atts(Size) when is_integer(Size), Size >= 1 -> - Data = << <<"x">> || _ <- lists:seq(1, Size) >>, - [couch_att:new([ - {name, <<"att">>}, - {type, <<"app/binary">>}, - {att_len, Size}, - {data, Data} - ])]. diff --git a/src/mem3/test/mem3_reshard_test.erl b/src/mem3/test/mem3_reshard_test.erl deleted file mode 100644 index ab6202115..000000000 --- a/src/mem3/test/mem3_reshard_test.erl +++ /dev/null @@ -1,805 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(mem3_reshard_test). - - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include_lib("mem3/src/mem3_reshard.hrl"). --include_lib("couch_mrview/include/couch_mrview.hrl"). % for all_docs function - --define(ID, <<"_id">>). --define(TIMEOUT, 60). 
- -setup() -> - HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name}, - case HaveDreyfus of false -> ok; true -> - mock_dreyfus_indices() - end, - - HaveHastings = code:lib_dir(hastings) /= {error, bad_name}, - case HaveHastings of false -> ok; true -> - mock_hastings_indices() - end, - {Db1, Db2} = {?tempdb(), ?tempdb()}, - create_db(Db1, [{q, 1}, {n, 1}]), - PartProps = [{partitioned, true}, {hash, [couch_partition, hash, []]}], - create_db(Db2, [{q, 1}, {n, 1}, {props, PartProps}]), - config:set("reshard", "retry_interval_sec", "0", _Persist=false), - #{db1 => Db1, db2 => Db2}. - - -teardown(#{} = Dbs) -> - mem3_reshard:reset_state(), - maps:map(fun(_, Db) -> delete_db(Db) end, Dbs), - config:delete("reshard", "retry_interval_sec", _Persist=false), - meck:unload(). - - -start_couch() -> - test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]). - - -stop_couch(Ctx) -> - test_util:stop_couch(Ctx). - - -mem3_reshard_db_test_() -> - { - "mem3 shard split db tests", - { - setup, - fun start_couch/0, fun stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun split_one_shard/1, - fun update_docs_before_topoff1/1, - fun indices_are_built/1, - fun split_partitioned_db/1, - fun split_twice/1, - fun couch_events_are_emitted/1, - fun retries_work/1, - fun target_reset_in_initial_copy/1, - fun split_an_incomplete_shard_map/1 - ] - } - } - }. - - -% This is a basic test to check that shard splitting preserves documents, and -% db meta props like revs limits and security. -split_one_shard(#{db1 := Db}) -> - {timeout, ?TIMEOUT, ?_test(begin - DocSpec = #{docs => 10, delete => [5, 9], mrview => 1, local => 1}, - add_test_docs(Db, DocSpec), - - % Save documents before the split - Docs0 = get_all_docs(Db), - Local0 = get_local_docs(Db), - - % Set some custom metadata properties - set_revs_limit(Db, 942), - set_purge_infos_limit(Db, 943), - SecObj = {[{<<"foo">>, <<"bar">>}]}, - set_security(Db, SecObj), - - % DbInfo is saved after setting metadata bits - % as those could bump the update sequence - DbInfo0 = get_db_info(Db), - - % Split the one shard - [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)), - {ok, JobId} = mem3_reshard:start_split_job(Shard), - wait_state(JobId, completed), - - % Perform some basic checks that the shard was split - Shards1 = lists:sort(mem3:local_shards(Db)), - ?assertEqual(2, length(Shards1)), - [#shard{range = R1}, #shard{range = R2}] = Shards1, - ?assertEqual([16#00000000, 16#7fffffff], R1), - ?assertEqual([16#80000000, 16#ffffffff], R2), - - % Check metadata bits after the split - ?assertEqual(942, get_revs_limit(Db)), - ?assertEqual(943, get_purge_infos_limit(Db)), - ?assertEqual(SecObj, get_security(Db)), - - DbInfo1 = get_db_info(Db), - Docs1 = get_all_docs(Db), - Local1 = get_local_docs(Db), - - % When comparing db infos, ignore update sequences they won't be the - % same since they are more shards involved after the split - ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)), - - % Update seq prefix number is a sum of all shard update sequences - #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0), - #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1), - ?assertEqual(UpdateSeq0 * 2, UpdateSeq1), - - % Finally compare that the documents are still there after the split - ?assertEqual(Docs0, Docs1), - - % Don't forget about the local but don't include internal checkpoints - % as some of those are munged and transformed during the split - ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1)) - end)}. 
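
% A worked example of the `UpdateSeq0 * 2` assertion above. With q=1 the
% source shard ends at some sequence N; the initial copy gives each of the
% two targets that same N so they stay consistent with the source, and the
% clustered update_seq prefix, which sums the per-shard sequences,
% therefore doubles. (Sketch only; 13 is an arbitrary sequence.)
seq_doubling_sketch() ->
    SourceSeq = 13,
    TargetSeqs = [SourceSeq, SourceSeq],  % both targets match the source
    ?assertEqual(SourceSeq * 2, lists:sum(TargetSeqs)).
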
- - -% This test checks that document added while the shard is being split are not -% lost. Topoff1 state happens before indices are built -update_docs_before_topoff1(#{db1 := Db}) -> - {timeout, ?TIMEOUT, ?_test(begin - add_test_docs(Db, #{docs => 10}), - - intercept_state(topoff1), - - [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)), - {ok, JobId} = mem3_reshard:start_split_job(Shard), - - receive {JobPid, topoff1} -> ok end, - add_test_docs(Db, #{docs => [10, 19], local => 1}), - Docs0 = get_all_docs(Db), - Local0 = get_local_docs(Db), - DbInfo0 = get_db_info(Db), - JobPid ! continue, - - wait_state(JobId, completed), - - % Perform some basic checks that the shard was split - Shards1 = lists:sort(mem3:local_shards(Db)), - ?assertEqual(2, length(Shards1)), - - DbInfo1 = get_db_info(Db), - Docs1 = get_all_docs(Db), - Local1 = get_local_docs(Db), - - ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)), - - % Update sequence after initial copy with 10 docs would be 10 on each - % target shard (to match the source) and the total update sequence - % would have been 20. But then 10 more docs were added (3 might have - % ended up on one target and 7 on another) so the final update sequence - % would then be 20 + 10 = 30. - ?assertMatch(#{<<"update_seq">> := 30}, update_seq_to_num(DbInfo1)), - - ?assertEqual(Docs0, Docs1), - ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1)) - end)}. - - -% This test that indices are built during shard splitting. -indices_are_built(#{db1 := Db}) -> - {timeout, ?TIMEOUT, ?_test(begin - HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name}, - HaveHastings = code:lib_dir(hastings) /= {error, bad_name}, - - add_test_docs(Db, #{docs => 10, mrview => 2, search => 2, geo => 2}), - [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)), - {ok, JobId} = mem3_reshard:start_split_job(Shard), - wait_state(JobId, completed), - Shards1 = lists:sort(mem3:local_shards(Db)), - ?assertEqual(2, length(Shards1)), - MRViewGroupInfo = get_group_info(Db, <<"_design/mrview00000">>), - ?assertMatch(#{<<"update_seq">> := 32}, MRViewGroupInfo), - - HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name}, - case HaveDreyfus of false -> ok; true -> - % 4 because there are 2 indices and 2 target shards - ?assertEqual(4, meck:num_calls(dreyfus_index, await, 2)) - end, - - HaveHastings = code:lib_dir(hastings) /= {error, bad_name}, - case HaveHastings of false -> ok; true -> - % 4 because there are 2 indices and 2 target shards - ?assertEqual(4, meck:num_calls(hastings_index, await, 2)) - end - end)}. - - -mock_dreyfus_indices() -> - meck:expect(dreyfus_index, design_doc_to_indexes, fun(Doc) -> - #doc{body = {BodyProps}} = Doc, - case couch_util:get_value(<<"indexes">>, BodyProps) of - undefined -> - []; - {[_]} -> - [{dreyfus, <<"db">>, dreyfus_index1}] - end - end), - meck:expect(dreyfus_index_manager, get_index, fun(_, _) -> {ok, pid} end), - meck:expect(dreyfus_index, await, fun(_, _) -> ok end). - - -mock_hastings_indices() -> - meck:expect(hastings_index, design_doc_to_indexes, fun(Doc) -> - #doc{body = {BodyProps}} = Doc, - case couch_util:get_value(<<"st_indexes">>, BodyProps) of - undefined -> - []; - {[_]} -> - [{hastings, <<"db">>, hastings_index1}] - end - end), - meck:expect(hastings_index_manager, get_index, fun(_, _) -> {ok, pid} end), - meck:expect(hastings_index, await, fun(_, _) -> ok end). 
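
% Note on the await counts expected from meck:num_calls/3 in
% indices_are_built/1 above: the `4` is indices x target shards — each of
% the two mocked indexes is awaited once on each of the two post-split
% target shards.
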
- -% Split partitioned database -split_partitioned_db(#{db2 := Db}) -> - {timeout, ?TIMEOUT, ?_test(begin - DocSpec = #{ - pdocs => #{ - <<"PX">> => 5, - <<"PY">> => 5 - }, - mrview => 1, - local => 1 - }, - add_test_docs(Db, DocSpec), - - % Save documents before the split - Docs0 = get_all_docs(Db), - Local0 = get_local_docs(Db), - - % Set some custom metadata properties - set_revs_limit(Db, 942), - set_purge_infos_limit(Db, 943), - SecObj = {[{<<"foo">>, <<"bar">>}]}, - set_security(Db, SecObj), - - % DbInfo is saved after setting metadata bits - % as those could bump the update sequence - DbInfo0 = get_db_info(Db), - PX0 = get_partition_info(Db, <<"PX">>), - PY0 = get_partition_info(Db, <<"PY">>), - - % Split the one shard - [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)), - {ok, JobId} = mem3_reshard:start_split_job(Shard), - wait_state(JobId, completed), - - % Perform some basic checks that the shard was split - Shards1 = lists:sort(mem3:local_shards(Db)), - ?assertEqual(2, length(Shards1)), - [#shard{range = R1}, #shard{range = R2}] = Shards1, - ?assertEqual([16#00000000, 16#7fffffff], R1), - ?assertEqual([16#80000000, 16#ffffffff], R2), - - % Check metadata bits after the split - ?assertEqual(942, get_revs_limit(Db)), - ?assertEqual(943, get_purge_infos_limit(Db)), - ?assertEqual(SecObj, get_security(Db)), - - DbInfo1 = get_db_info(Db), - Docs1 = get_all_docs(Db), - Local1 = get_local_docs(Db), - - % When comparing db infos, ignore update sequences they won't be the - % same since they are more shards involved after the split - ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)), - - % Update seq prefix number is a sum of all shard update sequences - #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0), - #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1), - ?assertEqual(UpdateSeq0 * 2, UpdateSeq1), - - % Finally compare that documents are still there after the split - ?assertEqual(Docs0, Docs1), - - ?assertEqual(PX0, get_partition_info(Db, <<"PX">>)), - ?assertEqual(PY0, get_partition_info(Db, <<"PY">>)), - - % Don't forget about the local but don't include internal checkpoints - % as some of those are munged and transformed during the split - ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1)) - end)}. - - -% Make sure a shard can be split again after it was split once. This checks that -% too many got added to some range, such that on next split they'd fail to fit -% in to any of the new target ranges. 
-split_twice(#{db1 := Db}) -> - {timeout, ?TIMEOUT, ?_test(begin - DocSpec = #{docs => 100, delete => [80, 99], mrview => 2, local => 100}, - add_test_docs(Db, DocSpec), - - % Save documents before the split - Docs0 = get_all_docs(Db), - Local0 = get_local_docs(Db), - - % Set some custom metadata properties - set_revs_limit(Db, 942), - set_purge_infos_limit(Db, 943), - SecObj = {[{<<"foo">>, <<"bar">>}]}, - set_security(Db, SecObj), - - % DbInfo is saved after setting metadata bits - % as those could bump the update sequence - DbInfo0 = get_db_info(Db), - - % Split the one shard - [#shard{name=Shard1}] = lists:sort(mem3:local_shards(Db)), - {ok, JobId1} = mem3_reshard:start_split_job(Shard1), - wait_state(JobId1, completed), - - % Perform some basic checks that the shard was split - Shards1 = lists:sort(mem3:local_shards(Db)), - ?assertEqual(2, length(Shards1)), - [#shard{range = R1}, #shard{range = R2}] = Shards1, - ?assertEqual([16#00000000, 16#7fffffff], R1), - ?assertEqual([16#80000000, 16#ffffffff], R2), - - % Check metadata bits after the split - ?assertEqual(942, get_revs_limit(Db)), - ?assertEqual(943, get_purge_infos_limit(Db)), - ?assertEqual(SecObj, get_security(Db)), - - DbInfo1 = get_db_info(Db), - Docs1 = get_all_docs(Db), - Local1 = get_local_docs(Db), - - % When comparing db infos, ignore update sequences they won't be the - % same since they are more shards involved after the split - ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)), - - % Update seq prefix number is a sum of all shard update sequences - #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0), - #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1), - ?assertEqual(UpdateSeq0 * 2, UpdateSeq1), - - ?assertEqual(Docs0, Docs1), - ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1)), - - % Split the first range again - [#shard{name=Shard2}, _] = lists:sort(mem3:local_shards(Db)), - {ok, JobId2} = mem3_reshard:start_split_job(Shard2), - wait_state(JobId2, completed), - - Shards2 = lists:sort(mem3:local_shards(Db)), - ?assertEqual(3, length(Shards2)), - [R3, R4, R5] = [R || #shard{range = R} <- Shards2], - ?assertEqual([16#00000000, 16#3fffffff], R3), - ?assertEqual([16#40000000, 16#7fffffff], R4), - ?assertEqual([16#80000000, 16#ffffffff], R5), - - % Check metadata bits after the second split - ?assertEqual(942, get_revs_limit(Db)), - ?assertEqual(943, get_purge_infos_limit(Db)), - ?assertEqual(SecObj, get_security(Db)), - - DbInfo2 = get_db_info(Db), - Docs2 = get_all_docs(Db), - Local2 = get_local_docs(Db), - - ?assertEqual(without_seqs(DbInfo1), without_seqs(DbInfo2)), - % Update seq prefix number is a sum of all shard update sequences - % But only 1 shard out of 2 was split - #{<<"update_seq">> := UpdateSeq2} = update_seq_to_num(DbInfo2), - ?assertEqual(trunc(UpdateSeq1 * 1.5), UpdateSeq2), - ?assertEqual(Docs1, Docs2), - ?assertEqual(without_meta_locals(Local1), without_meta_locals(Local2)) - end)}. 
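
% A worked example of the `trunc(UpdateSeq1 * 1.5)` assertion above. After
% the first split both shards sit at some sequence S, so UpdateSeq1 = 2 * S.
% The second split replaces one of those shards with two shards at S each,
% giving S + S + S = 3 * S, i.e. 1.5 times the previous total. (Sketch
% only; S = 10 is arbitrary.)
half_again_sketch() ->
    S = 10,
    UpdateSeq1 = 2 * S,
    UpdateSeq2 = 3 * S,
    ?assertEqual(trunc(UpdateSeq1 * 1.5), UpdateSeq2).
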
- - -couch_events_are_emitted(#{db1 := Db}) -> - {timeout, ?TIMEOUT, ?_test(begin - couch_event:register_all(self()), - - % Split the one shard - [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)), - {ok, JobId} = mem3_reshard:start_split_job(Shard), - wait_state(JobId, completed), - - % Perform some basic checks that the shard was split - Shards1 = lists:sort(mem3:local_shards(Db)), - ?assertEqual(2, length(Shards1)), - [#shard{range = R1}, #shard{range = R2}] = Shards1, - ?assertEqual([16#00000000, 16#7fffffff], R1), - ?assertEqual([16#80000000, 16#ffffffff], R2), - - Flush = fun F(Events) -> - receive - {'$couch_event', DbName, Event} when Event =:= deleted - orelse Event =:= updated -> - case binary:match(DbName, Db) of - nomatch -> F(Events); - {_, _} -> F([Event | Events]) - end - after 0 -> - lists:reverse(Events) - end - end, - Events = Flush([]), - StartAtDeleted = lists:dropwhile(fun(E) -> E =/= deleted end, Events), - ?assertMatch([deleted, deleted, updated, updated | _], StartAtDeleted), - couch_event:unregister(self()) - end)}. - - -retries_work(#{db1 := Db}) -> - {timeout, ?TIMEOUT, ?_test(begin - meck:expect(couch_db_split, split, fun(_, _, _) -> - error(kapow) - end), - - [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)), - {ok, JobId} = mem3_reshard:start_split_job(Shard), - - wait_state(JobId, failed), - ?assertEqual(3, meck:num_calls(couch_db_split, split, 3)) - end)}. - - -target_reset_in_initial_copy(#{db1 := Db}) -> - {timeout, ?TIMEOUT, ?_test(begin - [#shard{} = Src] = lists:sort(mem3:local_shards(Db)), - Job = #job{ - source = Src, - target = [#shard{name= <<"t1">>}, #shard{name = <<"t2">>}], - job_state = running, - split_state = initial_copy - }, - BogusParent = spawn(fun() -> receive {ack, _, _} -> ok end end), - put('$ancestors', [BogusParent]), % make prock_lib:ack not blow up - meck:expect(mem3_reshard, checkpoint, 2, ok), - meck:expect(couch_db_split, cleanup_target, 2, ok), - meck:expect(couch_server, exists, fun - (<<"t1">>) -> true; - (<<"t2">>) -> true; - (DbName) -> meck:passthrough([DbName]) - end), - JobPid = spawn(fun() -> mem3_reshard_job:init(Job) end), - meck:wait(2, couch_db_split, cleanup_target, ['_', '_'], 5000), - exit(JobPid, kill), - exit(BogusParent, kill), - ?assertEqual(2, meck:num_calls(couch_db_split, cleanup_target, 2)) - end)}. - - -split_an_incomplete_shard_map(#{db1 := Db}) -> - {timeout, ?TIMEOUT, ?_test(begin - [#shard{} = Src] = lists:sort(mem3:local_shards(Db)), - [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)), - meck:expect(mem3_util, calculate_max_n, 1, 0), - ?assertMatch({error, {not_enough_shard_copies, _}}, - mem3_reshard:start_split_job(Shard)) - end)}. - - -intercept_state(State) -> - TestPid = self(), - meck:new(mem3_reshard_job, [passthrough]), - meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) -> - case Job#job.split_state of - State -> - TestPid ! {self(), State}, - receive - continue -> meck:passthrough([Job]); - cancel -> ok - end; - _ -> - meck:passthrough([Job]) - end - end). - - -wait_state(JobId, State) -> - test_util:wait(fun() -> - case mem3_reshard:job(JobId) of - {ok, {Props}} -> - case couch_util:get_value(job_state, Props) of - State -> ok; - _ -> timer:sleep(100), wait - end; - {error, not_found} -> timer:sleep(100), wait - end - end, 30000). - - -set_revs_limit(DbName, Limit) -> - with_proc(fun() -> fabric:set_revs_limit(DbName, Limit, [?ADMIN_CTX]) end). - - -get_revs_limit(DbName) -> - with_proc(fun() -> fabric:get_revs_limit(DbName) end). 
- - -get_purge_infos_limit(DbName) -> - with_proc(fun() -> fabric:get_purge_infos_limit(DbName) end). - - -set_purge_infos_limit(DbName, Limit) -> - with_proc(fun() -> - fabric:set_purge_infos_limit(DbName, Limit, [?ADMIN_CTX]) - end). - - -set_security(DbName, SecObj) -> - with_proc(fun() -> fabric:set_security(DbName, SecObj) end). - - -get_security(DbName) -> - with_proc(fun() -> fabric:get_security(DbName, [?ADMIN_CTX]) end). - - -get_db_info(DbName) -> - with_proc(fun() -> - {ok, Info} = fabric:get_db_info(DbName), - maps:with([ - <<"db_name">>, <<"doc_count">>, <<"props">>, <<"doc_del_count">>, - <<"update_seq">>, <<"purge_seq">>, <<"disk_format_version">> - ], to_map(Info)) - end). - - -get_group_info(DbName, DesignId) -> - with_proc(fun() -> - {ok, GInfo} = fabric:get_view_group_info(DbName, DesignId), - maps:with([ - <<"language">>, <<"purge_seq">>, <<"signature">>, <<"update_seq">> - ], to_map(GInfo)) - end). - - -get_partition_info(DbName, Partition) -> - with_proc(fun() -> - {ok, PInfo} = fabric:get_partition_info(DbName, Partition), - maps:with([ - <<"db_name">>, <<"doc_count">>, <<"doc_del_count">>, <<"partition">> - ], to_map(PInfo)) - end). - - -get_all_docs(DbName) -> - get_all_docs(DbName, #mrargs{}). - - -get_all_docs(DbName, #mrargs{} = QArgs0) -> - GL = erlang:group_leader(), - with_proc(fun() -> - Cb = fun - ({row, Props}, Acc) -> - Doc = to_map(couch_util:get_value(doc, Props)), - #{?ID := Id} = Doc, - {ok, Acc#{Id => Doc}}; - ({meta, _}, Acc) -> {ok, Acc}; - (complete, Acc) -> {ok, Acc} - end, - QArgs = QArgs0#mrargs{include_docs = true}, - {ok, Docs} = fabric:all_docs(DbName, Cb, #{}, QArgs), - Docs - end, GL). - - -get_local_docs(DbName) -> - LocalNS = {namespace, <<"_local">>}, - maps:map(fun(_, Doc) -> - maps:without([<<"_rev">>], Doc) - end, get_all_docs(DbName, #mrargs{extra = [LocalNS]})). - - -without_seqs(#{} = InfoMap) -> - maps:without([<<"update_seq">>, <<"purge_seq">>], InfoMap). - - -without_meta_locals(#{} = Local) -> - maps:filter(fun - (<<"_local/purge-mrview-", _/binary>>, _) -> false; - (<<"_local/shard-sync-", _/binary>>, _) -> false; - (_, _) -> true - end, Local). - - -update_seq_to_num(#{} = InfoMap) -> - maps:map(fun - (<<"update_seq">>, Seq) -> seq_to_num(Seq); - (<<"purge_seq">>, PSeq) -> seq_to_num(PSeq); - (_, V) -> V - end, InfoMap). - - -seq_to_num(Seq) -> - [SeqNum, _] = binary:split(Seq, <<"-">>), - binary_to_integer(SeqNum). - - -to_map([_ | _] = Props) -> - to_map({Props}); - -to_map({[_ | _]} = EJson) -> - jiffy:decode(jiffy:encode(EJson), [return_maps]). - - -create_db(DbName, Opts) -> - GL = erlang:group_leader(), - with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL). - - -delete_db(DbName) -> - GL = erlang:group_leader(), - with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL). - - -with_proc(Fun) -> - with_proc(Fun, undefined, 30000). - - -with_proc(Fun, GroupLeader) -> - with_proc(Fun, GroupLeader, 30000). - - -with_proc(Fun, GroupLeader, Timeout) -> - {Pid, Ref} = spawn_monitor(fun() -> - case GroupLeader of - undefined -> ok; - _ -> erlang:group_leader(GroupLeader, self()) - end, - exit({with_proc_res, Fun()}) - end), - receive - {'DOWN', Ref, process, Pid, {with_proc_res, Res}} -> - Res; - {'DOWN', Ref, process, Pid, Error} -> - error(Error) - after Timeout -> - erlang:demonitor(Ref, [flush]), - exit(Pid, kill), - error({with_proc_timeout, Fun, Timeout}) - end. 
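
% A usage sketch for with_proc/1,3 above: the fabric call runs in a
% throwaway process and the result travels back inside the 'DOWN' reason
% via exit({with_proc_res, Fun()}), so anything the call leaves behind in
% the worker (mailbox messages, process dictionary entries, group leader
% tweaks) dies with it instead of leaking into the test process. The db
% name is illustrative:
with_proc_usage_sketch(DbName) ->
    with_proc(fun() -> fabric:get_db_info(DbName) end).
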
- - -add_test_docs(DbName, #{} = DocSpec) -> - Docs = docs(maps:get(docs, DocSpec, [])) - ++ pdocs(maps:get(pdocs, DocSpec, #{})) - ++ ddocs(mrview, maps:get(mrview, DocSpec, [])) - ++ ddocs(search, maps:get(search, DocSpec, [])) - ++ ddocs(geo, maps:get(geo, DocSpec, [])) - ++ ldocs(maps:get(local, DocSpec, [])), - Res = update_docs(DbName, Docs), - Docs1 = lists:map(fun({Doc, {ok, {RevPos, Rev}}}) -> - Doc#doc{revs = {RevPos, [Rev]}} - end, lists:zip(Docs, Res)), - case delete_docs(maps:get(delete, DocSpec, []), Docs1) of - [] -> ok; - [_ | _] = Deleted -> update_docs(DbName, Deleted) - end, - ok. - - -update_docs(DbName, Docs) -> - with_proc(fun() -> - case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of - {accepted, Res} -> Res; - {ok, Res} -> Res - end - end). - - -delete_docs([S, E], Docs) when E >= S -> - ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)], - lists:filtermap(fun(#doc{id = Id} = Doc) -> - case lists:member(Id, ToDelete) of - true -> {true, Doc#doc{deleted = true}}; - false -> false - end - end, Docs); -delete_docs(_, _) -> - []. - - -pdocs(#{} = PMap) -> - maps:fold(fun(Part, DocSpec, DocsAcc) -> - docs(DocSpec, <>) ++ DocsAcc - end, [], PMap). - - -docs(DocSpec) -> - docs(DocSpec, <<"">>). - - -docs(N, Prefix) when is_integer(N), N > 0 -> - docs([0, N - 1], Prefix); -docs([S, E], Prefix) when E >= S -> - [doc(Prefix, I) || I <- lists:seq(S, E)]; -docs(_, _) -> - []. - -ddocs(Type, N) when is_integer(N), N > 0 -> - ddocs(Type, [0, N - 1]); -ddocs(Type, [S, E]) when E >= S -> - Body = ddprop(Type), - BType = atom_to_binary(Type, utf8), - [doc(<<"_design/", BType/binary>>, I, Body, 0) || I <- lists:seq(S, E)]; -ddocs(_, _) -> - []. - - -ldocs(N) when is_integer(N), N > 0 -> - ldocs([0, N - 1]); -ldocs([S, E]) when E >= S -> - [doc(<<"_local/">>, I, bodyprops(), 0) || I <- lists:seq(S, E)]; -ldocs(_) -> - []. - - - -doc(Pref, Id) -> - Body = bodyprops(), - doc(Pref, Id, Body, 42). - - -doc(Pref, Id, BodyProps, AttSize) -> - #doc{ - id = doc_id(Pref, Id), - body = {BodyProps}, - atts = atts(AttSize) - }. - - -doc_id(Pref, Id) -> - IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])), - <>. - - -ddprop(mrview) -> - [ - {<<"views">>, {[ - {<<"v1">>, {[ - {<<"map">>, <<"function(d){emit(d);}">>} - ]}} - ]}} - ]; - -ddprop(geo) -> - [ - {<<"st_indexes">>, {[ - {<<"area">>, {[ - {<<"analyzer">>, <<"standard">>}, - {<<"index">>, <<"function(d){if(d.g){st_index(d.g)}}">> } - ]}} - ]}} - ]; - -ddprop(search) -> - [ - {<<"indexes">>, {[ - {<<"types">>, {[ - {<<"index">>, <<"function(d){if(d.g){st_index(d.g.type)}}">>} - ]}} - ]}} - ]. - - -bodyprops() -> - [ - {<<"g">>, {[ - {<<"type">>, <<"Polygon">>}, - {<<"coordinates">>, [[[-71.0, 48.4], [-70.0, 48.4], [-71.0, 48.4]]]} - ]}} - ]. - - -atts(0) -> - []; - -atts(Size) when is_integer(Size), Size >= 1 -> - Data = << <<"x">> || _ <- lists:seq(1, Size) >>, - [couch_att:new([ - {name, <<"att">>}, - {type, <<"app/binary">>}, - {att_len, Size}, - {data, Data} - ])]. diff --git a/src/mem3/test/mem3_ring_prop_tests.erl b/src/mem3/test/mem3_ring_prop_tests.erl deleted file mode 100644 index 9f4f86f5f..000000000 --- a/src/mem3/test/mem3_ring_prop_tests.erl +++ /dev/null @@ -1,144 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(mem3_ring_prop_tests). - - --include_lib("triq/include/triq.hrl"). --triq(eunit). - - -% Properties - -prop_get_ring_with_connected_intervals() -> - ?FORALL({Start, End}, oneof(ranges()), - ?FORALL(Intervals, g_connected_intervals(Start, End), - mem3_util:get_ring(Intervals, Start, End) =:= lists:sort(Intervals) - ) - ). - - -prop_get_ring_connected_plus_random_intervals() -> - ?FORALL({Intervals, Extra}, {g_connected_intervals(1, 100), - g_random_intervals(1, 100)}, - ?IMPLIES(sets:is_disjoint(endpoints(Intervals), endpoints(Extra)), - begin - AllInts = Intervals ++ Extra, - Ring = mem3_util:get_ring(AllInts, 1, 100), - Ring =:= lists:sort(Intervals) - end - ) - ). - - -prop_get_ring_connected_with_sub_intervals() -> - ?FORALL(Intervals, g_connected_intervals(1, 100), - ?FORALL(SubIntervals, g_subintervals(Intervals), - begin - AllInts = Intervals ++ SubIntervals, - Ring = mem3_util:get_ring(AllInts, 1, 100), - Ring =:= lists:sort(Intervals) - end - ) - ). - - -prop_get_ring_with_disconnected_intervals() -> - ?FORALL({Start, End}, oneof(ranges()), - ?FORALL(Intervals, g_disconnected_intervals(Start, End), - mem3_util:get_ring(Intervals, Start, End) =:= [] - ) - ). - - -% Generators - -ranges() -> - [{1, 10}, {0, 2 bsl 31 - 1}, {2 bsl 31 - 10, 2 bsl 31 - 1}]. - - -g_connected_intervals(Begin, End) -> - ?SIZED(Size, g_connected_intervals(Begin, End, 5 * Size)). - - -g_connected_intervals(Begin, End, Split) when Begin =< End -> - ?LET(N, choose(0, Split), - begin - if - N == 0 -> - [{Begin, End}]; - N > 0 -> - Ns = lists:seq(1, N - 1), - Bs = lists:usort([rand_range(Begin, End) || _ <- Ns]), - Es = [B - 1 || B <- Bs], - shuffle(lists:zip([Begin] ++ Bs, Es ++ [End])) - end - end). - - -g_non_trivial_connected_intervals(Begin, End, Split) -> - ?SUCHTHAT(Connected, g_connected_intervals(Begin, End, Split), - length(Connected) > 1). - - -g_disconnected_intervals(Begin, End) -> - ?SIZED(Size, g_disconnected_intervals(Begin, End, Size)). - - -g_disconnected_intervals(Begin, End, Split) when Begin =< End -> - ?LET(Connected, g_non_trivial_connected_intervals(Begin, End, Split), - begin - I = triq_rnd:uniform(length(Connected)) - 1, - {Before, [_ | After]} = lists:split(I, Connected), - Before ++ After - end). - - -g_subintervals(Intervals) -> - lists:foldl(fun(R, Acc) -> split_interval(R) ++ Acc end, [], Intervals). - - -split_interval({B, E}) when E - B >= 2 -> - E1 = rand_range(B, E) - 1, - B1 = E1 + 1, - [{B, E1}, {B1, E}]; - -split_interval(_Range) -> - []. - - -g_random_intervals(Start, End) -> - ?LET(N, choose(1, 10), - begin - [begin - B = rand_range(Start, End), - E = rand_range(B, End), - {B, E} - end || _ <- lists:seq(1, N)] - end). - - -rand_range(B, B) -> - B; - -rand_range(B, E) -> - B + triq_rnd:uniform(E - B). - - -shuffle(L) -> - Tagged = [{triq_rnd:uniform(), X} || X <- L], - [X || {_, X} <- lists:sort(Tagged)]. - - -endpoints(Ranges) -> - {Begins, Ends} = lists:unzip(Ranges), - sets:from_list(Begins ++ Ends). 
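
% Concrete instances of the properties above, assuming the
% mem3_util:get_ring/3 semantics they exercise: intervals form a ring over
% [Start, End] exactly when, once sorted, each interval starts where the
% previous one ended plus one. Sketch only:
get_ring_sketch() ->
    % connected: {1,5} ends at 5 and {6,10} starts at 6 -> a complete ring
    [{1, 5}, {6, 10}] = mem3_util:get_ring([{6, 10}, {1, 5}], 1, 10),
    % disconnected: nothing covers 6 -> no ring can be built
    [] = mem3_util:get_ring([{1, 5}, {7, 10}], 1, 10).
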
diff --git a/src/mem3/test/mem3_seeds_test.erl b/src/mem3/test/mem3_seeds_test.erl deleted file mode 100644 index ba83b66be..000000000 --- a/src/mem3/test/mem3_seeds_test.erl +++ /dev/null @@ -1,69 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(mem3_seeds_test). - --include_lib("couch/include/couch_eunit.hrl"). - -a_test_() -> - Tests = [ - {"empty seedlist should set status ok", fun empty_seedlist_status_ok/0}, - {"all seedlist nodes unreachable keeps status seeding", fun seedlist_misconfiguration/0}, - {"seedlist entries should be present in _nodes", fun check_nodelist/0} - ], - {setup, fun setup/0, fun teardown/1, Tests}. - -empty_seedlist_status_ok() -> - ok = application:start(mem3), - try - {ok, {Result}} = mem3_seeds:get_status(), - ?assertEqual({[]}, couch_util:get_value(seeds, Result)), - ?assertEqual(ok, couch_util:get_value(status, Result)) - after - cleanup() - end. - -seedlist_misconfiguration() -> - config:set("cluster", "seedlist", "couchdb@node1.example.com,couchdb@node2.example.com", false), - ok = application:start(mem3), - try - {ok, {Result}} = mem3_seeds:get_status(), - {Seeds} = couch_util:get_value(seeds, Result), - ?assertEqual(2, length(Seeds)), - ?assertMatch({_}, couch_util:get_value('couchdb@node1.example.com', Seeds)), - ?assertMatch({_}, couch_util:get_value('couchdb@node2.example.com', Seeds)), - ?assertEqual(seeding, couch_util:get_value(status, Result)) - after - cleanup() - end. - -check_nodelist() -> - config:set("cluster", "seedlist", "couchdb@node1.example.com,couchdb@node2.example.com", false), - ok = application:start(mem3), - try - Nodes = mem3:nodes(), - ?assert(lists:member('couchdb@node1.example.com', Nodes)), - ?assert(lists:member('couchdb@node2.example.com', Nodes)) - after - cleanup() - end. - -cleanup() -> - application:stop(mem3), - Filename = config:get("mem3", "nodes_db", "_nodes") ++ ".couch", - file:delete(filename:join([?BUILDDIR(), "tmp", "data", Filename])). - -setup() -> - test_util:start_couch([rexi]). - -teardown(Ctx) -> - test_util:stop_couch(Ctx). diff --git a/src/mem3/test/mem3_sync_security_test.erl b/src/mem3/test/mem3_sync_security_test.erl deleted file mode 100644 index e67a72017..000000000 --- a/src/mem3/test/mem3_sync_security_test.erl +++ /dev/null @@ -1,54 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(mem3_sync_security_test). - --include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). --include("mem3.hrl"). --include_lib("eunit/include/eunit.hrl"). 
- --define(TIMEOUT, 5). % seconds - -go_test_() -> - { - "security property sync test", - { - setup, - fun start_couch/0, fun stop_couch/1, - { - foreach, - fun setup/0, fun teardown/1, - [ - fun sync_security_ok/1 - ] - } - } - }. - -start_couch() -> - test_util:start_couch([fabric, mem3]). - -stop_couch(Ctx) -> - test_util:stop_couch(Ctx). - -setup() -> - ok = meck:new(fabric, [passthrough]), - meck:expect(fabric, all_dbs, fun() -> - {ok, [<<"NoExistDb1">>, <<"NoExistDb2">>]} - end). - -teardown(_) -> - meck:unload(). - -sync_security_ok(_) -> - {timeout, ?TIMEOUT, ?_assertEqual(ok, mem3_sync_security:go())}. diff --git a/src/mem3/test/mem3_util_test.erl b/src/mem3/test/mem3_util_test.erl deleted file mode 100644 index 8b74c4b2b..000000000 --- a/src/mem3/test/mem3_util_test.erl +++ /dev/null @@ -1,130 +0,0 @@ -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - --module(mem3_util_test). - --include("mem3.hrl"). --include_lib("eunit/include/eunit.hrl"). - -name_shard_test() -> - Shard1 = #shard{}, - ?assertError(function_clause, mem3_util:name_shard(Shard1, ".1234")), - - Shard2 = #shard{dbname = <<"testdb">>, range = [0,100]}, - #shard{name=Name2} = mem3_util:name_shard(Shard2, ".1234"), - ?assertEqual(<<"shards/00000000-00000064/testdb.1234">>, Name2), - - ok. - -create_partition_map_test() -> - {DbName1, N1, Q1, Nodes1} = {<<"testdb1">>, 3, 4, [a,b,c,d]}, - Map1 = mem3_util:create_partition_map(DbName1, N1, Q1, Nodes1), - ?assertEqual(12, length(Map1)), - - {DbName2, N2, Q2, Nodes2} = {<<"testdb2">>, 1, 1, [a,b,c,d]}, - [#shard{name=Name2,node=Node2}] = Map2 = - mem3_util:create_partition_map(DbName2, N2, Q2, Nodes2, ".1234"), - ?assertEqual(1, length(Map2)), - ?assertEqual(<<"shards/00000000-ffffffff/testdb2.1234">>, Name2), - ?assertEqual(a, Node2), - ok. 
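
% Notes on the two tests above, as a small sketch: name_shard/2 renders the
% range bounds as zero-padded hex, so [0, 100] becomes
% "00000000-00000064" (16#64 = 100), and a partition map holds one shard
% record per copy per range, i.e. N * Q entries (12 for N = 3, Q = 4).
shard_naming_sketch() ->
    ?assertEqual("00000064", lists:flatten(io_lib:format("~8.16.0b", [100]))),
    ?assertEqual(12, 3 * 4).
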
- -build_shards_test() -> - DocProps1 = - [{<<"changelog">>, - [[<<"add">>,<<"00000000-1fffffff">>, - <<"bigcouch@node.local">>], - [<<"add">>,<<"20000000-3fffffff">>, - <<"bigcouch@node.local">>], - [<<"add">>,<<"40000000-5fffffff">>, - <<"bigcouch@node.local">>], - [<<"add">>,<<"60000000-7fffffff">>, - <<"bigcouch@node.local">>], - [<<"add">>,<<"80000000-9fffffff">>, - <<"bigcouch@node.local">>], - [<<"add">>,<<"a0000000-bfffffff">>, - <<"bigcouch@node.local">>], - [<<"add">>,<<"c0000000-dfffffff">>, - <<"bigcouch@node.local">>], - [<<"add">>,<<"e0000000-ffffffff">>, - <<"bigcouch@node.local">>]]}, - {<<"by_node">>, - {[{<<"bigcouch@node.local">>, - [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>, - <<"40000000-5fffffff">>,<<"60000000-7fffffff">>, - <<"80000000-9fffffff">>,<<"a0000000-bfffffff">>, - <<"c0000000-dfffffff">>,<<"e0000000-ffffffff">>]}]}}, - {<<"by_range">>, - {[{<<"00000000-1fffffff">>,[<<"bigcouch@node.local">>]}, - {<<"20000000-3fffffff">>,[<<"bigcouch@node.local">>]}, - {<<"40000000-5fffffff">>,[<<"bigcouch@node.local">>]}, - {<<"60000000-7fffffff">>,[<<"bigcouch@node.local">>]}, - {<<"80000000-9fffffff">>,[<<"bigcouch@node.local">>]}, - {<<"a0000000-bfffffff">>,[<<"bigcouch@node.local">>]}, - {<<"c0000000-dfffffff">>,[<<"bigcouch@node.local">>]}, - {<<"e0000000-ffffffff">>,[<<"bigcouch@node.local">>]}]}}], - Shards1 = mem3_util:build_shards(<<"testdb1">>, DocProps1), - ExpectedShards1 = - [{shard,<<"shards/00000000-1fffffff/testdb1">>, - 'bigcouch@node.local',<<"testdb1">>, - [0,536870911], - undefined,[]}, - {shard,<<"shards/20000000-3fffffff/testdb1">>, - 'bigcouch@node.local',<<"testdb1">>, - [536870912,1073741823], - undefined,[]}, - {shard,<<"shards/40000000-5fffffff/testdb1">>, - 'bigcouch@node.local',<<"testdb1">>, - [1073741824,1610612735], - undefined,[]}, - {shard,<<"shards/60000000-7fffffff/testdb1">>, - 'bigcouch@node.local',<<"testdb1">>, - [1610612736,2147483647], - undefined,[]}, - {shard,<<"shards/80000000-9fffffff/testdb1">>, - 'bigcouch@node.local',<<"testdb1">>, - [2147483648,2684354559], - undefined,[]}, - {shard,<<"shards/a0000000-bfffffff/testdb1">>, - 'bigcouch@node.local',<<"testdb1">>, - [2684354560,3221225471], - undefined,[]}, - {shard,<<"shards/c0000000-dfffffff/testdb1">>, - 'bigcouch@node.local',<<"testdb1">>, - [3221225472,3758096383], - undefined,[]}, - {shard,<<"shards/e0000000-ffffffff/testdb1">>, - 'bigcouch@node.local',<<"testdb1">>, - [3758096384,4294967295], - undefined,[]}], - ?assertEqual(ExpectedShards1, Shards1), - ok. - - -%% n_val tests - -nval_test_() -> - { - setup, - fun() -> - meck:new([config, couch_log]), - meck:expect(couch_log, error, 2, ok), - meck:expect(config, get, 3, "5") - end, - fun(_) -> meck:unload() end, - [ - ?_assertEqual(2, mem3_util:n_val(2, 4)), - ?_assertEqual(1, mem3_util:n_val(-1, 4)), - ?_assertEqual(4, mem3_util:n_val(6, 4)), - ?_assertEqual(5, mem3_util:n_val(undefined, 6)) - ] - }. 
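
% An editorial reading of the clamping rule pinned down by nval_test_/0
% above, with the config default mocked to "5": n_val appears to cap the
% requested N to [1, NodeCount] and fall back to the configured default
% when N is undefined. A sketch of that rule, not the actual
% implementation:
n_val_clamp_sketch(undefined, NodeCount) -> min(5, NodeCount);
n_val_clamp_sketch(N, _NodeCount) when N < 1 -> 1;
n_val_clamp_sketch(N, NodeCount) when N > NodeCount -> NodeCount;
n_val_clamp_sketch(N, _NodeCount) -> N.
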
-- cgit v1.2.1


From d0ccfa2b463b7087c1c3a766ca3ae963c1d1738d Mon Sep 17 00:00:00 2001
From: ILYA Khlopotov
Date: Mon, 27 May 2019 19:17:16 +0000
Subject: Add chained setups

---
 test/elixir/README.md             | 143 ++++++++++++++++++++++++++++++++++++++
 test/elixir/lib/ex_unit.ex        |  44 ++++++++++++
 test/elixir/lib/setup.ex          |  97 ++++++++++++++++++++++++++
 test/elixir/lib/setup/common.ex   |  22 ++++++
 test/elixir/lib/step.ex           |  44 ++++++++++++
 test/elixir/lib/step/config.ex    |  33 +++++++++
 test/elixir/lib/step/create_db.ex |  53 ++++++++++++++
 test/elixir/lib/step/start.ex     |  85 ++++++++++++++++++++++
 test/elixir/lib/step/user.ex      | 104 +++++++++++++++++++++++++++
 test/elixir/lib/utils.ex          |  60 ++++++++++++++++
 10 files changed, 685 insertions(+)
 create mode 100644 test/elixir/lib/ex_unit.ex
 create mode 100644 test/elixir/lib/setup.ex
 create mode 100644 test/elixir/lib/setup/common.ex
 create mode 100644 test/elixir/lib/step.ex
 create mode 100644 test/elixir/lib/step/config.ex
 create mode 100644 test/elixir/lib/step/create_db.ex
 create mode 100644 test/elixir/lib/step/start.ex
 create mode 100644 test/elixir/lib/step/user.ex
 create mode 100644 test/elixir/lib/utils.ex

diff --git a/test/elixir/README.md b/test/elixir/README.md
index a59b4df90..f7691ad3c 100644
--- a/test/elixir/README.md
+++ b/test/elixir/README.md
@@ -111,3 +111,146 @@ X means done, - means partially
 - [ ] Port view_pagination.js
 - [ ] Port view_sandboxing.js
 - [ ] Port view_update_seq.js
+
+# Using ExUnit to write unit tests
+
+Elixir has a number of benefits which make writing unit tests easier.
+For example, it is trivial to do code generation of tests.
+Below we present a few use cases where code generation is really helpful.
+
+## How to write ExUnit tests
+
+1. Create a new file in the test/exunit/ directory (the file name should match *_test.exs)
+2. If it is the first file in the directory, create test_helper.exs (look at src/couch/test/exunit/test_helper.exs to get an idea)
+3. Define a test module which does `use Couch.Test.ExUnit.Case`
+4. Define test cases in the module
+
+You can run tests either:
+- using make: `make exunit`
+- using mix: BUILDDIR=`pwd` ERL_LIBS=`pwd`/src MIX_ENV=test mix test --trace
+
+## Generating tests from spec
+
+Sometimes we have some data in a structured format and want
+to generate test cases using that data. This is easy in Elixir.
+For example, suppose we have the following spec:
+```
+{
+    "{db_name}/_view_cleanup": {
+      "roles": ["_admin"]
+    }
+}
+```
+We can use this spec to generate test cases:
+```
+defmodule GenerateTestsFromSpec do
+  use ExUnit.Case
+  require Record
+  Record.defrecordp :user_ctx, Record.extract(:user_ctx, from_lib: "couch/include/couch_db.hrl")
+  Record.defrecordp :httpd, Record.extract(:httpd, from_lib: "couch/include/couch_db.hrl")
+
+  {:ok, spec_bin} = File.read("roles.json")
+  spec = :jiffy.decode(spec_bin, [:return_maps])
+  Enum.each spec, fn {path, path_spec} ->
+    roles = path_spec["roles"]
+    @roles roles
+    @path_parts String.split(path, "/")
+    test "Access with `#{inspect(roles)}` roles" do
+      req = httpd(path_parts: @path_parts, user_ctx: user_ctx(roles: @roles))
+      :chttpd_auth_request.authorize_request(req)
+    end
+  end
+end
+```
+As a result we would get:
+```
+GenerateTestsFromSpec
+  * test Access with `["_admin"]` roles (0.00ms)
+```
+
+## Test all possible combinations
+
+Sometimes we want to test all possible permutations for parameters.
+This can be accomplished using something like the following:
+
+```
+defmodule Permutations do
+  use ExUnit.Case
+  pairs = :couch_tests_combinatorics.product([
+    [:remote, :local], [:remote, :local]
+  ])
+  for [source, dest] <- pairs do
+    @source source
+    @dest dest
+    test "Replication #{source} -> #{dest}" do
+      assert :ok == :ok
+    end
+  end
+end
+```
+
+This would produce the following tests:
+```
+Permutations
+  * test Replication remote -> remote (0.00ms)
+  * test Replication local -> remote (0.00ms)
+  * test Replication remote -> local (0.00ms)
+  * test Replication local -> local (0.00ms)
+```
+
+## Reusing common setups
+
+The setup functions are quite similar in lots of tests, therefore it makes
+sense to reuse them. The idea is to add shared setup functions into either:
+- test/elixir/lib/setup/common.ex
+- test/elixir/lib/setup/<module_name>.ex
+
+The setup functions look like the following:
+```
+defmodule Foo do
+  alias Couch.Test.Setup.Step
+
+  def httpd_with_admin(setup) do
+    setup
+    |> Step.Start.new(:start, extra_apps: [:chttpd])
+    |> Step.User.new(:admin, roles: [:server_admin])
+  end
+end
+```
+
+These parts of a setup chain can be invoked as follows:
+```
+defmodule Couch.Test.CRUD do
+  use Couch.Test.ExUnit.Case
+  alias Couch.Test.Utils
+
+  alias Couch.Test.Setup
+
+  alias Couch.Test.Setup.Step
+
+  def with_db(context, setup) do
+    setup =
+      setup
+      |> Setup.Common.httpd_with_db()
+      |> Setup.run()
+
+    context =
+      Map.merge(context, %{
+        db_name: setup |> Setup.get(:db) |> Step.Create.DB.name(),
+        base_url: setup |> Setup.get(:start) |> Step.Start.clustered_url(),
+        user: setup |> Setup.get(:admin) |> Step.User.name()
+      })
+
+    {context, setup}
+  end
+
+  describe "Database CRUD using Fabric API" do
+    @describetag setup: &__MODULE__.with_db/2
+    test "Create DB", ctx do
+      IO.puts("base_url: #{ctx.base_url}")
+      IO.puts("admin: #{ctx.user}")
+      IO.puts("db_name: #{ctx.db_name}")
+    end
+  end
+end
+```
\ No newline at end of file
diff --git a/test/elixir/lib/ex_unit.ex b/test/elixir/lib/ex_unit.ex
new file mode 100644
index 000000000..7abba07ef
--- /dev/null
+++ b/test/elixir/lib/ex_unit.ex
@@ -0,0 +1,44 @@
+defmodule Couch.Test.ExUnit.Case do
+  @moduledoc """
+  Template for an ExUnit test case. It can be used as follows:
+  ```
+  defmodule Couch.Test.CRUD do
+    use Couch.Test.ExUnit.Case
+    ...
+    def with_db(context, setup) do
+      setup = setup
+        |> Step.Start.new(:start, extra_apps: [:chttpd])
+        |> Setup.run
+      context = Map.merge(context, %{
+        base_url: setup |> Setup.get(:start) |> Step.Start.clustered_url
+      })
+      {context, setup}
+    end
+    describe "Group of tests" do
+      @describetag setup: &__MODULE__.with_db/2
+      test "Single test in a group", ctx do
+        ctx.base_url
+      end
+      ...
+    end
+  ```
+  """
+
+  use ExUnit.CaseTemplate
+  alias Couch.Test.Setup
+
+  using do
+    quote do
+      require Logger
+      use ExUnit.Case
+    end
+  end
+
+  setup context do
+    case context do
+      %{:setup => setup_fun} ->
+        {:ok, Setup.setup(context, setup_fun)}
+      _ -> {:ok, context}
+    end
+  end
+end
\ No newline at end of file
diff --git a/test/elixir/lib/setup.ex b/test/elixir/lib/setup.ex
new file mode 100644
index 000000000..037988521
--- /dev/null
+++ b/test/elixir/lib/setup.ex
@@ -0,0 +1,97 @@
+defmodule Couch.Test.Setup do
+  @moduledoc """
+  Allows chaining of setup functions.
+  Example usage:
+
+  ```
+  alias Couch.Test.Utils
+  def with_db_name(context, setup) do
+    setup =
+      setup
+      |> Step.Start.new(:start, extra_apps: [:chttpd])
+      |> Step.User.new(:admin, roles: [:server_admin])
+      |> Setup.run()
+
+    context =
+      Map.merge(context, %{
+        db_name: Utils.random_name("db"),
+        base_url: setup |> Setup.get(:start) |> Step.Start.clustered_url(),
+        user: setup |> Setup.get(:admin) |> Step.User.name()
+      })
+    {context, setup}
+  end
+
+  @tag setup: &__MODULE__.with_db_name/2
+  test "Create", %{db_name: db_name, user: user} do
+    ...
+  end
+  ```
+  """
+  import ExUnit.Callbacks, only: [on_exit: 1]
+  import ExUnit.Assertions, only: [assert: 2]
+  require Logger
+
+  alias Couch.Test.Setup
+  alias Couch.Test.Setup.Step
+
+  defstruct stages: [], by_type: %{}, state: %{}
+
+  def step(%Setup{stages: stages} = setup, id, step) do
+    %{setup | stages: [{id, step} | stages]}
+  end
+
+  defp setup_step({id, step}, %Setup{state: state, by_type: by_type} = setup) do
+    %module{} = step
+    # credo:disable-for-next-line Credo.Check.Warning.LazyLogging
+    Logger.debug("Calling 'setup/2' for '#{module}'")
+    step = module.setup(setup, step)
+    state = Map.put(state, id, step)
+    by_type = Map.update(by_type, module, [id], fn ids -> [id | ids] end)
+    on_exit(fn ->
+      # credo:disable-for-next-line Credo.Check.Warning.LazyLogging
+      Logger.debug("Calling 'teardown/3' for '#{module}'")
+      try do
+        module.teardown(setup, step)
+        :ok
+      catch
+        _ -> :ok
+        _, _ -> :ok
+      end
+    end)
+    {{id, step}, %{setup | state: state, by_type: by_type}}
+  end
+
+  def run(%Setup{stages: stages} = setup) do
+    {stages, setup} =
+      stages
+      |> Enum.reverse()
+      |> Enum.map_reduce(setup, &setup_step/2)
+    %{setup | stages: stages}
+  end
+
+  def setup(ctx) do
+    Map.get(ctx, :__setup)
+  end
+
+  def setup(ctx, setup_fun) do
+    setup = %Setup{} |> Step.Config.new(:test_config, config_file: nil)
+    {ctx, setup} = setup_fun.(ctx, setup)
+    assert not Map.has_key?(ctx, :__setup), "Key `__setup` is reserved for internal purposes"
+    Map.put(ctx, :__setup, setup)
+  end
+
+  def completed?(%Setup{by_type: by_type}, step) do
+    Map.has_key?(by_type, step)
+  end
+
+  def all_for(%Setup{by_type: by_type, state: state}, step_module) do
+    Map.take(state, by_type[step_module] || [])
+  end
+
+  def reduce_for(setup, step_module, acc, fun) do
+    Enum.reduce(all_for(setup, step_module), acc, fun)
+  end
+
+  def get(%Setup{state: state}, id) do
+    state[id]
+  end
+
+end
\ No newline at end of file
diff --git a/test/elixir/lib/setup/common.ex b/test/elixir/lib/setup/common.ex
new file mode 100644
index 000000000..3b59e9476
--- /dev/null
+++ b/test/elixir/lib/setup/common.ex
@@ -0,0 +1,22 @@
+defmodule Couch.Test.Setup.Common do
+  @moduledoc """
+  A set of common setup pipelines for reuse
+
+  - httpd_with_admin - chttpd is started and a new admin is created
+  - httpd_with_db - httpd_with_admin and a new database is created
+  """
+  alias Couch.Test.Setup.Step
+
+  def httpd_with_admin(setup) do
+    setup
+    |> Step.Start.new(:start, extra_apps: [:chttpd])
+    |> Step.User.new(:admin, roles: [:server_admin])
+  end
+
+  def httpd_with_db(setup) do
+    setup
+    |> httpd_with_admin()
+    |> Step.Create.DB.new(:db)
+  end
+
+end
\ No newline at end of file
diff --git a/test/elixir/lib/step.ex b/test/elixir/lib/step.ex
new file mode 100644
index 000000000..316d765aa
--- /dev/null
+++ b/test/elixir/lib/step.ex
@@ -0,0 +1,44 @@
+defmodule Couch.Test.Setup.Step do
+  @moduledoc """
+  A behaviour module for implementing custom setup steps for future reuse.
+
+  Every module implementing this behaviour must implement the following three functions:
+  - new
+  - setup
+  - teardown
+
+  Here is an example of a custom step:
+  ```
+  defmodule Couch.Test.Setup.Step.Foo do
+
+    alias Couch.Test.Setup
+
+    defstruct [:foo_data, :foo_arg]
+
+    def new(setup, id, arg: arg) do
+      setup |> Setup.step(id, %__MODULE__{foo_arg: arg})
+    end
+
+    def setup(_setup, %__MODULE__{foo_arg: arg} = step) do
+      ...
+      foo_data = ...
+      %{step | foo_data: foo_data}
+    end
+
+    def teardown(_setup, _step) do
+    end
+
+    def get_data(%__MODULE__{foo_data: data}) do
+      data
+    end
+  end
+  ```
+  """
+  @type t :: struct()
+  @callback new(setup :: %Couch.Test.Setup{}, id :: atom(), args: Keyword.t()) ::
+    %Couch.Test.Setup{}
+  @callback setup(setup :: %Couch.Test.Setup{}, step :: t()) ::
+    t()
+  @callback teardown(setup :: %Couch.Test.Setup{}, step :: t()) ::
+    any()
+end
\ No newline at end of file
diff --git a/test/elixir/lib/step/config.ex b/test/elixir/lib/step/config.ex
new file mode 100644
index 000000000..9d9ac8eab
--- /dev/null
+++ b/test/elixir/lib/step/config.ex
@@ -0,0 +1,33 @@
+defmodule Couch.Test.Setup.Step.Config do
+  @moduledoc """
+  This setup reads the configuration for a test run.
+  It is not supposed to be called manually.
+  """
+
+  alias Couch.Test.Setup
+
+  defstruct [:config, :config_file]
+
+  def new(setup, id, config_file: config_file) do
+    setup |> Setup.step(id, %__MODULE__{config_file: config_file})
+  end
+
+  def setup(_setup, %__MODULE__{config_file: _config_file} = step) do
+    # TODO we would need to access the config file here
+    %{step | config: %{
+      backdoor: %{
+        protocol: "http"
+      },
+      clustered: %{
+        protocol: "http"
+      }
+    }}
+  end
+
+  def teardown(_setup, _step) do
+  end
+
+  def get(%__MODULE__{config: config}) do
+    config
+  end
+end
\ No newline at end of file
diff --git a/test/elixir/lib/step/create_db.ex b/test/elixir/lib/step/create_db.ex
new file mode 100644
index 000000000..3cca3c55a
--- /dev/null
+++ b/test/elixir/lib/step/create_db.ex
@@ -0,0 +1,53 @@
+defmodule Couch.Test.Setup.Step.Create.DB do
+  @moduledoc """
+  This setup step creates a database with a given name.
+  If a name is not provided, a random name is used.
+
+  Example
+    setup
+      ...
+      |> Setup.Step.Create.DB.new(:db)
+      ...
+      |> Setup.run
+      ...
+
+    db_name = setup |> Setup.get(:db) |> Setup.Step.Create.DB.name
+  """
+  alias Couch.Test.Setup
+  alias Couch.Test.Setup.Step
+  alias Couch.Test.Utils
+
+  defstruct [:name]
+
+  import ExUnit.Assertions, only: [assert: 1, assert: 2]
+
+  import Utils
+
+  @admin {:user_ctx, user_ctx(roles: ["_admin"])}
+
+  def new(setup, id) do
+    new(setup, id, name: Utils.random_name("db"))
+  end
+
+  def new(setup, id, name: name) do
+    setup |> Setup.step(id, %__MODULE__{name: name})
+  end
+
+  def setup(setup, %__MODULE__{name: name} = step) do
+    assert Setup.completed?(setup, Step.Start), "Require `Start` step"
+    assert :fabric in Step.Start.apps(), "Fabric is not started"
+    res = :fabric.create_db(name, [@admin])
+    assert res in [:ok, :accepted], "Cannot create `#{name}` database"
+    step
+  end
+
+  def teardown(_setup, %__MODULE__{name: name} = _step) do
+    :fabric.delete_db(name, [@admin])
+    :ok
+  end
+
+  def name(%__MODULE__{name: name}) do
+    name
+  end
+
+end
\ No newline at end of file
diff --git a/test/elixir/lib/step/start.ex b/test/elixir/lib/step/start.ex
new file mode 100644
index 000000000..ea7c70f5a
--- /dev/null
+++ b/test/elixir/lib/step/start.ex
@@ -0,0 +1,85 @@
+defmodule Couch.Test.Setup.Step.Start do
+  @moduledoc """
+  Step to start a set of couchdb applications. By default it starts
+    By default it starts the list of applications from the DEFAULT_APPS
+    macro defined in `test_util.erl`.
+    At the time of writing this list included:
+    - inets
+    - ibrowse
+    - ssl
+    - config
+    - couch_epi
+    - couch_event
+    - couch
+
+    It is possible to specify an additional list of applications to start.
+
+    This setup also maintains `clustered_url` and `backdoor_url` for future use.
+    The value of `clustered_url` can be nil if the :chttpd app is not included in extra_apps.
+
+    Example
+      setup
+        |> Setup.Step.Start.new(:start, extra_apps: [:fabric, :chttpd])
+        ...
+        |> Setup.run
+        ...
+
+      started_apps = Setup.Step.Start.apps
+      clustered_url = setup |> Setup.get(:start) |> Setup.Step.Start.clustered_url
+      backdoor_url = setup |> Setup.get(:start) |> Setup.Step.Start.backdoor_url
+  """
+  alias Couch.Test.Setup
+  alias Couch.Test.Setup.Step
+
+  defstruct [:test_ctx, :extra_apps, :clustered_url, :backdoor_url]
+
+  def new(setup, id, extra_apps: extra_apps) do
+    setup |> Setup.step(id, %__MODULE__{extra_apps: extra_apps || []})
+  end
+
+  def setup(setup, %__MODULE__{extra_apps: extra_apps} = step) do
+    test_config = setup |> Setup.get(:test_config) |> Step.Config.get()
+    protocol = test_config[:backdoor][:protocol] || "http"
+    test_ctx = :test_util.start_couch(extra_apps)
+    addr = :config.get('couch_httpd', 'bind_address', '127.0.0.1')
+    port = :mochiweb_socket_server.get(:couch_httpd, :port)
+    backdoor_url = "#{protocol}://#{addr}:#{port}"
+    clustered_url =
+      if :chttpd in extra_apps do
+        protocol = test_config[:clustered][:protocol] || "http"
+        addr = :config.get('chttpd', 'bind_address', '127.0.0.1')
+        port = :mochiweb_socket_server.get(:chttpd, :port)
+        "#{protocol}://#{addr}:#{port}"
+      else
+        nil
+      end
+    %{step |
+      test_ctx: test_ctx,
+      clustered_url: clustered_url,
+      backdoor_url: backdoor_url
+    }
+  end
+
+  def teardown(_setup, %__MODULE__{test_ctx: test_ctx}) do
+    :test_util.stop_couch(test_ctx)
+  end
+
+  def backdoor_url(%__MODULE__{backdoor_url: url}) do
+    url
+  end
+
+  def clustered_url(%__MODULE__{clustered_url: url}) do
+    url
+  end
+
+  def extra_apps(%__MODULE__{extra_apps: apps}) do
+    apps
+  end
+
+  @doc """
+  Returns the list of currently running applications
+  """
+  def apps() do
+    for {x, _, _} <- Application.started_applications, do: x
+  end
+
+end
\ No newline at end of file
diff --git a/test/elixir/lib/step/user.ex b/test/elixir/lib/step/user.ex
new file mode 100644
index 000000000..5a1cab33c
--- /dev/null
+++ b/test/elixir/lib/step/user.ex
@@ -0,0 +1,104 @@
+defmodule Couch.Test.Setup.Step.User do
+  @moduledoc """
+    Step to create a user with a given list of roles.
+    The :server_admin role is special: it is used to put the user
+    into the `admins` section of the config instead of a database.
+
+    Example
+      setup
+        |> Setup.Step.User.new(:admin, roles: [:server_admin])
+        ...
+        |> Setup.run
+        ...
+
+      user = setup |> Setup.get(:admin) |> Step.User.name()
+  """
+
+  alias Couch.Test.Setup
+  alias Couch.Test.Setup.Step
+  alias Couch.Test.Utils
+
+  import ExUnit.Callbacks, only: [on_exit: 1]
+
+  defstruct [:roles, :name, :password, :users_db]
+
+  import ExUnit.Assertions, only: [assert: 1, assert: 2]
+
+  import Utils
+
+  @admin {:user_ctx, user_ctx(roles: ["_admin"])}
+
+  def new(setup, id, roles: roles) do
+    setup |> Setup.step(id, %__MODULE__{roles: roles || []})
+  end
+
+  def setup(setup, %__MODULE__{roles: roles} = step) do
+    users_db = IO.chardata_to_string(
+      :config.get('chttpd_auth', 'authentication_db', '_users'))
+    if not Utils.db_exists?(users_db) do
+      on_exit fn ->
+        :fabric.delete_db(users_db, [@admin])
+      end
+      res = :fabric.create_db(users_db, [@admin])
+      assert res in [:ok, :accepted], "Cannot create `users` database #{users_db}"
+    end
+
+    if :server_admin in roles do
+      name = Utils.random_name("admin")
+      pass = Utils.random_password()
+      :config.set(
+        'admins', String.to_charlist(name), String.to_charlist(pass), false)
+      %{step |
+        name: name,
+        password: pass,
+        users_db: users_db
+      }
+    else
+      name = Utils.random_name("admin")
+      pass = Utils.random_password()
+      doc_id = "org.couchdb.user:#{name}"
+      user_doc = :couch_doc.from_json_obj(%{
+        _id: doc_id,
+        name: name,
+        type: "user",
+        roles: roles,
+        password: pass
+      })
+      res = :fabric.update_doc(users_db, user_doc, [@admin])
+      assert res in [:ok, :accepted], "Cannot create user document"
+      %{step |
+        name: name,
+        password: pass,
+        users_db: users_db,
+        roles: roles
+      }
+    end
+  end
+
+  def teardown(setup, %__MODULE__{name: name, users_db: users_db, roles: roles} = step) do
+    if :server_admin in roles do
+      :config.delete('admins', String.to_charlist(name), false)
+    else
+      doc_id = "org.couchdb.user:#{name}"
+      assert {:ok, doc_info(revs: [rev | _])} =
+        :fabric.get_doc_info(users_db, doc_id, [@admin])
+      doc = :couch_doc.from_json_obj(%{
+        _id: doc_id,
+        _rev: rev,
+        _deleted: true
+      })
+      assert {:ok, _resp} = :fabric.update_doc(users_db, doc, [@admin])
+    end
+    :ok
+  end
+
+  def name(%__MODULE__{name: name}) do
+    name
+  end
+  def password(%__MODULE__{password: pass}) do
+    pass
+  end
+  def credentials(%__MODULE__{name: name, password: pass}) do
+    {name, pass}
+  end
+
+end
\ No newline at end of file
diff --git a/test/elixir/lib/utils.ex b/test/elixir/lib/utils.ex
new file mode 100644
index 000000000..4d3f33f69
--- /dev/null
+++ b/test/elixir/lib/utils.ex
@@ -0,0 +1,60 @@
+defmodule Couch.Test.Utils do
+  require Record
+  @moduledoc "Helper functions for testing"
+  Record.defrecord :user_ctx, Record.extract(
+    :user_ctx, from_lib: "couch/include/couch_db.hrl")
+
+  Record.defrecord :doc_info, Record.extract(
+    :doc_info, from_lib: "couch/include/couch_db.hrl")
+
+  def random_name(prefix) do
+    time = :erlang.monotonic_time()
+    umi = :erlang.unique_integer([:monotonic])
+    "#{prefix}-#{time}-#{umi}"
+  end
+
+  def random_password() do
+    rand_bytes = :crypto.strong_rand_bytes(16)
+    rand_bytes
+    |> :base64.encode()
+    |> String.slice(0..16)
+  end
+
+  def db_exists?(db_name) do
+    try do
+      {:ok, _} = :fabric.get_db_info(db_name)
+      true
+    catch
+      :error, :database_does_not_exist -> false
+    end
+  end
+
+  @doc """
+  In some cases we need to access a record definition at compile time.
+  We cannot use Record.defrecord in such cases. This helper function
+  can be used instead.
+  Use it as follows:
+  ```
+  defmodule Foo do
+    admin_ctx = {:user_ctx, Utils.erlang_record(
+      :user_ctx, "couch/include/couch_db.hrl", roles: ["_admin"])}
+  end
+  ```
+
+  Longer term we should wrap Erlang records as is done for user_ctx;
+  see the beginning of Utils.ex. Then we would be able to use
+  them at compile time in other modules.
+  ```
+  Record.defrecord :user_ctx, Record.extract(
+    :user_ctx, from_lib: "couch/include/couch_db.hrl")
+  ```
+  """
+  def erlang_record(name, from_lib, opts \\ []) do
+    record_info = Record.extract(name, from_lib: from_lib)
+    index = [name | Keyword.keys(record_info)] |> Enum.with_index
+    draft = [name | Keyword.values(record_info)] |> List.to_tuple
+    opts
+    |> Enum.reduce(draft, fn
+      {k, v}, acc -> put_elem(acc, index[k], v)
+    end)
+  end
+
+end
\ No newline at end of file
--
cgit v1.2.1


From d427f353ab4be03998685f6a167d9559c7b50a76 Mon Sep 17 00:00:00 2001
From: ILYA Khlopotov
Date: Thu, 20 Jun 2019 21:41:40 +0000
Subject: Unify runners for unit and integration tests

---
 .credo.exs                            |  3 ++-
 Makefile                              | 29 +++++++++++++--------------
 Makefile.win                          | 18 +++++++----------
 config/integration.exs                | 12 ++++++++++++
 mix.exs                               | 25 +++++++++++++++++------
 test/elixir/Makefile                  |  4 +++-
 test/elixir/lib/utils.ex              |  5 +++--
 test/elixir/mix.exs                   | 37 -----------------------------------
 test/elixir/mix.lock                  |  9 ---------
 test/elixir/run                       |  6 ------
 test/elixir/test/replication_test.exs |  4 ++--
 test/elixir/test/test_helper.exs      |  8 +++++---
 12 files changed, 67 insertions(+), 93 deletions(-)
 create mode 100644 config/integration.exs
 delete mode 100644 test/elixir/mix.exs
 delete mode 100644 test/elixir/mix.lock
 delete mode 100755 test/elixir/run

diff --git a/.credo.exs b/.credo.exs
index db301e4d0..2b84a5064 100644
--- a/.credo.exs
+++ b/.credo.exs
@@ -29,7 +29,8 @@
           ~r"/src/httpotion",
           ~r"/src/credo",
           ~r"/src/junit_formatter",
-          ~r"/src/bunt"
+          ~r"/src/bunt",
+          ~r"/test/elixir/deps/"
         ]
       },
       #
diff --git a/Makefile b/Makefile
index 3d1be5149..58c814885 100644
--- a/Makefile
+++ b/Makefile
@@ -180,11 +180,7 @@ exunit: export MIX_ENV=test
 exunit: export ERL_LIBS = $(shell pwd)/src
 exunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
 exunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell pwd)/bin/couchjs $(shell pwd)/share/server/main.js
-exunit: couch elixir-check-formatted elixir-credo
-	@mix local.hex --force
-	@mix local.rebar rebar ${REBAR} --force
-	@mix deps.get
-	@$(REBAR) setup_eunit 2> /dev/null
+exunit: couch elixir-init setup-eunit elixir-check-formatted elixir-credo
 	@mix test --trace $(EXUNIT_OPTS)
 
 setup-eunit: export BUILDDIR = $(shell pwd)
@@ -226,34 +222,37 @@ python-black-update: .venv/bin/black
 	.
dev/run rel/overlay/bin/couchup test/javascript/run .PHONY: elixir +elixir: export MIX_ENV=integration elixir: elixir-init elixir-check-formatted elixir-credo devclean - @dev/run -a adm:pass --no-eval 'test/elixir/run --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)' + @dev/run -a adm:pass --no-eval 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)' .PHONY: elixir-init elixir-init: - @cd test/elixir && mix local.rebar --force && mix local.hex --force && mix deps.get + @mix local.rebar --force && mix local.hex --force && mix deps.get .PHONY: elixir-cluster-without-quorum -elixir-cluster-without-quorum: elixir-check-formatted elixir-credo devclean +elixir-cluster-without-quorum: export MIX_ENV=integration +elixir-cluster-without-quorum: elixir-init elixir-check-formatted elixir-credo devclean @dev/run -n 3 -q -a adm:pass \ --degrade-cluster 2 \ - --no-eval 'test/elixir/run --only without_quorum_test $(EXUNIT_OPTS)' + --no-eval 'mix test --trace --only without_quorum_test $(EXUNIT_OPTS)' .PHONY: elixir-cluster-with-quorum -elixir-cluster-with-quorum: elixir-check-formatted elixir-credo devclean +elixir-cluster-with-quorum: export MIX_ENV=integration +elixir-cluster-with-quorum: elixir-init elixir-check-formatted elixir-credo devclean @dev/run -n 3 -q -a adm:pass \ --degrade-cluster 1 \ - --no-eval 'test/elixir/run --only with_quorum_test $(EXUNIT_OPTS)' + --no-eval 'mix test --trace --only with_quorum_test $(EXUNIT_OPTS)' .PHONY: elixir-check-formatted -elixir-check-formatted: - mix format --check-formatted +elixir-check-formatted: elixir-init + @mix format --check-formatted # Credo is a static code analysis tool for Elixir. # We use it in our tests .PHONY: elixir-credo -elixir-credo: - mix credo +elixir-credo: elixir-init + @mix credo .PHONY: javascript # target: javascript - Run JavaScript test suites or specific ones defined by suites option diff --git a/Makefile.win b/Makefile.win index 2c4dc9821..a5e23d498 100644 --- a/Makefile.win +++ b/Makefile.win @@ -151,11 +151,7 @@ exunit: export MIX_ENV=test exunit: export ERL_LIBS = $(shell echo %cd%)\src exunit: export ERL_AFLAGS = -config $(shell echo %cd%)/rel/files/eunit.config exunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell echo %cd%)/bin/couchjs $(shell echo %cd%)/share/server/main.js -exunit: couch elixir-check-formatted elixir-credo - @mix local.hex --force - @mix local.rebar rebar ${REBAR} --force - @mix deps.get - @$(REBAR) setup_eunit 2> nul +exunit: couch elixir-init setup-eunit elixir-check-formatted elixir-credo @mix test --trace $(EXUNIT_OPTS) setup-eunit: export BUILDDIR = $(shell pwd) @@ -191,33 +187,33 @@ python-black-update: .venv/bin/black .PHONY: elixir elixir: elixir-init elixir-check-formatted elixir-credo devclean - @dev\run -a adm:pass --no-eval 'test\elixir\run.cmd --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)' + @dev\run -a adm:pass --no-eval 'mix test --trace --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)' .PHONY: elixir-init elixir-init: - @cd test/elixir && mix local.rebar --force && mix local.hex --force && mix deps.get + @mix local.rebar --force && mix local.hex --force && mix deps.get .PHONY: elixir-cluster-without-quorum elixir-cluster-without-quorum: elixir-check-formatted elixir-credo devclean @dev\run -n 3 -q -a adm:pass \ --degrade-cluster 2 \ - --no-eval 'test\elixir\run.cmd --only without_quorum_test $(EXUNIT_OPTS)' + --no-eval 'mix test --trace --only without_quorum_test 
$(EXUNIT_OPTS)' .PHONY: elixir-cluster-with-quorum elixir-cluster-with-quorum: elixir-check-formatted elixir-credo devclean @dev\run -n 3 -q -a adm:pass \ --degrade-cluster 1 \ - --no-eval 'test\elixir\run.cmd --only with_quorum_test $(EXUNIT_OPTS)' + --no-eval 'mix test --trace --only with_quorum_test $(EXUNIT_OPTS)' .PHONY: elixir-check-formatted elixir-check-formatted: - @cd test\elixir && mix format --check-formatted + @mix format --check-formatted # Credo is a static code analysis tool for Elixir. # We use it in our tests .PHONY: elixir-credo elixir-credo: - @cd test/elixir/ && mix credo + @mix credo .PHONY: test-cluster-with-quorum test-cluster-with-quorum: devclean diff --git a/config/integration.exs b/config/integration.exs new file mode 100644 index 000000000..c5a5ed24a --- /dev/null +++ b/config/integration.exs @@ -0,0 +1,12 @@ +use Mix.Config + +config :logger, + backends: [:console], + compile_time_purge_level: :debug, + level: :debug + +config :kernel, + error_logger: false + +config :sasl, + sasl_error_logger: false diff --git a/mix.exs b/mix.exs index d9c8c2160..2e213aeb1 100644 --- a/mix.exs +++ b/mix.exs @@ -13,8 +13,8 @@ defmodule CouchDBTest.Mixfile do start_permanent: Mix.env() == :prod, build_embedded: Mix.env() == :prod, deps: deps(), - consolidate_protocols: Mix.env() not in [:test, :dev], - test_paths: get_test_paths(), + consolidate_protocols: Mix.env() not in [:test, :dev, :integration], + test_paths: get_test_paths(Mix.env()), elixirc_paths: elixirc_paths(Mix.env()) ] end @@ -22,26 +22,39 @@ defmodule CouchDBTest.Mixfile do # Run "mix help compile.app" to learn about applications. def application do [ - extra_applications: [:logger] + extra_applications: [:logger], + applications: [:httpotion] ] end # Specifies which paths to compile per environment. defp elixirc_paths(:test), do: ["test/elixir/lib", "test/elixir/test/support"] + defp elixirc_paths(:integration), do: ["test/elixir/lib", "test/elixir/test/support"] defp elixirc_paths(_), do: ["test/elixir/lib"] # Run "mix help deps" to learn about dependencies. defp deps() do [ - {:httpotion, "~> 3.0", only: [:dev, :test], runtime: false}, + {:httpotion, "~> 3.0", only: [:dev, :test, :integration], runtime: false}, {:jiffy, path: Path.expand("src/jiffy", __DIR__)}, {:ibrowse, path: Path.expand("src/ibrowse", __DIR__), override: true, compile: false}, - {:credo, "~> 1.0.0", only: [:dev, :test], runtime: false} + {:credo, "~> 1.0.0", only: [:dev, :test, :integration], runtime: false} ] end - def get_test_paths do + def get_test_paths(:test) do Path.wildcard("src/*/test/exunit") |> Enum.filter(&File.dir?/1) end + + def get_test_paths(:integration) do + integration_tests = + Path.wildcard("src/*/test/integration") |> Enum.filter(&File.dir?/1) + + ["test/elixir/test" | integration_tests] + end + + def get_test_paths(_) do + [] + end end diff --git a/test/elixir/Makefile b/test/elixir/Makefile index bfcf017d5..67ce2b427 100644 --- a/test/elixir/Makefile +++ b/test/elixir/Makefile @@ -1,2 +1,4 @@ +SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST))) + all: - mix test --trace + make -C ${SELF_DIR}../.. 
elixir diff --git a/test/elixir/lib/utils.ex b/test/elixir/lib/utils.ex index 4d3f33f69..3ecf878e7 100644 --- a/test/elixir/lib/utils.ex +++ b/test/elixir/lib/utils.ex @@ -1,11 +1,12 @@ defmodule Couch.Test.Utils do require Record @moduledoc "Helper functions for testing" + @project_root "#{__DIR__}/../../../" Record.defrecord :user_ctx, Record.extract( - :user_ctx, from_lib: "couch/include/couch_db.hrl") + :user_ctx, from: "#{@project_root}/src/couch/include/couch_db.hrl") Record.defrecord :doc_info, Record.extract( - :doc_info, from_lib: "couch/include/couch_db.hrl") + :doc_info, from: "#{@project_root}/src/couch/include/couch_db.hrl") def random_name(prefix) do time = :erlang.monotonic_time() diff --git a/test/elixir/mix.exs b/test/elixir/mix.exs deleted file mode 100644 index f04038ef3..000000000 --- a/test/elixir/mix.exs +++ /dev/null @@ -1,37 +0,0 @@ -defmodule Foo.Mixfile do - use Mix.Project - - def project do - [ - app: :foo, - version: "0.1.0", - elixir: "~> 1.6", - elixirc_paths: elixirc_paths(Mix.env()), - start_permanent: Mix.env() == :prod, - deps: deps() - ] - end - - # Run "mix help compile.app" to learn about applications. - def application do - [ - extra_applications: [:logger] - ] - end - - # Specifies which paths to compile per environment. - defp elixirc_paths(:test), do: ["lib", "test/support"] - defp elixirc_paths(_), do: ["lib"] - - # Run "mix help deps" to learn about dependencies. - defp deps do - [ - # {:dep_from_hexpm, "~> 0.3.0"}, - {:httpotion, "~> 3.0"}, - {:jiffy, "~> 0.15.2"}, - {:credo, "~> 1.0.0", only: [:dev, :test], runtime: false}, - {:junit_formatter, "~> 3.0", only: [:test]} - # {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"}, - ] - end -end diff --git a/test/elixir/mix.lock b/test/elixir/mix.lock deleted file mode 100644 index 0fc391a92..000000000 --- a/test/elixir/mix.lock +++ /dev/null @@ -1,9 +0,0 @@ -%{ - "bunt": {:hex, :bunt, "0.2.0", "951c6e801e8b1d2cbe58ebbd3e616a869061ddadcc4863d0a2182541acae9a38", [:mix], [], "hexpm"}, - "credo": {:hex, :credo, "1.0.0", "aaa40fdd0543a0cf8080e8c5949d8c25f0a24e4fc8c1d83d06c388f5e5e0ea42", [:mix], [{:bunt, "~> 0.2.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm"}, - "httpotion": {:hex, :httpotion, "3.1.0", "14d20d9b0ce4e86e253eb91e4af79e469ad949f57a5d23c0a51b2f86559f6589", [:mix], [{:ibrowse, "~> 4.4", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm"}, - "ibrowse": {:hex, :ibrowse, "4.4.1", "2b7d0637b0f8b9b4182de4bd0f2e826a4da2c9b04898b6e15659ba921a8d6ec2", [:rebar3], [], "hexpm"}, - "jason": {:hex, :jason, "1.1.2", "b03dedea67a99223a2eaf9f1264ce37154564de899fd3d8b9a21b1a6fd64afe7", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm"}, - "jiffy": {:hex, :jiffy, "0.15.2", "de266c390111fd4ea28b9302f0bc3d7472468f3b8e0aceabfbefa26d08cd73b7", [:rebar3], [], "hexpm"}, - "junit_formatter": {:hex, :junit_formatter, "3.0.0", "13950d944dbd295da7d8cc4798b8faee808a8bb9b637c88069954eac078ac9da", [:mix], [], "hexpm"}, -} diff --git a/test/elixir/run b/test/elixir/run deleted file mode 100755 index a9c2efa4d..000000000 --- a/test/elixir/run +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -e -cd "$(dirname "$0")" -mix local.hex --force -mix local.rebar --force -mix deps.get -mix test --trace "$@" diff --git a/test/elixir/test/replication_test.exs b/test/elixir/test/replication_test.exs index e98775fbd..a6e1303e6 100644 --- a/test/elixir/test/replication_test.exs +++ 
b/test/elixir/test/replication_test.exs
@@ -1757,11 +1757,11 @@ defmodule ReplicationTest do
   end
 
   def get_att1_data do
-    File.read!("test/data/lorem.txt")
+    File.read!(Path.expand("data/lorem.txt", __DIR__))
   end
 
   def get_att2_data do
-    File.read!("test/data/lorem_b64.txt")
+    File.read!(Path.expand("data/lorem_b64.txt", __DIR__))
   end
 
   def cmp_json(lhs, rhs) when is_map(lhs) and is_map(rhs) do
diff --git a/test/elixir/test/test_helper.exs b/test/elixir/test/test_helper.exs
index ef71bbb1b..4bf65bcf6 100644
--- a/test/elixir/test/test_helper.exs
+++ b/test/elixir/test/test_helper.exs
@@ -2,12 +2,14 @@
 # and skip certain tests that fail on jenkins.
 exclude =
   case System.get_env("BUILD_NUMBER") !== nil do
-    true -> [pending: true, skip_on_jenkins: true]
-    false -> [pending: true]
+    true -> [:pending, :skip_on_jenkins]
+    false -> [:pending]
   end
 
+current_exclude = Keyword.get(ExUnit.configuration(), :exclude, [])
+
 ExUnit.configure(
-  exclude: exclude,
+  exclude: Enum.uniq(exclude ++ current_exclude),
   formatters: [JUnitFormatter, ExUnit.CLIFormatter]
 )
--
cgit v1.2.1


From 25ad74a6cdc03732265d5175218e96a004dd4c40 Mon Sep 17 00:00:00 2001
From: ILYA Khlopotov
Date: Fri, 21 Jun 2019 12:50:56 +0000
Subject: Update .travis.yml

---
 .travis.yml | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 7ef4aeadf..4af915ee9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -41,10 +41,6 @@ env:
     - secure: "UdA/gKIlyuXaW+hUgRx40t1TYjLCGxMqHvM5Uw7UbUH2dqEkgJiLfhZGchS1JVzl8M01VKZUUzS7v2nvRLiHZN1kvaw5kfq31VRoafUah8jfmvqNWZVdLovHl3aw5UX/HRt0RkbWbhdbdknTfh6+YinSZ+Nb54jCErMg9nabXtM="
     - COUCHDB_IO_LOG_DIR=/tmp/couchjslogs
 
-# Change to elixir folder so that travis can run mix deps.get during install
-before_install:
-  - cd test/elixir
-
 # Enable this block if you want to build docs & fauxton too
 #node_js:
 #  - 6
@@ -53,7 +49,6 @@
 # Then comment this section out
 
 before_script:
-  - cd ../..
   - kerl list installations
   - rm -rf /tmp/couchjslogs
   - mkdir -p /tmp/couchjslogs
--
cgit v1.2.1


From 220462a1dd2d921fc4ecba3488f5fedefb75217f Mon Sep 17 00:00:00 2001
From: Nick Vatamaniuc
Date: Fri, 26 Jul 2019 19:52:19 -0400
Subject: Retry EUnit tests on failure

The whole app's test suite is retried up to 2 extra times if it fails.
This is added to the *nix Makefile only for now; it may not be needed
for Windows, since it mostly works around CI flakiness.

---
 Makefile | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index 58c814885..ed22509e2 100644
--- a/Makefile
+++ b/Makefile
@@ -170,8 +170,18 @@ eunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell pwd)/bin/couchjs $(shell
 eunit: couch
 	@$(REBAR) setup_eunit 2> /dev/null
 	@for dir in $(subdirs); do \
-		$(REBAR) -r eunit $(EUNIT_OPTS) apps=$$dir || exit 1; \
-	done
+		tries=0; \
+		while true; do \
+			$(REBAR) -r eunit $(EUNIT_OPTS) apps=$$dir ; \
+			if [ $$? -eq 0 ]; then \
+				break; \
+			else \
+				let "tries=tries+1"; \
+				[ $$tries -gt 2 ] && exit 1; \
+			fi \
+		done \
+	done
+
 
 .PHONY: exunit
 # target: exunit - Run ExUnit tests
--
cgit v1.2.1


From e67903b831799568425ce93abe4589c3ee59f558 Mon Sep 17 00:00:00 2001
From: Nick Vatamaniuc
Date: Sun, 28 Jul 2019 20:08:46 -0400
Subject: Fix mem3_sync_event_listener EUnit test

Fix a race condition in state matching, and parameterize the state
field in wait_state.
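
The parameterized wait_state/3 relies on the fact that an Erlang
`#state.Field` expression evaluates to the field's position in the
underlying record tuple, so a single helper can poll any field with
element/2. A minimal sketch of the idea (the record and values below
are illustrative, not the actual listener state):

```erlang
-record(state, {delay, frequency}).

demo() ->
    S = #state{delay = 123, frequency = 456},
    %% Record field positions are plain integers (the record tag is
    %% element 1), so they can be passed around and used with element/2.
    2 = #state.delay,
    123 = element(#state.delay, S),
    456 = element(#state.frequency, S).
```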
---
 src/mem3/src/mem3_sync_event_listener.erl | 28 ++++++++--------------------
 1 file changed, 8 insertions(+), 20 deletions(-)

diff --git a/src/mem3/src/mem3_sync_event_listener.erl b/src/mem3/src/mem3_sync_event_listener.erl
index d7f745137..69a7a6017 100644
--- a/src/mem3/src/mem3_sync_event_listener.erl
+++ b/src/mem3/src/mem3_sync_event_listener.erl
@@ -258,7 +258,7 @@ subscribe_for_config_test_() ->
 should_set_sync_delay(Pid) ->
     ?_test(begin
         config:set("mem3", "sync_delay", "123", false),
-        wait_state_delay(Pid, 123),
+        wait_state(Pid, #state.delay, 123),
         ?assertMatch(#state{delay = 123}, get_state(Pid)),
         ok
     end).
@@ -266,7 +266,7 @@ should_set_sync_delay(Pid) ->
 should_set_sync_frequency(Pid) ->
     ?_test(begin
         config:set("mem3", "sync_frequency", "456", false),
-        wait_state_frequency(Pid, 456),
+        wait_state(Pid, #state.frequency, 456),
         ?assertMatch(#state{frequency = 456}, get_state(Pid)),
         ok
     end).
@@ -301,30 +301,18 @@ get_state(Pid) ->
     Pid ! {get_state, Ref, self()},
     receive
         {Ref, State} -> State
-    after 10 ->
+    after 500 ->
         timeout
     end.
 
 
-wait_state_frequency(Pid, Val) ->
+wait_state(Pid, Field, Val) when is_pid(Pid), is_integer(Field) ->
     WaitFun = fun() ->
         case get_state(Pid) of
-            timeout ->
-                wait;
-            #state{frequency = Val} ->
-                true
-        end
-    end,
-    test_util:wait(WaitFun).
-
-
-wait_state_delay(Pid, Val) ->
-    WaitFun = fun() ->
-        case get_state(Pid) of
-            timeout ->
-                wait;
-            #state{delay = Val} ->
-                true
+            #state{} = S when element(Field, S) == Val ->
+                true;
+            _ ->
+                wait
         end
     end,
    test_util:wait(WaitFun).
--
cgit v1.2.1


From 0a5b11f832722c33dd30c4150f4c02414edd85d4 Mon Sep 17 00:00:00 2001
From: Nick Vatamaniuc
Date: Wed, 31 Jul 2019 11:46:22 -0400
Subject: Remove local replication endpoints in CouchDB 3.x

`local` replication endpoints do something completely unexpected from a
user's point of view -- they replicate to and from node-local databases
on a random node. The only way this worked correctly was if someone
used the backend port (:5986) with a single-node database. However,
that port is getting closed for the 3.x release as well, so it makes
even less sense to keep this functionality around.

For more discussion and voting results see the ML list:

https://lists.apache.org/thread.html/ddcd9db93cee363db7da571f5cbc7f2bd24b881a34e1ef734d6a0a1c@%3Cdev.couchdb.apache.org%3E

The `_replicate` HTTP "hack" was left as is, since it does work more or
less. However, it is inconsistent with what _replicator docs do, so we
should probably deprecate it and remove it in 4.x.
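
As a sketch of the user-visible effect (the endpoint names here are
illustrative), a replicator doc that names a node-local database is now
rejected at parse time, while full URL endpoints keep working; this
mirrors the t_error_on_local_endpoint test added below:

```erlang
RepDoc = {[
    {<<"_id">>, <<"someid">>},
    {<<"source">>, <<"localdb">>},                   % bare local name: rejected
    {<<"target">>, <<"http://somehost.local/tgt">>}  % URL endpoint: still fine
]},
%% throws {bad_rep_doc, <<"Local endpoints not supported since CouchDB 3.x">>}
couch_replicator_docs:parse_rep_doc_without_id(RepDoc).
```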
--- .../src/cpse_test_purge_replication.erl | 16 ++- src/couch_replicator/src/couch_replicator.erl | 1 - .../src/couch_replicator_api_wrap.erl | 150 +++------------------ .../src/couch_replicator_doc_processor.erl | 12 +- .../src/couch_replicator_doc_processor_worker.erl | 6 +- src/couch_replicator/src/couch_replicator_docs.erl | 29 +++- .../src/couch_replicator_filters.erl | 13 +- src/couch_replicator/src/couch_replicator_ids.erl | 44 +++--- .../src/couch_replicator_scheduler_job.erl | 31 +---- .../src/couch_replicator_utils.erl | 56 +------- .../src/couch_replicator_worker.erl | 143 ++------------------ .../couch_replicator_attachments_too_large.erl | 4 +- .../test/eunit/couch_replicator_compact_tests.erl | 5 +- .../test/eunit/couch_replicator_filtered_tests.erl | 10 +- .../eunit/couch_replicator_id_too_long_tests.erl | 5 +- .../eunit/couch_replicator_large_atts_tests.erl | 5 +- .../eunit/couch_replicator_many_leaves_tests.erl | 6 +- .../eunit/couch_replicator_missing_stubs_tests.erl | 5 +- .../test/eunit/couch_replicator_selector_tests.erl | 5 +- ...ch_replicator_small_max_request_size_target.erl | 5 +- .../couch_replicator_use_checkpoints_tests.erl | 5 +- test/elixir/test/replication_test.exs | 36 +++-- 22 files changed, 136 insertions(+), 456 deletions(-) diff --git a/src/couch_pse_tests/src/cpse_test_purge_replication.erl b/src/couch_pse_tests/src/cpse_test_purge_replication.erl index fb09eeba6..4a0822525 100644 --- a/src/couch_pse_tests/src/cpse_test_purge_replication.erl +++ b/src/couch_pse_tests/src/cpse_test_purge_replication.erl @@ -21,7 +21,7 @@ setup_all() -> - cpse_util:setup_all([mem3, fabric, couch_replicator]). + cpse_util:setup_all([mem3, fabric, chttpd, couch_replicator]). setup_each() -> @@ -48,8 +48,8 @@ cpse_purge_http_replication({Source, Target}) -> ]), RepObject = {[ - {<<"source">>, Source}, - {<<"target">>, Target} + {<<"source">>, db_url(Source)}, + {<<"target">>, db_url(Target)} ]}, {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), @@ -100,8 +100,8 @@ cpse_purge_http_replication({Source, Target}) -> % Show that replicating from the target % back to the source reintroduces the doc RepObject2 = {[ - {<<"source">>, Target}, - {<<"target">>, Source} + {<<"source">>, db_url(Target)}, + {<<"target">>, db_url(Source)} ]}, {ok, _} = couch_replicator:replicate(RepObject2, ?ADMIN_USER), @@ -200,3 +200,9 @@ make_shard(DbName) -> dbname = DbName, range = [0, 16#FFFFFFFF] }. + + +db_url(DbName) -> + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(couch_httpd, port), + ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])). diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl index 39141c301..e4fa31cee 100644 --- a/src/couch_replicator/src/couch_replicator.erl +++ b/src/couch_replicator/src/couch_replicator.erl @@ -358,7 +358,6 @@ strip_url_creds_test_() -> end, fun (_) -> meck:unload() end, [ - t_strip_local_db_creds(), t_strip_http_basic_creds(), t_strip_http_props_creds() ] diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl index 44c290d33..ab1de7df9 100644 --- a/src/couch_replicator/src/couch_replicator_api_wrap.erl +++ b/src/couch_replicator/src/couch_replicator_api_wrap.erl @@ -23,8 +23,8 @@ -include("couch_replicator_api_wrap.hrl"). 
-export([ - db_open/2, - db_open/4, + db_open/1, + db_open/3, db_close/1, get_db_info/1, get_pending_count/2, @@ -67,10 +67,10 @@ db_uri(Db) -> db_uri(couch_db:name(Db)). -db_open(Db, Options) -> - db_open(Db, Options, false, []). +db_open(Db) -> + db_open(Db, false, []). -db_open(#httpdb{} = Db1, _Options, Create, CreateParams) -> +db_open(#httpdb{} = Db1, Create, CreateParams) -> {ok, Db} = couch_replicator_httpc:setup(Db1), try case Create of @@ -118,51 +118,19 @@ db_open(#httpdb{} = Db1, _Options, Create, CreateParams) -> exit:Error -> db_close(Db), erlang:exit(Error) - end; -db_open(DbName, Options, Create, _CreateParams) -> - try - case Create of - false -> - ok; - true -> - ok = couch_httpd:verify_is_server_admin( - get_value(user_ctx, Options)), - couch_db:create(DbName, Options) - end, - case couch_db:open(DbName, Options) of - {error, {illegal_database_name, _}} -> - throw({db_not_found, DbName}); - {not_found, _Reason} -> - throw({db_not_found, DbName}); - {ok, _Db} = Success -> - Success - end - catch - throw:{unauthorized, _} -> - throw({unauthorized, DbName}) end. db_close(#httpdb{httpc_pool = Pool} = HttpDb) -> couch_replicator_auth:cleanup(HttpDb), unlink(Pool), - ok = couch_replicator_httpc_pool:stop(Pool); -db_close(DbName) -> - catch couch_db:close(DbName). + ok = couch_replicator_httpc_pool:stop(Pool). get_db_info(#httpdb{} = Db) -> send_req(Db, [], fun(200, _, {Props}) -> {ok, Props} - end); -get_db_info(Db) -> - DbName = couch_db:name(Db), - UserCtx = couch_db:get_user_ctx(Db), - {ok, InfoDb} = couch_db:open(DbName, [{user_ctx, UserCtx}]), - {ok, Info} = couch_db:get_db_info(InfoDb), - couch_db:close(InfoDb), - {ok, [{couch_util:to_binary(K), V} || {K, V} <- Info]}. - + end). get_pending_count(#httpdb{} = Db, Seq) when is_number(Seq) -> % Source looks like Apache CouchDB and not Cloudant so we fall @@ -179,14 +147,7 @@ get_pending_count(#httpdb{} = Db, Seq) -> Options = [{path, "_changes"}, {qs, [{"since", ?JSON_ENCODE(Seq)}, {"limit", "0"}]}], send_req(Db, Options, fun(200, _, {Props}) -> {ok, couch_util:get_value(<<"pending">>, Props, null)} - end); -get_pending_count(Db, Seq) when is_number(Seq) -> - DbName = couch_db:name(Db), - UserCtx = couch_db:get_user_ctx(Db), - {ok, CountDb} = couch_db:open(DbName, [{user_ctx, UserCtx}]), - Pending = couch_db:count_changes_since(CountDb, Seq), - couch_db:close(CountDb), - {ok, Pending}. + end). get_view_info(#httpdb{} = Db, DDocId, ViewName) -> Path = io_lib:format("~s/_view/~s/_info", [DDocId, ViewName]), @@ -194,11 +155,7 @@ get_view_info(#httpdb{} = Db, DDocId, ViewName) -> fun(200, _, {Props}) -> {VInfo} = couch_util:get_value(<<"view_index">>, Props, {[]}), {ok, VInfo} - end); -get_view_info(Db, DDocId, ViewName) -> - DbName = couch_db:name(Db), - {ok, VInfo} = couch_mrview:get_view_info(DbName, DDocId, ViewName), - {ok, [{couch_util:to_binary(K), V} || {K, V} <- VInfo]}. + end). ensure_full_commit(#httpdb{} = Db) -> @@ -210,9 +167,7 @@ ensure_full_commit(#httpdb{} = Db) -> {ok, get_value(<<"instance_start_time">>, Props)}; (_, _, {Props}) -> {error, get_value(<<"error">>, Props)} - end); -ensure_full_commit(Db) -> - couch_db:ensure_full_commit(Db). + end). get_missing_revs(#httpdb{} = Db, IdRevs) -> @@ -232,10 +187,7 @@ get_missing_revs(#httpdb{} = Db, IdRevs) -> {Id, MissingRevs, PossibleAncestors} end, {ok, lists:map(ConvertToNativeFun, Props)} - end); -get_missing_revs(Db, IdRevs) -> - couch_db:get_missing_revs(Db, IdRevs). - + end). 
open_doc_revs(#httpdb{retries = 0} = HttpDb, Id, Revs, Options, _Fun, _Acc) -> @@ -331,10 +283,8 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) -> wait = Wait }, open_doc_revs(RetryDb, Id, Revs, Options, Fun, Acc) - end; -open_doc_revs(Db, Id, Revs, Options, Fun, Acc) -> - {ok, Results} = couch_db:open_doc_revs(Db, Id, Revs, Options), - {ok, lists:foldl(fun(R, A) -> {_, A2} = Fun(R, A), A2 end, Acc, Results)}. + end. + error_reason({http_request_failed, "GET", _Url, {error, timeout}}) -> timeout; @@ -353,14 +303,7 @@ open_doc(#httpdb{} = Db, Id, Options) -> {ok, couch_doc:from_json_obj(Body)}; (_, _, {Props}) -> {error, get_value(<<"error">>, Props)} - end); -open_doc(Db, Id, Options) -> - case couch_db:open_doc(Db, Id, Options) of - {ok, _} = Ok -> - Ok; - {not_found, _Reason} -> - {error, <<"not_found">>} - end. + end). update_doc(Db, Doc, Options) -> @@ -411,9 +354,7 @@ update_doc(#httpdb{} = HttpDb, #doc{id = DocId} = Doc, Options, Type) -> {_, Error} -> {error, Error} end - end); -update_doc(Db, Doc, Options, Type) -> - couch_db:update_doc(Db, Doc, Options, Type). + end). update_docs(Db, DocList, Options) -> @@ -468,10 +409,7 @@ update_docs(#httpdb{} = HttpDb, DocList, Options, UpdateType) -> {error, request_body_too_large}; (417, _, Results) when is_list(Results) -> {ok, bulk_results_to_errors(DocList, Results, remote)} - end); -update_docs(Db, DocList, Options, UpdateType) -> - Result = couch_db:update_docs(Db, DocList, Options, UpdateType), - {ok, bulk_results_to_errors(DocList, Result, UpdateType)}. + end). changes_since(#httpdb{headers = Headers1, timeout = InactiveTimeout} = HttpDb, @@ -538,38 +476,7 @@ changes_since(#httpdb{headers = Headers1, timeout = InactiveTimeout} = HttpDb, throw(retry_no_limit); exit:{http_request_failed, _, _, _} = Error -> throw({retry_limit, Error}) - end; -changes_since(Db, Style, StartSeq, UserFun, Options) -> - DocIds = get_value(doc_ids, Options), - Selector = get_value(selector, Options), - Filter = case {DocIds, Selector} of - {undefined, undefined} -> - ?b2l(get_value(filter, Options, <<>>)); - {_, undefined} -> - "_doc_ids"; - {undefined, _} -> - "_selector" - end, - Args = #changes_args{ - style = Style, - since = StartSeq, - filter = Filter, - feed = case get_value(continuous, Options, false) of - true -> - "continuous"; - false -> - "normal" - end, - timeout = infinity - }, - QueryParams = get_value(query_params, Options, {[]}), - Req = changes_json_req(Db, Filter, QueryParams, Options), - ChangesFeedFun = couch_changes:handle_db_changes(Args, {json_req, Req}, Db), - ChangesFeedFun(fun({change, Change, _}, _) -> - UserFun(json_to_doc_info(Change)); - (_, _) -> - ok - end). + end. % internal functions @@ -614,29 +521,6 @@ parse_changes_feed(Options, UserFun, DataStreamFun) -> json_stream_parse:events(DataStreamFun, EventFun) end. 
-changes_json_req(_Db, "", _QueryParams, _Options) -> - {[]}; -changes_json_req(_Db, "_doc_ids", _QueryParams, Options) -> - {[{<<"doc_ids">>, get_value(doc_ids, Options)}]}; -changes_json_req(_Db, "_selector", _QueryParams, Options) -> - {[{<<"selector">>, get_value(selector, Options)}]}; -changes_json_req(Db, FilterName, {QueryParams}, _Options) -> - {ok, Info} = couch_db:get_db_info(Db), - % simulate a request to db_name/_changes - {[ - {<<"info">>, {Info}}, - {<<"id">>, null}, - {<<"method">>, 'GET'}, - {<<"path">>, [couch_db:name(Db), <<"_changes">>]}, - {<<"query">>, {[{<<"filter">>, FilterName} | QueryParams]}}, - {<<"headers">>, []}, - {<<"body">>, []}, - {<<"peer">>, <<"replicator">>}, - {<<"form">>, []}, - {<<"cookie">>, []}, - {<<"userCtx">>, couch_util:json_user_ctx(Db)} - ]}. - options_to_query_args(HttpDb, Path, Options0) -> case lists:keytake(max_url_len, 1, Options0) of diff --git a/src/couch_replicator/src/couch_replicator_doc_processor.erl b/src/couch_replicator/src/couch_replicator_doc_processor.erl index 1b43598da..772037d8d 100644 --- a/src/couch_replicator/src/couch_replicator_doc_processor.erl +++ b/src/couch_replicator/src/couch_replicator_doc_processor.erl @@ -886,8 +886,8 @@ change() -> {<<"id">>, ?DOC1}, {doc, {[ {<<"_id">>, ?DOC1}, - {<<"source">>, <<"src">>}, - {<<"target">>, <<"tgt">>} + {<<"source">>, <<"http://srchost.local/src">>}, + {<<"target">>, <<"http://tgthost.local/tgt">>} ]}} ]}. @@ -897,8 +897,8 @@ change(State) -> {<<"id">>, ?DOC1}, {doc, {[ {<<"_id">>, ?DOC1}, - {<<"source">>, <<"src">>}, - {<<"target">>, <<"tgt">>}, + {<<"source">>, <<"http://srchost.local/src">>}, + {<<"target">>, <<"http://tgthost.local/tgt">>}, {<<"_replication_state">>, State} ]}} ]}. @@ -910,8 +910,8 @@ deleted_change() -> {<<"deleted">>, true}, {doc, {[ {<<"_id">>, ?DOC1}, - {<<"source">>, <<"src">>}, - {<<"target">>, <<"tgt">>} + {<<"source">>, <<"http://srchost.local/src">>}, + {<<"target">>, <<"http://tgthost.local/tgt">>} ]}} ]}. diff --git a/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl b/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl index aa048bfab..a4c829323 100644 --- a/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl +++ b/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl @@ -137,7 +137,7 @@ maybe_add_job_to_scheduler({DbName, DocId}, Rep, WRef) -> -define(DB, <<"db">>). -define(DOC1, <<"doc1">>). --define(R1, {"0b7831e9a41f9322a8600ccfa02245f2", ""}). +-define(R1, {"ad08e05057046eabe898a2572bbfb573", ""}). doc_processor_worker_test_() -> @@ -277,8 +277,8 @@ did_not_add_job() -> change() -> {[ {<<"_id">>, ?DOC1}, - {<<"source">>, <<"src">>}, - {<<"target">>, <<"tgt">>} + {<<"source">>, <<"http://srchost.local/src">>}, + {<<"target">>, <<"http://tgthost.local/tgt">>} ]}. -endif. 
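
Note that the expected replication ID (?R1) in the test above changes
because v4 replication IDs are derived from the endpoint descriptions,
so switching the test doc from local names to URLs necessarily yields a
different hash. Roughly (a simplified sketch; SrcInfo/TgtInfo stand for
the endpoint tuples, and filter information is also mixed in -- see
couch_replicator_ids:maybe_append_filters/2 for the real derivation):

```erlang
Base = [couch_server:get_uuid(), SrcInfo, TgtInfo],
RepId = couch_util:to_hex(couch_hash:md5_hash(term_to_binary(Base))).
```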
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index bbf9694d7..c07caa1aa 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -423,8 +423,8 @@ parse_rep_db(<<"http://", _/binary>> = Url, Proxy, Options) ->
 parse_rep_db(<<"https://", _/binary>> = Url, Proxy, Options) ->
     parse_rep_db({[{<<"url">>, Url}]}, Proxy, Options);
 
-parse_rep_db(<<DbName/binary>>, _Proxy, _Options) ->
-    DbName;
+parse_rep_db(<<_/binary>>, _Proxy, _Options) ->
+    throw({error, <<"Local endpoints not supported since CouchDB 3.x">>});
 
 parse_rep_db(undefined, _Proxy, _Options) ->
     throw({error, <<"Missing replicator database">>}).
@@ -822,4 +822,29 @@ t_vdu_does_not_crash_on_save(DbName) ->
         ?assertEqual({ok, forbidden}, save_rep_doc(DbName, Doc))
     end).
 
+
+local_replication_endpoint_error_test_() ->
+    {
+        foreach,
+        fun () -> meck:expect(config, get,
+            fun(_, _, Default) -> Default end)
+        end,
+        fun (_) -> meck:unload() end,
+        [
+            t_error_on_local_endpoint()
+        ]
+    }.
+
+
+t_error_on_local_endpoint() ->
+    ?_test(begin
+        RepDoc = {[
+            {<<"_id">>, <<"someid">>},
+            {<<"source">>, <<"localdb">>},
+            {<<"target">>, <<"http://somehost.local/tgt">>}
+        ]},
+        Expect = <<"Local endpoints not supported since CouchDB 3.x">>,
+        ?assertThrow({bad_rep_doc, Expect}, parse_rep_doc_without_id(RepDoc))
+    end).
+
 -endif.
diff --git a/src/couch_replicator/src/couch_replicator_filters.erl b/src/couch_replicator/src/couch_replicator_filters.erl
index 5668820d1..c8980001a 100644
--- a/src/couch_replicator/src/couch_replicator_filters.erl
+++ b/src/couch_replicator/src/couch_replicator_filters.erl
@@ -14,7 +14,7 @@
 
 -export([
     parse/1,
-    fetch/4,
+    fetch/3,
     view_type/2,
     ejsort/1
 ]).
@@ -63,11 +63,11 @@ parse(Options) ->
 % Fetches body of filter function from source database. Guaranteed to either
 % return {ok, Body} or an {error, Reason}. Also assume this function might
 % block due to network / socket issues for an undetermined amount of time.
--spec fetch(binary(), binary(), binary(), #user_ctx{}) ->
+-spec fetch(binary(), binary(), binary()) ->
     {ok, {[_]}} | {error, binary()}.
-fetch(DDocName, FilterName, Source, UserCtx) ->
+fetch(DDocName, FilterName, Source) ->
     {Pid, Ref} = spawn_monitor(fun() ->
-        try fetch_internal(DDocName, FilterName, Source, UserCtx) of
+        try fetch_internal(DDocName, FilterName, Source) of
             Resp ->
                 exit({exit_ok, Resp})
         catch
@@ -108,9 +108,8 @@ view_type(Props, Options) ->
 
 % Private functions
 
-fetch_internal(DDocName, FilterName, Source, UserCtx) ->
-    Db = case (catch couch_replicator_api_wrap:db_open(Source,
-        [{user_ctx, UserCtx}])) of
+fetch_internal(DDocName, FilterName, Source) ->
+    Db = case (catch couch_replicator_api_wrap:db_open(Source)) of
         {ok, Db0} ->
             Db0;
         DbError ->
diff --git a/src/couch_replicator/src/couch_replicator_ids.erl b/src/couch_replicator/src/couch_replicator_ids.erl
index e10b98082..04e71c3ef 100644
--- a/src/couch_replicator/src/couch_replicator_ids.erl
+++ b/src/couch_replicator/src/couch_replicator_ids.erl
@@ -39,19 +39,19 @@ replication_id(#rep{options = Options} = Rep) ->
 % If a change is made to how replications are identified,
 % please add a new clause and increase ?REP_ID_VERSION.
-replication_id(#rep{user_ctx = UserCtx} = Rep, 4) ->
+replication_id(#rep{} = Rep, 4) ->
     UUID = couch_server:get_uuid(),
-    SrcInfo = get_v4_endpoint(UserCtx, Rep#rep.source),
-    TgtInfo = get_v4_endpoint(UserCtx, Rep#rep.target),
+    SrcInfo = get_v4_endpoint(Rep#rep.source),
+    TgtInfo = get_v4_endpoint(Rep#rep.target),
     maybe_append_filters([UUID, SrcInfo, TgtInfo], Rep);
 
-replication_id(#rep{user_ctx = UserCtx} = Rep, 3) ->
+replication_id(#rep{} = Rep, 3) ->
     UUID = couch_server:get_uuid(),
-    Src = get_rep_endpoint(UserCtx, Rep#rep.source),
-    Tgt = get_rep_endpoint(UserCtx, Rep#rep.target),
+    Src = get_rep_endpoint(Rep#rep.source),
+    Tgt = get_rep_endpoint(Rep#rep.target),
     maybe_append_filters([UUID, Src, Tgt], Rep);
 
-replication_id(#rep{user_ctx = UserCtx} = Rep, 2) ->
+replication_id(#rep{} = Rep, 2) ->
     {ok, HostName} = inet:gethostname(),
     Port = case (catch mochiweb_socket_server:get(couch_httpd, port)) of
         P when is_number(P) ->
@@ -64,14 +64,14 @@ replication_id(#rep{user_ctx = UserCtx} = Rep, 2) ->
         % ... mochiweb_socket_server:get(https, port)
         list_to_integer(config:get("httpd", "port", "5984"))
     end,
-    Src = get_rep_endpoint(UserCtx, Rep#rep.source),
-    Tgt = get_rep_endpoint(UserCtx, Rep#rep.target),
+    Src = get_rep_endpoint(Rep#rep.source),
+    Tgt = get_rep_endpoint(Rep#rep.target),
     maybe_append_filters([HostName, Port, Src, Tgt], Rep);
 
-replication_id(#rep{user_ctx = UserCtx} = Rep, 1) ->
+replication_id(#rep{} = Rep, 1) ->
     {ok, HostName} = inet:gethostname(),
-    Src = get_rep_endpoint(UserCtx, Rep#rep.source),
-    Tgt = get_rep_endpoint(UserCtx, Rep#rep.target),
+    Src = get_rep_endpoint(Rep#rep.source),
+    Tgt = get_rep_endpoint(Rep#rep.target),
     maybe_append_filters([HostName, Src, Tgt], Rep).
 
@@ -91,7 +91,7 @@ convert({BaseId, Ext} = Id) when is_list(BaseId), is_list(Ext) ->
 % Private functions
 
 maybe_append_filters(Base,
-    #rep{source = Source, user_ctx = UserCtx, options = Options}) ->
+    #rep{source = Source, options = Options}) ->
     Base2 = Base ++
         case couch_replicator_filters:parse(Options) of
         {ok, nil} ->
@@ -99,7 +99,7 @@ maybe_append_filters(Base,
         {ok, {view, Filter, QueryParams}} ->
             [Filter, QueryParams];
         {ok, {user, {Doc, Filter}, QueryParams}} ->
-            case couch_replicator_filters:fetch(Doc, Filter, Source, UserCtx) of
+            case couch_replicator_filters:fetch(Doc, Filter, Source) of
                 {ok, Code} ->
                     [Code, QueryParams];
                 {error, Error} ->
@@ -127,23 +127,19 @@ maybe_append_options(Options, RepOptions) ->
     end, [], Options).
 
 
-get_rep_endpoint(_UserCtx, #httpdb{url=Url, headers=Headers}) ->
+get_rep_endpoint(#httpdb{url=Url, headers=Headers}) ->
     DefaultHeaders = (#httpdb{})#httpdb.headers,
-    {remote, Url, Headers -- DefaultHeaders};
-get_rep_endpoint(UserCtx, <<DbName/binary>>) ->
-    {local, DbName, UserCtx}.
+    {remote, Url, Headers -- DefaultHeaders}.
 
-get_v4_endpoint(UserCtx, #httpdb{} = HttpDb) ->
-    {remote, Url, Headers} = get_rep_endpoint(UserCtx, HttpDb),
+get_v4_endpoint(#httpdb{} = HttpDb) ->
+    {remote, Url, Headers} = get_rep_endpoint(HttpDb),
     {{UserFromHeaders, _}, HeadersWithoutBasicAuth} =
         couch_replicator_utils:remove_basic_auth_from_headers(Headers),
     {UserFromUrl, Host, NonDefaultPort, Path} = get_v4_url_info(Url),
     User = pick_defined_value([UserFromUrl, UserFromHeaders]),
     OAuth = undefined, % Keep this to ensure checkpoints don't change
-    {remote, User, Host, NonDefaultPort, Path, HeadersWithoutBasicAuth, OAuth};
-get_v4_endpoint(UserCtx, <<DbName/binary>>) ->
-    {local, DbName, UserCtx}.
+    {remote, User, Host, NonDefaultPort, Path, HeadersWithoutBasicAuth, OAuth}.
pick_defined_value(Values) -> @@ -201,7 +197,7 @@ replication_id_convert_test_() -> http_v4_endpoint_test_() -> [?_assertMatch({remote, User, Host, Port, Path, HeadersNoAuth, undefined}, - get_v4_endpoint(nil, #httpdb{url = Url, headers = Headers})) || + get_v4_endpoint(#httpdb{url = Url, headers = Headers})) || {{User, Host, Port, Path, HeadersNoAuth}, {Url, Headers}} <- [ { {undefined, "host", default, "/", []}, diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl index 412ff7d05..565a2bd97 100644 --- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl +++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl @@ -40,8 +40,6 @@ ]). -import(couch_replicator_utils, [ - start_db_compaction_notifier/2, - stop_db_compaction_notifier/1, pp_rep_id/1 ]). @@ -75,8 +73,6 @@ workers, stats = couch_replicator_stats:new(), session_id, - source_db_compaction_notifier = nil, - target_db_compaction_notifier = nil, source_monitor = nil, target_monitor = nil, source_seq = nil, @@ -226,21 +222,6 @@ handle_call({report_seq_done, Seq, StatsInc}, From, update_task(NewState), {noreply, NewState}. -handle_cast({db_compacted, DbName}, State) -> - #rep_state{ - source = Source, - target = Target - } = State, - SourceName = couch_replicator_utils:local_db_name(Source), - TargetName = couch_replicator_utils:local_db_name(Target), - case DbName of - SourceName -> - {ok, NewSource} = couch_db:reopen(Source), - {noreply, State#rep_state{source = NewSource}}; - TargetName -> - {ok, NewTarget} = couch_db:reopen(Target), - {noreply, State#rep_state{target = NewTarget}} - end; handle_cast(checkpoint, State) -> case do_checkpoint(State) of @@ -412,8 +393,6 @@ terminate(Reason, State) -> terminate_cleanup(State) -> update_task(State), - stop_db_compaction_notifier(State#rep_state.source_db_compaction_notifier), - stop_db_compaction_notifier(State#rep_state.target_db_compaction_notifier), couch_replicator_api_wrap:db_close(State#rep_state.source), couch_replicator_api_wrap:db_close(State#rep_state.target). 
@@ -572,16 +551,16 @@ init_state(Rep) -> #rep{ id = {BaseId, _Ext}, source = Src0, target = Tgt, - options = Options, user_ctx = UserCtx, + options = Options, type = Type, view = View, start_time = StartTime, stats = Stats } = Rep, % Adjust minimum number of http source connections to 2 to avoid deadlock Src = adjust_maxconn(Src0, BaseId), - {ok, Source} = couch_replicator_api_wrap:db_open(Src, [{user_ctx, UserCtx}]), + {ok, Source} = couch_replicator_api_wrap:db_open(Src), {CreateTargetParams} = get_value(create_target_params, Options, {[]}), - {ok, Target} = couch_replicator_api_wrap:db_open(Tgt, [{user_ctx, UserCtx}], + {ok, Target} = couch_replicator_api_wrap:db_open(Tgt, get_value(create_target, Options, false), CreateTargetParams), {ok, SourceInfo} = couch_replicator_api_wrap:get_db_info(Source), @@ -613,10 +592,6 @@ init_state(Rep) -> src_starttime = get_value(<<"instance_start_time">>, SourceInfo), tgt_starttime = get_value(<<"instance_start_time">>, TargetInfo), session_id = couch_uuids:random(), - source_db_compaction_notifier = - start_db_compaction_notifier(Source, self()), - target_db_compaction_notifier = - start_db_compaction_notifier(Target, self()), source_monitor = db_monitor(Source), target_monitor = db_monitor(Target), source_seq = SourceSeq, diff --git a/src/couch_replicator/src/couch_replicator_utils.erl b/src/couch_replicator/src/couch_replicator_utils.erl index b0d706953..ccf241324 100644 --- a/src/couch_replicator/src/couch_replicator_utils.erl +++ b/src/couch_replicator/src/couch_replicator_utils.erl @@ -14,11 +14,6 @@ -export([ parse_rep_doc/2, - open_db/1, - close_db/1, - local_db_name/1, - start_db_compaction_notifier/2, - stop_db_compaction_notifier/1, replication_id/2, sum_stats/2, is_deleted/1, @@ -32,9 +27,6 @@ normalize_rep/1 ]). --export([ - handle_db_event/3 -]). -include_lib("couch/include/couch_db.hrl"). -include("couch_replicator.hrl"). @@ -46,50 +38,6 @@ ]). -open_db(#httpdb{} = HttpDb) -> - HttpDb; -open_db(Db) -> - DbName = couch_db:name(Db), - UserCtx = couch_db:get_user_ctx(Db), - {ok, NewDb} = couch_db:open(DbName, [{user_ctx, UserCtx}]), - NewDb. - - -close_db(#httpdb{}) -> - ok; -close_db(Db) -> - couch_db:close(Db). - - -local_db_name(#httpdb{}) -> - undefined; -local_db_name(Db) -> - couch_db:name(Db). - - -start_db_compaction_notifier(#httpdb{}, _) -> - nil; -start_db_compaction_notifier(Db, Server) -> - DbName = couch_db:name(Db), - {ok, Pid} = couch_event:link_listener( - ?MODULE, handle_db_event, Server, [{dbname, DbName}] - ), - Pid. - - -stop_db_compaction_notifier(nil) -> - ok; -stop_db_compaction_notifier(Listener) -> - couch_event:stop_listener(Listener). - - -handle_db_event(DbName, compacted, Server) -> - gen_server:cast(Server, {db_compacted, DbName}), - {ok, Server}; -handle_db_event(_DbName, _Event, Server) -> - {ok, Server}. - - rep_error_to_binary(Error) -> couch_util:to_binary(error_reason(Error)). 
@@ -289,14 +237,14 @@ normalize_rep_test_() -> ?_test(begin EJson1 = {[ {<<"source">>, <<"http://host.com/source_db">>}, - {<<"target">>, <<"local">>}, + {<<"target">>, <<"http://target.local/db">>}, {<<"doc_ids">>, [<<"a">>, <<"c">>, <<"b">>]}, {<<"other_field">>, <<"some_value">>} ]}, Rep1 = couch_replicator_docs:parse_rep_doc_without_id(EJson1), EJson2 = {[ {<<"other_field">>, <<"unrelated">>}, - {<<"target">>, <<"local">>}, + {<<"target">>, <<"http://target.local/db">>}, {<<"source">>, <<"http://host.com/source_db">>}, {<<"doc_ids">>, [<<"c">>, <<"a">>, <<"b">>]}, {<<"other_field2">>, <<"unrelated2">>} diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl index ec98fa0f3..986c32c0a 100644 --- a/src/couch_replicator/src/couch_replicator_worker.erl +++ b/src/couch_replicator/src/couch_replicator_worker.erl @@ -28,18 +28,11 @@ % TODO: maybe make both buffer max sizes configurable -define(DOC_BUFFER_BYTE_SIZE, 512 * 1024). % for remote targets --define(DOC_BUFFER_LEN, 10). % for local targets, # of documents -define(MAX_BULK_ATT_SIZE, 64 * 1024). -define(MAX_BULK_ATTS_PER_DOC, 8). -define(STATS_DELAY, 10000000). % 10 seconds (in microseconds) -define(MISSING_DOC_RETRY_MSEC, 2000). --import(couch_replicator_utils, [ - open_db/1, - close_db/1, - start_db_compaction_notifier/2, - stop_db_compaction_notifier/1 -]). -import(couch_util, [ to_binary/1, get_value/3 @@ -62,8 +55,6 @@ pending_fetch = nil, flush_waiter = nil, stats = couch_replicator_stats:new(), - source_db_compaction_notifier = nil, - target_db_compaction_notifier = nil, batch = #batch{} }). @@ -71,14 +62,7 @@ start_link(Cp, #httpdb{} = Source, Target, ChangesManager, MaxConns) -> gen_server:start_link( - ?MODULE, {Cp, Source, Target, ChangesManager, MaxConns}, []); - -start_link(Cp, Source, Target, ChangesManager, _MaxConns) -> - Pid = spawn_link(fun() -> - erlang:put(last_stats_report, os:timestamp()), - queue_fetch_loop(Source, Target, Cp, Cp, ChangesManager) - end), - {ok, Pid}. + ?MODULE, {Cp, Source, Target, ChangesManager, MaxConns}, []). init({Cp, Source, Target, ChangesManager, MaxConns}) -> @@ -92,12 +76,8 @@ init({Cp, Source, Target, ChangesManager, MaxConns}) -> cp = Cp, max_parallel_conns = MaxConns, loop = LoopPid, - source = open_db(Source), - target = open_db(Target), - source_db_compaction_notifier = - start_db_compaction_notifier(Source, self()), - target_db_compaction_notifier = - start_db_compaction_notifier(Target, self()) + source = Source, + target = Target }, {ok, State}. @@ -141,24 +121,6 @@ handle_call(flush, {Pid, _} = From, {noreply, State2#state{flush_waiter = From}}. -handle_cast({db_compacted, DbName} = Msg, #state{} = State) -> - #state{ - source = Source, - target = Target - } = State, - SourceName = couch_replicator_utils:local_db_name(Source), - TargetName = couch_replicator_utils:local_db_name(Target), - case DbName of - SourceName -> - {ok, NewSource} = couch_db:reopen(Source), - {noreply, State#state{source = NewSource}}; - TargetName -> - {ok, NewTarget} = couch_db:reopen(Target), - {noreply, State#state{target = NewTarget}}; - _Else -> - {stop, {unexpected_async_call, Msg}, State} - end; - handle_cast(Msg, State) -> {stop, {unexpected_async_call, Msg}, State}. @@ -213,11 +175,8 @@ handle_info({'EXIT', Pid, Reason}, State) -> {stop, {process_died, Pid, Reason}, State}. 
-terminate(_Reason, State) -> - close_db(State#state.source), - close_db(State#state.target), - stop_db_compaction_notifier(State#state.source_db_compaction_notifier), - stop_db_compaction_notifier(State#state.target_db_compaction_notifier). +terminate(_Reason, _State) -> + ok. format_status(_Opt, [_PDict, State]) -> #state{ @@ -253,20 +212,10 @@ queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager) -> ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity), queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager); {changes, ChangesManager, Changes, ReportSeq} -> - Target2 = open_db(Target), - {IdRevs, Stats0} = find_missing(Changes, Target2), - case Source of - #httpdb{} -> - ok = gen_server:call(Parent, {add_stats, Stats0}, infinity), - remote_process_batch(IdRevs, Parent), - {ok, Stats} = gen_server:call(Parent, flush, infinity); - _Db -> - Source2 = open_db(Source), - Stats = local_process_batch( - IdRevs, Cp, Source2, Target2, #batch{}, Stats0), - close_db(Source2) - end, - close_db(Target2), + {IdRevs, Stats0} = find_missing(Changes, Target), + ok = gen_server:call(Parent, {add_stats, Stats0}, infinity), + remote_process_batch(IdRevs, Parent), + {ok, Stats} = gen_server:call(Parent, flush, infinity), ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity), erlang:put(last_stats_report, os:timestamp()), couch_log:debug("Worker reported completion of seq ~p", [ReportSeq]), @@ -274,32 +223,6 @@ queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager) -> end. -local_process_batch([], _Cp, _Src, _Tgt, #batch{docs = []}, Stats) -> - Stats; - -local_process_batch([], Cp, Source, Target, #batch{docs = Docs, size = Size}, Stats) -> - case Target of - #httpdb{} -> - couch_log:debug("Worker flushing doc batch of size ~p bytes", [Size]); - _Db -> - couch_log:debug("Worker flushing doc batch of ~p docs", [Size]) - end, - Stats2 = flush_docs(Target, Docs), - Stats3 = couch_replicator_utils:sum_stats(Stats, Stats2), - local_process_batch([], Cp, Source, Target, #batch{}, Stats3); - -local_process_batch([IdRevs | Rest], Cp, Source, Target, Batch, Stats) -> - {ok, {_, DocList, Stats2, _}} = fetch_doc( - Source, IdRevs, fun local_doc_handler/2, {Target, [], Stats, Cp}), - {Batch2, Stats3} = lists:foldl( - fun(Doc, {Batch0, Stats0}) -> - {Batch1, S} = maybe_flush_docs(Target, Batch0, Doc), - {Batch1, couch_replicator_utils:sum_stats(Stats0, S)} - end, - {Batch, Stats2}, DocList), - local_process_batch(Rest, Cp, Source, Target, Batch2, Stats3). - - remote_process_batch([], _Parent) -> ok; @@ -319,10 +242,8 @@ remote_process_batch([{Id, Revs, PAs} | Rest], Parent) -> spawn_doc_reader(Source, Target, FetchParams) -> Parent = self(), spawn_link(fun() -> - Source2 = open_db(Source), fetch_doc( - Source2, FetchParams, fun remote_doc_handler/2, {Parent, Target}), - close_db(Source2) + Source, FetchParams, fun remote_doc_handler/2, {Parent, Target}) end). @@ -350,29 +271,6 @@ fetch_doc(Source, {Id, Revs, PAs}, DocHandler, Acc) -> end. 
-local_doc_handler({ok, Doc}, {Target, DocList, Stats, Cp}) -> - Stats2 = couch_replicator_stats:increment(docs_read, Stats), - case batch_doc(Doc) of - true -> - {ok, {Target, [Doc | DocList], Stats2, Cp}}; - false -> - couch_log:debug("Worker flushing doc with attachments", []), - Target2 = open_db(Target), - Success = (flush_doc(Target2, Doc) =:= ok), - close_db(Target2), - Stats3 = case Success of - true -> - couch_replicator_stats:increment(docs_written, Stats2); - false -> - couch_replicator_stats:increment(doc_write_failures, Stats2) - end, - Stats4 = maybe_report_stats(Cp, Stats3), - {ok, {Target, DocList, Stats4, Cp}} - end; -local_doc_handler(_, Acc) -> - {ok, Acc}. - - remote_doc_handler({ok, #doc{atts = []} = Doc}, {Parent, _} = Acc) -> ok = gen_server:call(Parent, {batch_doc, Doc}, infinity), {ok, Acc}; @@ -383,9 +281,7 @@ remote_doc_handler({ok, Doc}, {Parent, Target} = Acc) -> % convenient to call it ASAP to avoid ibrowse inactivity timeouts. Stats = couch_replicator_stats:new([{docs_read, 1}]), couch_log:debug("Worker flushing doc with attachments", []), - Target2 = open_db(Target), - Success = (flush_doc(Target2, Doc) =:= ok), - close_db(Target2), + Success = (flush_doc(Target, Doc) =:= ok), {Result, Stats2} = case Success of true -> {{ok, Acc}, couch_replicator_stats:increment(docs_written, Stats)}; @@ -402,17 +298,13 @@ spawn_writer(Target, #batch{docs = DocList, size = Size}) -> case {Target, Size > 0} of {#httpdb{}, true} -> couch_log:debug("Worker flushing doc batch of size ~p bytes", [Size]); - {_Db, true} -> - couch_log:debug("Worker flushing doc batch of ~p docs", [Size]); _ -> ok end, Parent = self(), spawn_link( fun() -> - Target2 = open_db(Target), - Stats = flush_docs(Target2, DocList), - close_db(Target2), + Stats = flush_docs(Target, DocList), ok = gen_server:call(Parent, {add_stats, Stats}, infinity) end). @@ -462,17 +354,6 @@ maybe_flush_docs(#httpdb{} = Target, Batch, Doc) -> Stats = couch_replicator_stats:new(), {#batch{docs = [JsonDoc | DocAcc], size = SizeAcc2}, Stats} end - end; - -maybe_flush_docs(Target, #batch{docs = DocAcc, size = SizeAcc}, Doc) -> - case SizeAcc + 1 of - SizeAcc2 when SizeAcc2 >= ?DOC_BUFFER_LEN -> - couch_log:debug("Worker flushing doc batch of ~p docs", [SizeAcc2]), - Stats = flush_docs(Target, [Doc | DocAcc]), - {#batch{}, Stats}; - SizeAcc2 -> - Stats = couch_replicator_stats:new(), - {#batch{docs = [Doc | DocAcc], size = SizeAcc2}, Stats} end. diff --git a/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl b/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl index 7fe84d2d9..ac4bb84f3 100644 --- a/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl +++ b/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl @@ -33,7 +33,7 @@ teardown(_, {Ctx, {Source, Target}}) -> attachment_too_large_replication_test_() -> - Pairs = [{local, remote}, {remote, local}, {remote, remote}], + Pairs = [{remote, remote}], { "Attachment size too large replication tests", { @@ -96,8 +96,6 @@ delete_db(DbName) -> ok = couch_server:delete(DbName, [?ADMIN_CTX]). 
-db_url(local, DbName) -> - DbName; db_url(remote, DbName) -> Addr = config:get("httpd", "bind_address", "127.0.0.1"), Port = mochiweb_socket_server:get(couch_httpd, port), diff --git a/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl index 7cc530c19..eb3fc82c5 100644 --- a/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl +++ b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl @@ -33,8 +33,6 @@ setup() -> ok = couch_db:close(Db), DbName. -setup(local) -> - setup(); setup(remote) -> {remote, setup()}; setup({A, B}) -> @@ -56,8 +54,7 @@ teardown(_, {Ctx, {Source, Target}}) -> ok = test_util:stop_couch(Ctx). compact_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], + Pairs = [{remote, remote}], { "Compaction during replication tests", { diff --git a/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl index d34e9f020..70b25a31b 100644 --- a/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl +++ b/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl @@ -60,8 +60,7 @@ teardown(_, {Ctx, {Source, Target}}) -> ok = test_util:stop_couch(Ctx). filtered_replication_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], + Pairs = [{remote, remote}], { "Filtered replication tests", { @@ -72,8 +71,7 @@ filtered_replication_test_() -> }. query_filtered_replication_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], + Pairs = [{remote, remote}], { "Filtered with query replication tests", { @@ -84,7 +82,7 @@ query_filtered_replication_test_() -> }. view_filtered_replication_test_() -> - Pairs = [{local, local}], + Pairs = [{remote, remote}], { "Filtered with a view replication tests", { @@ -236,8 +234,6 @@ create_docs(DbName) -> delete_db(DbName) -> ok = couch_server:delete(DbName, [?ADMIN_CTX]). -db_url(local, DbName) -> - DbName; db_url(remote, DbName) -> Addr = config:get("httpd", "bind_address", "127.0.0.1"), Port = mochiweb_socket_server:get(couch_httpd, port), diff --git a/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl index 70eda0566..1447acfa7 100644 --- a/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl +++ b/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl @@ -33,8 +33,7 @@ teardown(_, {Ctx, {Source, Target}}) -> id_too_long_replication_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], + Pairs = [{remote, remote}], { "Doc id too long tests", { @@ -86,8 +85,6 @@ delete_db(DbName) -> ok = couch_server:delete(DbName, [?ADMIN_CTX]). -db_url(local, DbName) -> - DbName; db_url(remote, DbName) -> Addr = config:get("httpd", "bind_address", "127.0.0.1"), Port = mochiweb_socket_server:get(couch_httpd, port), diff --git a/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl index b9adf5c4b..27c89a0cd 100644 --- a/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl +++ b/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl @@ -33,8 +33,6 @@ setup() -> ok = couch_db:close(Db), DbName. 
-setup(local) -> - setup(); setup(remote) -> {remote, setup()}; setup({A, B}) -> @@ -58,8 +56,7 @@ teardown(_, {Ctx, {Source, Target}}) -> ok = test_util:stop_couch(Ctx). large_atts_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], + Pairs = [{remote, remote}], { "Replicate docs with large attachments", { diff --git a/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl index eee5b1647..be1bfa344 100644 --- a/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl +++ b/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl @@ -37,8 +37,7 @@ setup() -> ok = couch_db:close(Db), DbName. -setup(local) -> - setup(); + setup(remote) -> {remote, setup()}; setup({A, B}) -> @@ -60,8 +59,7 @@ teardown(_, {Ctx, {Source, Target}}) -> ok = test_util:stop_couch(Ctx). docs_with_many_leaves_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], + Pairs = [{remote, remote}], { "Replicate documents with many leaves", { diff --git a/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl index c1681781f..ff08b5ee5 100644 --- a/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl +++ b/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl @@ -30,8 +30,6 @@ setup() -> ok = couch_db:close(Db), DbName. -setup(local) -> - setup(); setup(remote) -> {remote, setup()}; setup({A, B}) -> @@ -53,8 +51,7 @@ teardown(_, {Ctx, {Source, Target}}) -> ok = test_util:stop_couch(Ctx). missing_stubs_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], + Pairs = [{remote, remote}], { "Replicate docs with missing stubs (COUCHDB-1365)", { diff --git a/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl index a7f4c5df3..7d92bdcb1 100644 --- a/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl +++ b/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl @@ -31,8 +31,7 @@ teardown(_, {Ctx, {Source, Target}}) -> ok = test_util:stop_couch(Ctx). selector_replication_test_() -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], + Pairs = [{remote, remote}], { "Selector filtered replication tests", { @@ -113,8 +112,6 @@ create_docs(DbName) -> delete_db(DbName) -> ok = couch_server:delete(DbName, [?ADMIN_CTX]). -db_url(local, DbName) -> - DbName; db_url(remote, DbName) -> Addr = config:get("httpd", "bind_address", "127.0.0.1"), Port = mochiweb_socket_server:get(couch_httpd, port), diff --git a/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl b/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl index af3a285f5..8aebbe151 100644 --- a/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl +++ b/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl @@ -19,9 +19,6 @@ setup() -> DbName. 
-setup(local) -> - setup(); - setup(remote) -> {remote, setup()}; @@ -47,7 +44,7 @@ teardown(_, {Ctx, {Source, Target}}) -> reduce_max_request_size_test_() -> - Pairs = [{local, remote}, {remote, remote}], + Pairs = [{remote, remote}], { "Replicate docs when target has a small max_http_request_size", { diff --git a/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl index c2fcf8bf1..8e4a21dbb 100644 --- a/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl +++ b/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl @@ -51,8 +51,6 @@ setup() -> ok = couch_db:close(Db), DbName. -setup(local) -> - setup(); setup(remote) -> {remote, setup()}; setup({_, Fun, {A, B}}) -> @@ -88,8 +86,7 @@ use_checkpoints_test_() -> }. use_checkpoints_tests(UseCheckpoints, Fun) -> - Pairs = [{local, local}, {local, remote}, - {remote, local}, {remote, remote}], + Pairs = [{remote, remote}], { "use_checkpoints: " ++ atom_to_list(UseCheckpoints), { diff --git a/test/elixir/test/replication_test.exs b/test/elixir/test/replication_test.exs index a6e1303e6..6d4360d88 100644 --- a/test/elixir/test/replication_test.exs +++ b/test/elixir/test/replication_test.exs @@ -9,9 +9,6 @@ defmodule ReplicationTest do # TODO: Parameterize these @admin_account "adm:pass" @db_pairs_prefixes [ - {"local-to-local", "", ""}, - {"remote-to-local", "http://127.0.0.1:15984/", ""}, - {"local-to-remote", "", "http://127.0.0.1:15984/"}, {"remote-to-remote", "http://127.0.0.1:15984/", "http://127.0.0.1:15984/"} ] @@ -21,20 +18,11 @@ defmodule ReplicationTest do @moduletag :skip_on_jenkins - test "source database does not exist" do - name = random_db_name() - check_not_found(name <> "_src", name <> "_tgt") - end - - test "source database not found with path - COUCHDB-317" do - name = random_db_name() - check_not_found(name <> "_src", name <> "_tgt") - end - test "source database not found with host" do name = random_db_name() - url = "http://127.0.0.1:15984/" <> name <> "_src" - check_not_found(url, name <> "_tgt") + src_url = "http://127.0.0.1:15984/" <> name <> "_src" + tgt_url = "http://127.0.0.1:15984/" <> name <> "_tgt" + check_not_found(src_url, tgt_url) end def check_not_found(src, tgt) do @@ -55,7 +43,9 @@ defmodule ReplicationTest do doc = %{"_id" => "doc1"} [doc] = save_docs(src_db_name, [doc]) - result = replicate(src_db_name, "http://127.0.0.1:15984/" <> tgt_db_name) + repl_src = "http://127.0.0.1:15984/" <> src_db_name + repl_tgt = "http://127.0.0.1:15984/" <> tgt_db_name + result = replicate(repl_src, repl_tgt) assert result["ok"] assert is_list(result["history"]) history = Enum.at(result["history"], 0) @@ -79,7 +69,9 @@ defmodule ReplicationTest do [doc] = save_docs(src_db_name, [doc]) - result = replicate(src_db_name, "http://127.0.0.1:15984/" <> tgt_db_name) + repl_src = "http://127.0.0.1:15984/" <> src_db_name + repl_tgt = "http://127.0.0.1:15984/" <> tgt_db_name + result = replicate(repl_src, repl_tgt) assert result["ok"] assert is_list(result["history"]) @@ -127,7 +119,8 @@ defmodule ReplicationTest do repl_body = %{:continuous => true, :create_target => true} repl_src = "http://127.0.0.1:15984/" <> src_db_name - result = replicate(repl_src, tgt_db_name, body: repl_body) + repl_tgt = "http://127.0.0.1:15984/" <> tgt_db_name + result = replicate(repl_src, repl_tgt, body: repl_body) assert result["ok"] assert is_binary(result["_local_id"]) @@ -167,8 +160,9 @@ defmodule ReplicationTest 
do
       save_docs(src_db_name, make_docs(1..6))
 
       repl_src = "http://127.0.0.1:15984/" <> src_db_name
+      repl_tgt = "http://127.0.0.1:15984/" <> tgt_db_name
       repl_body = %{"continuous" => true}
-      result = replicate(repl_src, tgt_db_name, body: repl_body)
+      result = replicate(repl_src, repl_tgt, body: repl_body)
       assert result["ok"]
       assert is_binary(result["_local_id"])
@@ -282,7 +276,9 @@ defmodule ReplicationTest do
       end
     end
 
-    result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+    repl_src = src_prefix <> src_db_name
+    repl_tgt = tgt_prefix <> tgt_db_name
+    result = replicate(repl_src, repl_tgt)
     assert result["ok"]
 
     src_info =
-- 
cgit v1.2.1


From 3e9b3febc43edf742de8facef6e6c52ae7400bf2 Mon Sep 17 00:00:00 2001
From: Nick Vatamaniuc
Date: Wed, 31 Jul 2019 15:24:26 -0400
Subject: Switch to only using elixir replication integration test

And remove the JS version. The Elixir test has been running decently on
Travis from what I observed. However, it was disabled on Jenkins runs.
With a recent hardware upgrade, perhaps there is a chance this test will
start passing there too.
---
 test/elixir/test/replication_test.exs |    2 -
 test/javascript/tests/replication.js  | 1920 ---------------------------------
 2 files changed, 1922 deletions(-)
 delete mode 100644 test/javascript/tests/replication.js

diff --git a/test/elixir/test/replication_test.exs b/test/elixir/test/replication_test.exs
index 6d4360d88..11687ab17 100644
--- a/test/elixir/test/replication_test.exs
+++ b/test/elixir/test/replication_test.exs
@@ -16,8 +16,6 @@ defmodule ReplicationTest do
   # happens for JavaScript tests.
   @moduletag config: [{"replicator", "startup_jitter", "0"}]
 
-  @moduletag :skip_on_jenkins
-
   test "source database not found with host" do
     name = random_db_name()
     src_url = "http://127.0.0.1:15984/" <> name <> "_src"
diff --git a/test/javascript/tests/replication.js b/test/javascript/tests/replication.js
deleted file mode 100644
index ba586b409..000000000
--- a/test/javascript/tests/replication.js
+++ /dev/null
@@ -1,1920 +0,0 @@
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
- -couchTests.replication = function(debug) { -// return console.log('TODO'); - if (debug) debugger; - - var host = CouchDB.host; - // as we change names during execution, do NOT use test_suite_db or a - // pre-computed value like ''+sourceDb.name (compute only on use) - var sourceDb; - var targetDb; - - var dbPairsPrefixes = [ - { - source: "", - target: "" - }, - { - source: CouchDB.protocol + host + "/", - target: "" - }, - { - source: "", - target: CouchDB.protocol + host + "/" - }, - { - source: CouchDB.protocol + host + "/", - target: CouchDB.protocol + host + "/" - } - ]; - - var att1_data = CouchDB.request("GET", "/_utils/script/test/lorem.txt"); - att1_data = att1_data.responseText; - - var att2_data = CouchDB.request("GET", "/_utils/script/test/lorem_b64.txt"); - att2_data = att2_data.responseText; - - var sourceInfo, targetInfo; - var docs, doc, copy; - var repResult; - var i, j, k; - - - function makeAttData(minSize) { - var data = att1_data; - - while (data.length < minSize) { - data = data + att1_data; - } - return data; - } - - - function runAllNodes(callback) { - // new and fancy: clustered version: pull cluster_members and walk over all of them - var xhr = CouchDB.request("GET", "/_membership"); - T(xhr.status === 200); - JSON.parse(xhr.responseText).cluster_nodes.forEach(callback); - } - - function runFirstNode(callback) { - // new and fancy: clustered version: pull cluster_members and walk over all of them - var xhr = CouchDB.request("GET", "/_membership"); - T(xhr.status === 200); - var node = JSON.parse(xhr.responseText).cluster_nodes[0]; - return callback(node); - } - - function getCompressionInfo() { - return runFirstNode(function(node) { - var xhr = CouchDB.request( - "GET", - "_node/" + node + "/_config/attachments" - ); - T(xhr.status === 200); - var res = JSON.parse(xhr.responseText); - return {"level": res.compression_level, "types": res.compressible_types}; - }); - } - - function enableAttCompression(level, types) { - runAllNodes(function(node) { - var xhr = CouchDB.request( - "PUT", - "_node/" + node + "/_config/attachments/compression_level", - { - body: JSON.stringify(level), - headers: {"X-Couch-Persist": "false"} - } - ); - T(xhr.status === 200); - xhr = CouchDB.request( - "PUT", - "_node/" + node + "/_config/attachments/compressible_types", - { - body: JSON.stringify(types), - headers: {"X-Couch-Persist": "false"} - } - ); - T(xhr.status === 200); - }); - } - - function disableAttCompression() { - runAllNodes(function(node) { - var xhr = CouchDB.request( - "PUT", - "_node/" + node + "/_config/attachments/compression_level", - { - body: JSON.stringify("0"), - headers: {"X-Couch-Persist": "false"} - } - ); - T(xhr.status === 200); - }); - } - - - function populateSourceDb(docs, dontRecreateDb) { - if(dontRecreateDb !== true) { - if(sourceDb) { - sourceDb.deleteDb(); - } - sourceDb = new CouchDB(get_random_db_name() + "_src",{"X-Couch-Full-Commit":"false"}); - sourceDb.createDb(); - } - for (var i = 0; i < docs.length; i++) { - var doc = docs[i]; - delete doc._rev; - } - if (docs.length > 0) { - sourceDb.bulkSave(docs); - } - } - function populateTargetDb(docs, dontRecreateDb) { - if(dontRecreateDb !== true) { - if(targetDb) { - targetDb.deleteDb(); - } - targetDb = new CouchDB(get_random_db_name() + "_tgt",{"X-Couch-Full-Commit":"false"}); - targetDb.createDb(); - } - for (var i = 0; i < docs.length; i++) { - var doc = docs[i]; - delete doc._rev; - } - if (docs.length > 0) { - targetDb.bulkSave(docs); - } - } - - - function addAtt(db, doc, attName, attData, 
type) { - var uri = "/" + db.name + "/" + encodeURIComponent(doc._id) + "/" + attName; - - if (doc._rev) { - uri += "?rev=" + doc._rev; - } - - var xhr = CouchDB.request("PUT", uri, { - headers: { - "Content-Type": type - }, - body: attData - }); - - T(xhr.status === 201); - doc._rev = JSON.parse(xhr.responseText).rev; - } - - - function compareObjects(o1, o2) { - for (var p in o1) { - if (o1[p] === null && o2[p] !== null) { - return false; - } else if (typeof o1[p] === "object") { - if ((typeof o2[p] !== "object") || o2[p] === null) { - return false; - } - if (!arguments.callee(o1[p], o2[p])) { - return false; - } - } else { - if (o1[p] !== o2[p]) { - return false; - } - } - } - return true; - } - - - function getTask(rep_id, delay) { - var t0 = new Date(); - var t1; - do { - var xhr = CouchDB.request("GET", "/_active_tasks"); - var tasks = JSON.parse(xhr.responseText); - for(var i = 0; i < tasks.length; i++) { - if(tasks[i].replication_id == repResult._local_id) { - return tasks[i]; - } - } - sleep(500); - t1 = new Date(); - } while((t1 - t0) <= delay); - - return null; - } - - function getSourceLastSeq(sourceDb) { - return sourceDb.changes({"since":"now"}).last_seq; - } - - function waitForSeq(sourceDb, targetDb, rep_id) { - var sourceSeq = getSourceLastSeq(sourceDb), - t0 = new Date(), - t1, - ms = 30000; - - do { - var task = getTask(rep_id, 0); - if(task && task["through_seq"] == sourceSeq) { - return; - } - t1 = new Date(); - sleep(500); - } while (((t1 - t0) <= ms)); - throw(Error('Timeout waiting for replication through_seq = source update seq')); - } - - function waitReplicationTaskStop(rep_id) { - var t0 = new Date(), - t1, - ms = 30000; - do { - var task = getTask(rep_id, 0); - if(task == null) { - return; - } - t1 = new Date(); - sleep(500); - } while (((t1 - t0) <= ms)); - throw(Error('Timeout waiting for replication task stop' + rep_id)); - } - - // test simple replications (not continuous, not filtered), including - // conflict creation - docs = makeDocs(1, 21); - docs.push({ - _id: "_design/foo", - language: "javascript", - value: "ddoc" - }); - - for (i = 0; i < dbPairsPrefixes.length; i++) { - populateSourceDb(docs); - populateTargetDb([]); - - // add some attachments - for (j = 10; j < 15; j++) { - addAtt(sourceDb, docs[j], "readme.txt", att1_data, "text/plain"); - } - - repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name); - TEquals(true, repResult.ok); - - sourceInfo = sourceDb.info(); - targetInfo = targetDb.info(); - - TEquals(sourceInfo.doc_count, targetInfo.doc_count); - - TEquals('string', typeof repResult.session_id); - // we can't rely on sequences in a cluster - //TEquals(repResult.source_last_seq, sourceInfo.update_seq); - TEquals(true, repResult.history instanceof Array); - TEquals(1, repResult.history.length); - TEquals(repResult.history[0].session_id, repResult.session_id); - TEquals('string', typeof repResult.history[0].start_time); - TEquals('string', typeof repResult.history[0].end_time); - TEquals(0, repResult.history[0].start_last_seq); - // we can't rely on sequences in a cluster - //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq); - TEquals(sourceInfo.doc_count, repResult.history[0].missing_checked); - TEquals(sourceInfo.doc_count, repResult.history[0].missing_found); - TEquals(sourceInfo.doc_count, repResult.history[0].docs_read); - TEquals(sourceInfo.doc_count, repResult.history[0].docs_written); - 
TEquals(0, repResult.history[0].doc_write_failures); - - for (j = 0; j < docs.length; j++) { - doc = docs[j]; - copy = targetDb.open(doc._id); - - T(copy !== null); - TEquals(true, compareObjects(doc, copy)); - - if (j >= 10 && j < 15) { - var atts = copy._attachments; - TEquals('object', typeof atts); - TEquals('object', typeof atts["readme.txt"]); - TEquals(2, atts["readme.txt"].revpos); - TEquals(0, atts["readme.txt"].content_type.indexOf("text/plain")); - TEquals(true, atts["readme.txt"].stub); - - var att_copy = CouchDB.request( - "GET", "/" + targetDb.name + "/" + copy._id + "/readme.txt" - ).responseText; - TEquals(att1_data.length, att_copy.length); - TEquals(att1_data, att_copy); - } - } - - - // add one more doc to source, more attachments to some existing docs - // and replicate again - var newDoc = { - _id: "foo666", - value: "d" - }; - TEquals(true, sourceDb.save(newDoc).ok); - - // add some more attachments - for (j = 10; j < 15; j++) { - addAtt(sourceDb, docs[j], "data.dat", att2_data, "application/binary"); - } - - repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name); - TEquals(true, repResult.ok); - - sourceInfo = sourceDb.info(); - targetInfo = targetDb.info(); - - TEquals(targetInfo.doc_count, sourceInfo.doc_count); - - TEquals('string', typeof repResult.session_id); - // we can't rely on sequences in a cluster - //TEquals(sourceInfo.update_seq, repResult.source_last_seq); - TEquals(true, repResult.history instanceof Array); - TEquals(2, repResult.history.length); - TEquals(repResult.history[0].session_id, repResult.session_id); - TEquals('string', typeof repResult.history[0].start_time); - TEquals('string', typeof repResult.history[0].end_time); - // we can't rely on sequences in a cluster - //TEquals((sourceInfo.update_seq - 6), repResult.history[0].start_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq); - TEquals(6, repResult.history[0].missing_checked); - TEquals(6, repResult.history[0].missing_found); - TEquals(6, repResult.history[0].docs_read); - TEquals(6, repResult.history[0].docs_written); - TEquals(0, repResult.history[0].doc_write_failures); - - copy = targetDb.open(newDoc._id); - T(copy !== null); - TEquals(newDoc._id, copy._id); - TEquals(newDoc.value, copy.value); - - for (j = 10; j < 15; j++) { - doc = docs[j]; - copy = targetDb.open(doc._id); - - T(copy !== null); - TEquals(true, compareObjects(doc, copy)); - - var atts = copy._attachments; - TEquals('object', typeof atts); - TEquals('object', typeof atts["readme.txt"]); - TEquals(2, atts["readme.txt"].revpos); - TEquals(0, atts["readme.txt"].content_type.indexOf("text/plain")); - TEquals(true, atts["readme.txt"].stub); - - var att1_copy = CouchDB.request( - "GET", "/" + targetDb.name + "/" + copy._id + "/readme.txt" - ).responseText; - TEquals(att1_data.length, att1_copy.length); - TEquals(att1_data, att1_copy); - - TEquals('object', typeof atts["data.dat"]); - TEquals(3, atts["data.dat"].revpos); - TEquals(0, atts["data.dat"].content_type.indexOf("application/binary")); - TEquals(true, atts["data.dat"].stub); - - var att2_copy = CouchDB.request( - "GET", "/" + targetDb.name + "/" + copy._id + "/data.dat" - ).responseText; - TEquals(att2_data.length, att2_copy.length); - TEquals(att2_data, att2_copy); - } - - // test deletion is replicated - doc = sourceDb.open(docs[1]._id); - TEquals(true, sourceDb.deleteDoc(doc).ok); - - repResult = 
CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name); - TEquals(true, repResult.ok); - - sourceInfo = sourceDb.info(); - targetInfo = targetDb.info(); - - TEquals(targetInfo.doc_count, sourceInfo.doc_count); - TEquals(targetInfo.doc_del_count, sourceInfo.doc_del_count); - TEquals(1, targetInfo.doc_del_count); - - TEquals(true, repResult.history instanceof Array); - TEquals(3, repResult.history.length); - // we can't rely on sequences in a cluster - //TEquals((sourceInfo.update_seq - 1), repResult.history[0].start_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq); - TEquals(1, repResult.history[0].missing_checked); - TEquals(1, repResult.history[0].missing_found); - TEquals(1, repResult.history[0].docs_read); - TEquals(1, repResult.history[0].docs_written); - TEquals(0, repResult.history[0].doc_write_failures); - - copy = targetDb.open(docs[1]._id); - TEquals(null, copy); - - var changes = targetDb.changes({since: 0}); - // there is no guarantee of ordering also - // however: the doc has to appear somewhere - //var idx = changes.results.length - 1; - var changesResDoc1 = changes.results.filter(function(c){return c.id == docs[1]._id;}); - TEquals(1, changesResDoc1.length); - TEquals(docs[1]._id, changesResDoc1[0].id); - TEquals(true, changesResDoc1[0].deleted); - - // test conflict - doc = sourceDb.open(docs[0]._id); - doc.value = "white"; - TEquals(true, sourceDb.save(doc).ok); - - copy = targetDb.open(docs[0]._id); - copy.value = "black"; - TEquals(true, targetDb.save(copy).ok); - - repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name); - TEquals(true, repResult.ok); - - sourceInfo = sourceDb.info(); - targetInfo = targetDb.info(); - - TEquals(sourceInfo.doc_count, targetInfo.doc_count); - - TEquals(true, repResult.history instanceof Array); - TEquals(4, repResult.history.length); - // we can't rely on sequences in a cluster - //TEquals((sourceInfo.update_seq - 1), repResult.history[0].start_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq); - TEquals(1, repResult.history[0].missing_checked); - TEquals(1, repResult.history[0].missing_found); - TEquals(1, repResult.history[0].docs_read); - TEquals(1, repResult.history[0].docs_written); - TEquals(0, repResult.history[0].doc_write_failures); - - copy = targetDb.open(docs[0]._id, {conflicts: true}); - - TEquals(0, copy._rev.indexOf("2-")); - TEquals(true, copy._conflicts instanceof Array); - TEquals(1, copy._conflicts.length); - TEquals(0, copy._conflicts[0].indexOf("2-")); - - // replicate again with conflict - doc.value = "yellow"; - TEquals(true, sourceDb.save(doc).ok); - - repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name); - TEquals(true, repResult.ok); - - sourceInfo = sourceDb.info(); - targetInfo = targetDb.info(); - - TEquals(sourceInfo.doc_count, targetInfo.doc_count); - - TEquals(true, repResult.history instanceof Array); - TEquals(5, repResult.history.length); - // we can't rely on sequences in a cluster - //TEquals((sourceInfo.update_seq - 1), repResult.history[0].start_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq); - TEquals(1, 
repResult.history[0].missing_checked); - TEquals(1, repResult.history[0].missing_found); - TEquals(1, repResult.history[0].docs_read); - TEquals(1, repResult.history[0].docs_written); - TEquals(0, repResult.history[0].doc_write_failures); - - copy = targetDb.open(docs[0]._id, {conflicts: true}); - - TEquals(0, copy._rev.indexOf("3-")); - TEquals(true, copy._conflicts instanceof Array); - TEquals(1, copy._conflicts.length); - TEquals(0, copy._conflicts[0].indexOf("2-")); - - // resolve the conflict - TEquals(true, targetDb.deleteDoc({_id: copy._id, _rev: copy._conflicts[0]}).ok); - - // replicate again, check there are no more conflicts - doc.value = "rainbow"; - TEquals(true, sourceDb.save(doc).ok); - - repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name); - TEquals(true, repResult.ok); - - sourceInfo = sourceDb.info(); - targetInfo = targetDb.info(); - - TEquals(sourceInfo.doc_count, targetInfo.doc_count); - - TEquals(true, repResult.history instanceof Array); - TEquals(6, repResult.history.length); - // we can't rely on sequences in a cluster - //TEquals((sourceInfo.update_seq - 1), repResult.history[0].start_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq); - TEquals(1, repResult.history[0].missing_checked); - TEquals(1, repResult.history[0].missing_found); - TEquals(1, repResult.history[0].docs_read); - TEquals(1, repResult.history[0].docs_written); - TEquals(0, repResult.history[0].doc_write_failures); - - copy = targetDb.open(docs[0]._id, {conflicts: true}); - - TEquals(0, copy._rev.indexOf("4-")); - TEquals('undefined', typeof copy._conflicts); - - // test that revisions already in a target are not copied - TEquals(true, sourceDb.save({_id: "foo1", value: 111}).ok); - TEquals(true, targetDb.save({_id: "foo1", value: 111}).ok); - TEquals(true, sourceDb.save({_id: "foo2", value: 222}).ok); - TEquals(true, sourceDb.save({_id: "foo3", value: 333}).ok); - TEquals(true, targetDb.save({_id: "foo3", value: 333}).ok); - - repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name); - TEquals(true, repResult.ok); - - sourceInfo = sourceDb.info(); - // we can't rely on sequences in a cluster - //TEquals(sourceInfo.update_seq, repResult.source_last_seq); - //TEquals(sourceInfo.update_seq - 3, repResult.history[0].start_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].end_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq); - TEquals(3, repResult.history[0].missing_checked); - TEquals(1, repResult.history[0].missing_found); - TEquals(1, repResult.history[0].docs_read); - TEquals(1, repResult.history[0].docs_written); - TEquals(0, repResult.history[0].doc_write_failures); - - TEquals(true, sourceDb.save({_id: "foo4", value: 444}).ok); - TEquals(true, targetDb.save({_id: "foo4", value: 444}).ok); - TEquals(true, sourceDb.save({_id: "foo5", value: 555}).ok); - TEquals(true, targetDb.save({_id: "foo5", value: 555}).ok); - - repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name); - TEquals(true, repResult.ok); - - sourceInfo = sourceDb.info(); - // we can't rely on sequences in a cluster - //TEquals(sourceInfo.update_seq, repResult.source_last_seq); - //TEquals(sourceInfo.update_seq - 2, repResult.history[0].start_last_seq); - //TEquals(sourceInfo.update_seq, 
repResult.history[0].end_last_seq); - //TEquals(sourceInfo.update_seq, repResult.history[0].recorded_seq); - TEquals(2, repResult.history[0].missing_checked); - TEquals(0, repResult.history[0].missing_found); - TEquals(0, repResult.history[0].docs_read); - TEquals(0, repResult.history[0].docs_written); - TEquals(0, repResult.history[0].doc_write_failures); - - repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name); - TEquals(true, repResult.ok); - TEquals(true, repResult.no_changes); - sourceInfo = sourceDb.info(); - // we can't rely on sequences in a cluster - //TEquals(sourceInfo.update_seq, repResult.source_last_seq); - } - - - // test error when source database does not exist - try { - CouchDB.replicate("foobar", "test_suite_db"); - T(false, "should have failed with db_not_found error"); - } catch (x) { - TEquals("db_not_found", x.error); - } - - // validate COUCHDB-317 - try { - CouchDB.replicate("/foobar", "test_suite_db"); - T(false, "should have failed with db_not_found error"); - } catch (x) { - TEquals("db_not_found", x.error); - } - - try { - CouchDB.replicate(CouchDB.protocol + host + "/foobar", "test_suite_db"); - T(false, "should have failed with db_not_found error"); - } catch (x) { - TEquals("db_not_found", x.error); - } - - - // test since_seq parameter - docs = makeDocs(1, 6); - - for (i = 0; i < dbPairsPrefixes.length; i++) { - populateSourceDb(docs); - populateTargetDb([]); - // sequences are no longer simple numbers - so pull #3 from a feed - var since_seq = sourceDb.changes().results[2].seq; - - var expected_ids = []; - var changes = sourceDb.changes({since: JSON.stringify(since_seq)}); - for (j = 0; j < changes.results.length; j++) { - expected_ids.push(changes.results[j].id); - } - TEquals(2, expected_ids.length, "2 documents since since_seq"); - - // For OTP < R14B03, temporary child specs are kept in the supervisor - // after the child terminates, so cancel the replication to delete the - // child spec in those OTP releases, otherwise since_seq will have no - // effect. - try { - CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - {body: {cancel: true}} - ); - } catch (x) { - // OTP R14B03 onwards - TEquals("not_found", x.error); - } - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - {body: {since_seq: since_seq}} - ); - // Same reason as before. But here we don't want since_seq to affect - // subsequent replications, so we need to delete the child spec from the - // supervisor (since_seq is not used to calculate the replication ID). 
- try { - CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - {body: {cancel: true}} - ); - } catch (x) { - // OTP R14B03 onwards - TEquals("not_found", x.error); - } - TEquals(true, repResult.ok); - TEquals(2, repResult.history[0].missing_checked); - TEquals(2, repResult.history[0].missing_found); - TEquals(2, repResult.history[0].docs_read); - TEquals(2, repResult.history[0].docs_written); - TEquals(0, repResult.history[0].doc_write_failures); - - for (j = 0; j < docs.length; j++) { - doc = docs[j]; - copy = targetDb.open(doc._id); - - if (expected_ids.indexOf(doc._id) === -1) { - T(copy === null); - } else { - T(copy !== null); - TEquals(true, compareObjects(doc, copy)); - } - } - } - - - // test errors due to doc validate_doc_update functions in the target endpoint - docs = makeDocs(1, 8); - docs[2]["_attachments"] = { - "hello.txt": { - "content_type": "text/plain", - "data": "aGVsbG8gd29ybGQ=" // base64:encode("hello world") - } - }; - var ddoc = { - _id: "_design/test", - language: "javascript", - validate_doc_update: (function(newDoc, oldDoc, userCtx, secObj) { - if ((newDoc.integer % 2) !== 0) { - throw {forbidden: "I only like multiples of 2."}; - } - }).toString() - }; - - for (i = 0; i < dbPairsPrefixes.length; i++) { - populateSourceDb(docs); - populateTargetDb([ddoc]); - - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name - ); - TEquals(true, repResult.ok); - TEquals(7, repResult.history[0].missing_checked); - TEquals(7, repResult.history[0].missing_found); - TEquals(7, repResult.history[0].docs_read); - TEquals(3, repResult.history[0].docs_written); - TEquals(4, repResult.history[0].doc_write_failures); - - for (j = 0; j < docs.length; j++) { - doc = docs[j]; - copy = targetDb.open(doc._id); - - if (doc.integer % 2 === 0) { - T(copy !== null); - TEquals(copy.integer, doc.integer); - } else { - T(copy === null); - } - } - } - - - // test create_target option - docs = makeDocs(1, 2); - - for (i = 0; i < dbPairsPrefixes.length; i++) { - populateSourceDb(docs); - targetDb.deleteDb(); - - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - {body: {create_target: true}} - ); - TEquals(true, repResult.ok); - - sourceInfo = sourceDb.info(); - targetInfo = targetDb.info(); - - TEquals(sourceInfo.doc_count, targetInfo.doc_count); - TEquals(sourceInfo.update_seq, targetInfo.update_seq); - } - - - // test filtered replication - docs = makeDocs(1, 31); - docs.push({ - _id: "_design/mydesign", - language: "javascript", - filters: { - myfilter: (function(doc, req) { - var modulus = Number(req.query.modulus); - var special = req.query.special; - return (doc.integer % modulus === 0) || (doc.string === special); - }).toString() - } - }); - - for (i = 0; i < dbPairsPrefixes.length; i++) { - populateSourceDb(docs); - populateTargetDb([]); - - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - { - body: { - filter: "mydesign/myfilter", - query_params: { - modulus: "2", - special: "7" - } - } - } - ); - - TEquals(true, repResult.ok); - - for (j = 0; j < docs.length; j++) { - doc = docs[j]; - copy = targetDb.open(doc._id); - - if ((doc.integer && (doc.integer % 2 === 0)) || (doc.string === "7")) { - - T(copy !== null); - TEquals(true, compareObjects(doc, copy)); - } else { - TEquals(null, copy); - } - } - - TEquals(true, repResult.history 
instanceof Array); - TEquals(1, repResult.history.length); - // We (incorrectly) don't record update sequences for things - // that don't pass the changse feed filter. Historically the - // last document to pass was the second to last doc which has - // an update sequence of 30. Work that has been applied to avoid - // conflicts from duplicate IDs breaking _bulk_docs updates added - // a sort to the logic which changes this. Now the last document - // to pass has an doc id of "8" and is at update_seq 29 (because only - // "9" and the design doc are after it). - // - // In the future the fix ought to be that we record that update - // sequence of the database. BigCouch has some existing work on - // this in the clustered case because if you have very few documents - // that pass the filter then (given single node's behavior) you end - // up having to rescan a large portion of the database. - // we can't rely on sequences in a cluster - // not only can one figure appear twice (at least for n>1), there's also hashes involved now - so comparing seq==29 is lottery (= cutting off hashes is nonsense) - // above, there was brute-force comparing all attrs of all docs - now we did check if excluded docs did NOT make it - // in any way, we can't rely on sequences in a cluster (so leave out) - //TEquals(29, repResult.source_last_seq); - //TEquals(0, repResult.history[0].start_last_seq); - //TEquals(29, repResult.history[0].end_last_seq); - //TEquals(29, repResult.history[0].recorded_seq); - // 16 => 15 docs with even integer field + 1 doc with string field "7" - TEquals(16, repResult.history[0].missing_checked); - TEquals(16, repResult.history[0].missing_found); - TEquals(16, repResult.history[0].docs_read); - TEquals(16, repResult.history[0].docs_written); - TEquals(0, repResult.history[0].doc_write_failures); - - - // add new docs to source and resume the same replication - var newDocs = makeDocs(50, 56); - populateSourceDb(newDocs, true); - - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - { - body: { - filter: "mydesign/myfilter", - query_params: { - modulus: "2", - special: "7" - } - } - } - ); - - TEquals(true, repResult.ok); - - for (j = 0; j < newDocs.length; j++) { - doc = newDocs[j]; - copy = targetDb.open(doc._id); - - if (doc.integer && (doc.integer % 2 === 0)) { - - T(copy !== null); - TEquals(true, compareObjects(doc, copy)); - } else { - TEquals(null, copy); - } - } - - // last doc has even integer field, so last replicated seq is 36 - // cluster - so no seq (ditto above) - //TEquals(36, repResult.source_last_seq); - TEquals(true, repResult.history instanceof Array); - TEquals(2, repResult.history.length); - //TEquals(29, repResult.history[0].start_last_seq); - //TEquals(36, repResult.history[0].end_last_seq); - //TEquals(36, repResult.history[0].recorded_seq); - TEquals(3, repResult.history[0].missing_checked); - TEquals(3, repResult.history[0].missing_found); - TEquals(3, repResult.history[0].docs_read); - TEquals(3, repResult.history[0].docs_written); - TEquals(0, repResult.history[0].doc_write_failures); - } - - - // test filtered replication works as expected after changing the filter's - // code (ticket COUCHDB-892) - var filterFun1 = (function(doc, req) { - if (doc.value < Number(req.query.maxvalue)) { - return true; - } else { - return false; - } - }).toString(); - - var filterFun2 = (function(doc, req) { - return true; - }).toString(); - - for (i = 0; i < dbPairsPrefixes.length; i++) { - 
populateTargetDb([]); - populateSourceDb([]); - - TEquals(true, sourceDb.save({_id: "foo1", value: 1}).ok); - TEquals(true, sourceDb.save({_id: "foo2", value: 2}).ok); - TEquals(true, sourceDb.save({_id: "foo3", value: 3}).ok); - TEquals(true, sourceDb.save({_id: "foo4", value: 4}).ok); - - var ddoc = { - "_id": "_design/mydesign", - "language": "javascript", - "filters": { - "myfilter": filterFun1 - } - }; - - TEquals(true, sourceDb.save(ddoc).ok); - - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - { - body: { - filter: "mydesign/myfilter", - query_params: { - maxvalue: "3" - } - } - } - ); - - TEquals(true, repResult.ok); - TEquals(true, repResult.history instanceof Array); - TEquals(1, repResult.history.length); - TEquals(2, repResult.history[0].docs_written); - TEquals(2, repResult.history[0].docs_read); - TEquals(0, repResult.history[0].doc_write_failures); - - var docFoo1 = targetDb.open("foo1"); - T(docFoo1 !== null); - TEquals(1, docFoo1.value); - - var docFoo2 = targetDb.open("foo2"); - T(docFoo2 !== null); - TEquals(2, docFoo2.value); - - var docFoo3 = targetDb.open("foo3"); - TEquals(null, docFoo3); - - var docFoo4 = targetDb.open("foo4"); - TEquals(null, docFoo4); - - // replication should start from scratch after the filter's code changed - - ddoc.filters.myfilter = filterFun2; - TEquals(true, sourceDb.save(ddoc).ok); - - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - { - body: { - filter: "mydesign/myfilter", - query_params : { - maxvalue: "3" - } - } - } - ); - - TEquals(true, repResult.ok); - TEquals(true, repResult.history instanceof Array); - TEquals(1, repResult.history.length); - TEquals(3, repResult.history[0].docs_written); - TEquals(3, repResult.history[0].docs_read); - TEquals(0, repResult.history[0].doc_write_failures); - - docFoo1 = targetDb.open("foo1"); - T(docFoo1 !== null); - TEquals(1, docFoo1.value); - - docFoo2 = targetDb.open("foo2"); - T(docFoo2 !== null); - TEquals(2, docFoo2.value); - - docFoo3 = targetDb.open("foo3"); - T(docFoo3 !== null); - TEquals(3, docFoo3.value); - - docFoo4 = targetDb.open("foo4"); - T(docFoo4 !== null); - TEquals(4, docFoo4.value); - - T(targetDb.open("_design/mydesign") !== null); - } - - - // test replication by doc IDs - docs = makeDocs(1, 11); - docs.push({ - _id: "_design/foo", - language: "javascript", - integer: 1 - }); - - var target_doc_ids = [ - { initial: ["1", "2", "10"], after: [], conflict_id: "2" }, - { initial: ["1", "2"], after: ["7"], conflict_id: "1" }, - { initial: ["1", "foo_666", "10"], after: ["7"], conflict_id: "10" }, - { initial: ["_design/foo", "8"], after: ["foo_5"], conflict_id: "8" }, - { initial: ["_design%2Ffoo", "8"], after: ["foo_5"], conflict_id: "8" }, - { initial: [], after: ["foo_1000", "_design/foo", "1"], conflict_id: "1" } - ]; - var doc_ids, after_doc_ids; - var id, num_inexistent_docs, after_num_inexistent_docs; - var total, after_total; - - for (i = 0; i < dbPairsPrefixes.length; i++) { - - for (j = 0; j < target_doc_ids.length; j++) { - doc_ids = target_doc_ids[j].initial; - num_inexistent_docs = 0; - - for (k = 0; k < doc_ids.length; k++) { - id = doc_ids[k]; - if (id.indexOf("foo_") === 0) { - num_inexistent_docs += 1; - } - } - - populateSourceDb(docs); - populateTargetDb([]); - - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - { - body: { - doc_ids: doc_ids - } - } - 
); - - total = doc_ids.length - num_inexistent_docs; - TEquals(true, repResult.ok); - if (total === 0) { - TEquals(true, repResult.no_changes); - } else { - TEquals('string', typeof repResult.start_time); - TEquals('string', typeof repResult.end_time); - TEquals(total, repResult.docs_read); - TEquals(total, repResult.docs_written); - TEquals(0, repResult.doc_write_failures); - } - - for (k = 0; k < doc_ids.length; k++) { - id = decodeURIComponent(doc_ids[k]); - doc = sourceDb.open(id); - copy = targetDb.open(id); - - if (id.indexOf("foo_") === 0) { - TEquals(null, doc); - TEquals(null, copy); - } else { - T(doc !== null); - T(copy !== null); - TEquals(true, compareObjects(doc, copy)); - } - } - - // be absolutely sure that other docs were not replicated - for (k = 0; k < docs.length; k++) { - var base_id = docs[k]._id; - id = encodeURIComponent(base_id); - doc = targetDb.open(base_id); - - if ((doc_ids.indexOf(id) >= 0) || (doc_ids.indexOf(base_id) >= 0)) { - T(doc !== null); - } else { - TEquals(null, doc); - } - } - - targetInfo = targetDb.info(); - TEquals(total, targetInfo.doc_count); - - - // add more docs throught replication by doc IDs - after_doc_ids = target_doc_ids[j].after; - after_num_inexistent_docs = 0; - - for (k = 0; k < after_doc_ids.length; k++) { - id = after_doc_ids[k]; - if (id.indexOf("foo_") === 0) { - after_num_inexistent_docs += 1; - } - } - - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - { - body: { - doc_ids: after_doc_ids - } - } - ); - - after_total = after_doc_ids.length - after_num_inexistent_docs; - TEquals(true, repResult.ok); - if (after_total === 0) { - TEquals(true, repResult.no_changes); - } else { - TEquals('string', typeof repResult.start_time); - TEquals('string', typeof repResult.end_time); - TEquals(after_total, repResult.docs_read); - TEquals(after_total, repResult.docs_written); - TEquals(0, repResult.doc_write_failures); - } - - for (k = 0; k < after_doc_ids.length; k++) { - id = after_doc_ids[k]; - doc = sourceDb.open(id); - copy = targetDb.open(id); - - if (id.indexOf("foo_") === 0) { - TEquals(null, doc); - TEquals(null, copy); - } else { - T(doc !== null); - T(copy !== null); - TEquals(true, compareObjects(doc, copy)); - } - } - - // be absolutely sure that other docs were not replicated - for (k = 0; k < docs.length; k++) { - var base_id = docs[k]._id; - id = encodeURIComponent(base_id); - doc = targetDb.open(base_id); - - if ((doc_ids.indexOf(id) >= 0) || (after_doc_ids.indexOf(id) >= 0) || - (doc_ids.indexOf(base_id) >= 0) || - (after_doc_ids.indexOf(base_id) >= 0)) { - T(doc !== null); - } else { - TEquals(null, doc); - } - } - - targetInfo = targetDb.info(); - TEquals((total + after_total), targetInfo.doc_count); - - - // replicate again the same doc after updated on source (no conflict) - id = target_doc_ids[j].conflict_id; - doc = sourceDb.open(id); - T(doc !== null); - doc.integer = 666; - TEquals(true, sourceDb.save(doc).ok); - addAtt(sourceDb, doc, "readme.txt", att1_data, "text/plain"); - addAtt(sourceDb, doc, "data.dat", att2_data, "application/binary"); - - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - { - body: { - doc_ids: [id] - } - } - ); - - TEquals(true, repResult.ok); - TEquals(1, repResult.docs_read); - TEquals(1, repResult.docs_written); - TEquals(0, repResult.doc_write_failures); - - copy = targetDb.open(id, {conflicts: true}); - - TEquals(666, copy.integer); - TEquals(0, 
copy._rev.indexOf("4-")); - TEquals('undefined', typeof copy._conflicts); - - var atts = copy._attachments; - TEquals('object', typeof atts); - TEquals('object', typeof atts["readme.txt"]); - TEquals(3, atts["readme.txt"].revpos); - TEquals(0, atts["readme.txt"].content_type.indexOf("text/plain")); - TEquals(true, atts["readme.txt"].stub); - - var att1_copy = CouchDB.request( - "GET", "/" + targetDb.name + "/" + copy._id + "/readme.txt" - ).responseText; - TEquals(att1_data.length, att1_copy.length); - TEquals(att1_data, att1_copy); - - TEquals('object', typeof atts["data.dat"]); - TEquals(4, atts["data.dat"].revpos); - TEquals(0, atts["data.dat"].content_type.indexOf("application/binary")); - TEquals(true, atts["data.dat"].stub); - - var att2_copy = CouchDB.request( - "GET", "/" + targetDb.name + "/" + copy._id + "/data.dat" - ).responseText; - TEquals(att2_data.length, att2_copy.length); - TEquals(att2_data, att2_copy); - - - // generate a conflict throught replication by doc IDs - id = target_doc_ids[j].conflict_id; - doc = sourceDb.open(id); - copy = targetDb.open(id); - T(doc !== null); - T(copy !== null); - doc.integer += 100; - copy.integer += 1; - TEquals(true, sourceDb.save(doc).ok); - TEquals(true, targetDb.save(copy).ok); - - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - { - body: { - doc_ids: [id] - } - } - ); - - TEquals(true, repResult.ok); - TEquals(1, repResult.docs_read); - TEquals(1, repResult.docs_written); - TEquals(0, repResult.doc_write_failures); - - copy = targetDb.open(id, {conflicts: true}); - - TEquals(0, copy._rev.indexOf("5-")); - TEquals(true, copy._conflicts instanceof Array); - TEquals(1, copy._conflicts.length); - TEquals(0, copy._conflicts[0].indexOf("5-")); - } - } - - - docs = makeDocs(1, 25); - docs.push({ - _id: "_design/foo", - language: "javascript", - filters: { - myfilter: (function(doc, req) { return true; }).toString() - } - }); - - for (i = 0; i < dbPairsPrefixes.length; i++) { - populateSourceDb(docs); - populateTargetDb([]); - - // add some attachments - for (j = 10; j < 15; j++) { - addAtt(sourceDb, docs[j], "readme.txt", att1_data, "text/plain"); - } - - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - { - body: { - continuous: true - } - } - ); - TEquals(true, repResult.ok); - TEquals('string', typeof repResult._local_id); - - var rep_id = repResult._local_id; - - waitForSeq(sourceDb, targetDb, rep_id); - - for (j = 0; j < docs.length; j++) { - doc = docs[j]; - copy = targetDb.open(doc._id); - - T(copy !== null); - TEquals(true, compareObjects(doc, copy)); - - if (j >= 10 && j < 15) { - var atts = copy._attachments; - TEquals('object', typeof atts); - TEquals('object', typeof atts["readme.txt"]); - TEquals(2, atts["readme.txt"].revpos); - TEquals(0, atts["readme.txt"].content_type.indexOf("text/plain")); - TEquals(true, atts["readme.txt"].stub); - - var att_copy = CouchDB.request( - "GET", "/" + targetDb.name + "/" + copy._id + "/readme.txt" - ).responseText; - TEquals(att1_data.length, att_copy.length); - TEquals(att1_data, att_copy); - } - } - - sourceInfo = sourceDb.info(); - targetInfo = targetDb.info(); - - TEquals(sourceInfo.doc_count, targetInfo.doc_count); - - // add attachments to docs in source - for (j = 10; j < 15; j++) { - addAtt(sourceDb, docs[j], "data.dat", att2_data, "application/binary"); - } - - var ddoc = docs[docs.length - 1]; // design doc - addAtt(sourceDb, ddoc, "readme.txt", 
att1_data, "text/plain"); - - waitForSeq(sourceDb, targetDb, rep_id); - - var modifDocs = docs.slice(10, 15).concat([ddoc]); - for (j = 0; j < modifDocs.length; j++) { - doc = modifDocs[j]; - copy = targetDb.open(doc._id); - - T(copy !== null); - TEquals(true, compareObjects(doc, copy)); - - var atts = copy._attachments; - TEquals('object', typeof atts); - TEquals('object', typeof atts["readme.txt"]); - TEquals(2, atts["readme.txt"].revpos); - TEquals(0, atts["readme.txt"].content_type.indexOf("text/plain")); - TEquals(true, atts["readme.txt"].stub); - - var att1_copy = CouchDB.request( - "GET", "/" + targetDb.name + "/" + copy._id + "/readme.txt" - ).responseText; - TEquals(att1_data.length, att1_copy.length); - TEquals(att1_data, att1_copy); - - if (doc._id.indexOf("_design/") === -1) { - TEquals('object', typeof atts["data.dat"]); - TEquals(3, atts["data.dat"].revpos); - TEquals(0, atts["data.dat"].content_type.indexOf("application/binary")); - TEquals(true, atts["data.dat"].stub); - - var att2_copy = CouchDB.request( - "GET", "/" + targetDb.name + "/" + copy._id + "/data.dat" - ).responseText; - TEquals(att2_data.length, att2_copy.length); - TEquals(att2_data, att2_copy); - } - } - - sourceInfo = sourceDb.info(); - targetInfo = targetDb.info(); - - TEquals(sourceInfo.doc_count, targetInfo.doc_count); - - // add another attachment to the ddoc on source - addAtt(sourceDb, ddoc, "data.dat", att2_data, "application/binary"); - - waitForSeq(sourceDb, targetDb, rep_id); - - copy = targetDb.open(ddoc._id); - var atts = copy._attachments; - TEquals('object', typeof atts); - TEquals('object', typeof atts["readme.txt"]); - TEquals(2, atts["readme.txt"].revpos); - TEquals(0, atts["readme.txt"].content_type.indexOf("text/plain")); - TEquals(true, atts["readme.txt"].stub); - - var att1_copy = CouchDB.request( - "GET", "/" + targetDb.name + "/" + copy._id + "/readme.txt" - ).responseText; - TEquals(att1_data.length, att1_copy.length); - TEquals(att1_data, att1_copy); - - TEquals('object', typeof atts["data.dat"]); - TEquals(3, atts["data.dat"].revpos); - TEquals(0, atts["data.dat"].content_type.indexOf("application/binary")); - TEquals(true, atts["data.dat"].stub); - - var att2_copy = CouchDB.request( - "GET", "/" + targetDb.name + "/" + copy._id + "/data.dat" - ).responseText; - TEquals(att2_data.length, att2_copy.length); - TEquals(att2_data, att2_copy); - - sourceInfo = sourceDb.info(); - targetInfo = targetDb.info(); - - TEquals(sourceInfo.doc_count, targetInfo.doc_count); - - - // add more docs to source - var newDocs = makeDocs(25, 35); - populateSourceDb(newDocs, true); - - waitForSeq(sourceDb, targetDb, rep_id); - - for (j = 0; j < newDocs.length; j++) { - doc = newDocs[j]; - copy = targetDb.open(doc._id); - - T(copy !== null); - TEquals(true, compareObjects(doc, copy)); - } - - sourceInfo = sourceDb.info(); - targetInfo = targetDb.info(); - - TEquals(sourceInfo.doc_count, targetInfo.doc_count); - - // delete docs from source - TEquals(true, sourceDb.deleteDoc(newDocs[0]).ok); - TEquals(true, sourceDb.deleteDoc(newDocs[6]).ok); - - waitForSeq(sourceDb, targetDb, rep_id); - - copy = targetDb.open(newDocs[0]._id); - TEquals(null, copy); - copy = targetDb.open(newDocs[6]._id); - TEquals(null, copy); - - var changes = targetDb.changes({since: targetInfo.update_seq}); - // quite unfortunately, there is no way on relying on ordering in a cluster - // but we can assume a length of 2 - var line1 = changes.results[changes.results.length - 2]; - var line2 = changes.results[changes.results.length - 1]; 
- T(newDocs[0]._id == line1.id || newDocs[0]._id == line2.id); - T(newDocs[6]._id == line1.id || newDocs[6]._id == line2.id); - T(line1.deleted && line2.deleted); - - // cancel the replication - repResult = CouchDB.replicate( - dbPairsPrefixes[i].source+sourceDb.name, - dbPairsPrefixes[i].target+targetDb.name, - { - body: { - continuous: true, - cancel: true - } - } - ); - TEquals(true, repResult.ok); - TEquals(rep_id, repResult._local_id); - - doc = { - _id: 'foobar', - value: 666 - }; - TEquals(true, sourceDb.save(doc).ok); - - waitReplicationTaskStop(rep_id); - - copy = targetDb.open(doc._id); - TEquals(null, copy); - } - - // COUCHDB-1093 - filtered and continuous _changes feed dies when the - // database is compacted - // no more relevant when clustering, you can't compact (per se at least) - /* - docs = makeDocs(1, 10); - docs.push({ - _id: "_design/foo", - language: "javascript", - filters: { - myfilter: (function(doc, req) { return true; }).toString() - } - }); - populateSourceDb(docs); - populateTargetDb([]); - - repResult = CouchDB.replicate( - CouchDB.protocol + host + "/" + sourceDb.name, - targetDb.name, - { - body: { - continuous: true, - filter: "foo/myfilter" - } - } - ); - TEquals(true, repResult.ok); - TEquals('string', typeof repResult._local_id); - - TEquals(true, sourceDb.compact().ok); - while (sourceDb.info().compact_running) {}; - - TEquals(true, sourceDb.save(makeDocs(30, 31)[0]).ok); - - var task = getTask(repResult._local_id, 1000); - T(task != null); - - waitForSeq(sourceDb, targetDb, repResult._local_id); - T(sourceDb.open("30") !== null); - - // cancel replication - repResult = CouchDB.replicate( - CouchDB.protocol + host + "/" + sourceDb.name, - targetDb.name, - { - body: { - continuous: true, - filter: "foo/myfilter", - cancel: true - } - } - ); - TEquals(true, repResult.ok); - TEquals('string', typeof repResult._local_id); - */ - - // - // test replication of compressed attachments - // - doc = { - _id: "foobar" - }; - var bigTextAtt = makeAttData(128 * 1024); - var attName = "readme.txt"; - var oldSettings = getCompressionInfo(); - var compressionLevel = oldSettings.level; - var compressibleTypes = oldSettings.types; - - for (i = 0; i < dbPairsPrefixes.length; i++) { - populateSourceDb([doc]); - populateTargetDb([]); - - // enable compression of text types - enableAttCompression("8", "text/*"); - - // add text attachment to foobar doc - xhr = CouchDB.request( - "PUT", - "/" + sourceDb.name + "/" + doc._id + "/" + attName + "?rev=" + doc._rev, - { - body: bigTextAtt, - headers: {"Content-Type": "text/plain"} - } - ); - TEquals(201, xhr.status); - - // disable compression and replicate - disableAttCompression(); - - repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name); - TEquals(true, repResult.ok); - TEquals(true, repResult.history instanceof Array); - TEquals(1, repResult.history.length); - TEquals(1, repResult.history[0].missing_checked); - TEquals(1, repResult.history[0].missing_found); - TEquals(1, repResult.history[0].docs_read); - TEquals(1, repResult.history[0].docs_written); - TEquals(0, repResult.history[0].doc_write_failures); - - copy = targetDb.open( - doc._id, - {att_encoding_info: true, bypass_cache: Math.round(Math.random() * 1000)} - ); - T(copy !== null); - T(attName in copy._attachments); - TEquals("gzip", copy._attachments[attName].encoding); - TEquals("number", typeof copy._attachments[attName].length); - TEquals("number", typeof copy._attachments[attName].encoded_length); - 
T(copy._attachments[attName].encoded_length < copy._attachments[attName].length); - } - - delete bigTextAtt; - // restore original settings - enableAttCompression(compressionLevel, compressibleTypes); - - // - // test replication triggered by non admins - // - - // case 1) user triggering the replication is not a DB admin of the target DB - var joeUserDoc = CouchDB.prepareUserDoc({ - name: "joe", - roles: ["erlanger"] - }, "erly"); - var defaultUsersDb = new CouchDB("_users", {"X-Couch-Full-Commit":"false"}); - try { defaultUsersDb.createDb(); } catch (e) { /* ignore if exists*/ } - //var usersDb = new CouchDB("test_suite_auth", {"X-Couch-Full-Commit":"false"}); - /*var server_config = [ - { - section: "couch_httpd_auth", - key: "authentication_db", - value: usersDb.name - } - ];*/ - - docs = makeDocs(1, 6); - docs.push({ - _id: "_design/foo", - language: "javascript" - }); - - dbPairsPrefixes = [ - { - source: "", - target: "" - }, - { - source: CouchDB.protocol + host + "/", - target: "" - }, - { - source: "", - target: CouchDB.protocol + "joe:erly@" + host + "/" - }, - { - source: CouchDB.protocol + host + "/", - target: CouchDB.protocol + "joe:erly@" + host + "/" - } - ]; - - for (i = 0; i < dbPairsPrefixes.length; i++) { - //usersDb.deleteDb(); - populateSourceDb(docs); - populateTargetDb([]); - - TEquals(true, targetDb.setSecObj({ - admins: { - names: ["superman"], - roles: ["god"] - } - }).ok); - - // do NOT run on modified server b/c we use the default DB - //run_on_modified_server(server_config, function() { - delete joeUserDoc._rev; - var prevJoeUserDoc = defaultUsersDb.open(joeUserDoc._id); - if (prevJoeUserDoc) { - joeUserDoc._rev = prevJoeUserDoc._rev; - } - if(i == 0) { - TEquals(true, defaultUsersDb.save(joeUserDoc).ok); - wait(5000); - } - TEquals(true, CouchDB.login("joe", "erly").ok); - TEquals('joe', CouchDB.session().userCtx.name); - - repResult = CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name); - - TEquals(true, CouchDB.logout().ok); - - TEquals(true, repResult.ok); - TEquals(docs.length, repResult.history[0].docs_read); - TEquals((docs.length - 1), repResult.history[0].docs_written); // 1 ddoc - TEquals(1, repResult.history[0].doc_write_failures); - //}); - - for (j = 0; j < docs.length; j++) { - doc = docs[j]; - copy = targetDb.open(doc._id); - - if (doc._id.indexOf("_design/") === 0) { - TEquals(null, copy); - } else { - T(copy !== null); - TEquals(true, compareObjects(doc, copy)); - } - } - } - - // case 2) user triggering the replication is not a reader (nor admin) of the source DB - dbPairsPrefixes = [ - { - source: "", - target: "" - }, - { - source: CouchDB.protocol + "joe:erly@" + host + "/", - target: "" - }, - { - source: "", - target: CouchDB.protocol + host + "/" - }, - { - source: CouchDB.protocol + "joe:erly@" + host + "/", - target: CouchDB.protocol + host + "/" - } - ]; - - for (i = 0; i < dbPairsPrefixes.length; i++) { - //usersDb.deleteDb(); - populateSourceDb(docs); - populateTargetDb([]); - - TEquals(true, sourceDb.setSecObj({ - admins: { - names: ["superman"], - roles: ["god"] - }, - readers: { - names: ["john"], - roles: ["secret"] - } - }).ok); - // check that we start OK (plus give time for sec object apply 2 avoid Heisenbugs) - for (j = 0; j < docs.length; j++) { - doc = docs[j]; - copy = targetDb.open(doc._id); - TEquals(null, copy); - } - - // do NOT run on modified server b/c we use the default DB - //run_on_modified_server(server_config, function() { - delete joeUserDoc._rev; - var 
prevJoeUserDoc = defaultUsersDb.open(joeUserDoc._id); - if (prevJoeUserDoc) { - joeUserDoc._rev = prevJoeUserDoc._rev; - } - if(i == 0) { - TEquals(true, defaultUsersDb.save(joeUserDoc).ok); - wait(5000); - } - - TEquals(true, CouchDB.login("joe", "erly").ok); - TEquals('joe', CouchDB.session().userCtx.name); - - try { - CouchDB.replicate(dbPairsPrefixes[i].source+sourceDb.name, dbPairsPrefixes[i].target+targetDb.name); - T(false, "should have raised an exception"); - } catch (x) { - // TODO: small thing: DB exists but is no more found - at least we have an exception, so it's rather minor - //TEquals("unauthorized", x.error); - T(!!x); - } - - TEquals(true, CouchDB.logout().ok); - //}); - - for (j = 0; j < docs.length; j++) { - doc = docs[j]; - copy = targetDb.open(doc._id); - TEquals(null, copy); - } - } - - - // COUCHDB-885 - push replication of a doc with attachment causes a - // conflict in the target. - populateSourceDb([]); - populateTargetDb([]); - - doc = { - _id: "doc1" - }; - TEquals(true, sourceDb.save(doc).ok); - - repResult = CouchDB.replicate( - sourceDb.name, - CouchDB.protocol + host + "/" + targetDb.name - ); - TEquals(true, repResult.ok); - TEquals(true, repResult.history instanceof Array); - TEquals(1, repResult.history.length); - TEquals(1, repResult.history[0].docs_written); - TEquals(1, repResult.history[0].docs_read); - TEquals(0, repResult.history[0].doc_write_failures); - - doc["_attachments"] = { - "hello.txt": { - "content_type": "text/plain", - "data": "aGVsbG8gd29ybGQ=" // base64:encode("hello world") - }, - "foo.dat": { - "content_type": "not/compressible", - "data": "aSBhbSBub3QgZ3ppcGVk" // base64:encode("i am not gziped") - } - }; - - TEquals(true, sourceDb.save(doc).ok); - repResult = CouchDB.replicate( - sourceDb.name, - CouchDB.protocol + host + "/" + targetDb.name - ); - TEquals(true, repResult.ok); - TEquals(true, repResult.history instanceof Array); - TEquals(2, repResult.history.length); - TEquals(1, repResult.history[0].docs_written); - TEquals(1, repResult.history[0].docs_read); - TEquals(0, repResult.history[0].doc_write_failures); - - copy = targetDb.open(doc._id, { - conflicts: true, deleted_conflicts: true, - attachments: true, att_encoding_info: true}); - T(copy !== null); - TEquals("undefined", typeof copy._conflicts); - TEquals("undefined", typeof copy._deleted_conflicts); - TEquals("text/plain", copy._attachments["hello.txt"]["content_type"]); - TEquals("aGVsbG8gd29ybGQ=", copy._attachments["hello.txt"]["data"]); - TEquals("gzip", copy._attachments["hello.txt"]["encoding"]); - TEquals("not/compressible", copy._attachments["foo.dat"]["content_type"]); - TEquals("aSBhbSBub3QgZ3ppcGVk", copy._attachments["foo.dat"]["data"]); - TEquals("undefined", typeof copy._attachments["foo.dat"]["encoding"]); - // end of test for COUCHDB-885 - - // Test for COUCHDB-1242 (reject non-string query_params) - // TODO: non-String params crash CouchDB alltogether - /* - try { - CouchDB.replicate(sourceDb, targetDb, { - body: { - filter : "mydesign/myfilter", - query_params : { - "maxvalue": 4 - } - } - }); - } catch (e) { - TEquals("bad_request", e.error); - } - */ - - - // Test that we can cancel a replication just by POSTing an object - // like {"replication_id": Id, "cancel": true}. 
The replication ID - // can be obtained from a continuous replication request response - // (_local_id field), from _active_tasks or from the log - populateSourceDb(makeDocs(1, 6)); - populateTargetDb([]); - - repResult = CouchDB.replicate( - CouchDB.protocol + host + "/" + sourceDb.name, - targetDb.name, - { - body: { - continuous: true, - create_target: true - } - } - ); - TEquals(true, repResult.ok); - TEquals('string', typeof repResult._local_id); - var repId = repResult._local_id; - - var task = getTask(repId, 3000); - T(task != null); - - TEquals(task["replication_id"], repId, "Replication found in _active_tasks"); - xhr = CouchDB.request( - "POST", "/_replicate", { - body: JSON.stringify({"replication_id": repId, "cancel": true}), - headers: {"Content-Type": "application/json"} - }); - TEquals(200, xhr.status, "Replication cancel request success"); - waitReplicationTaskStop(repId); - task = getTask(repId); - TEquals(null, task, "Replication was canceled"); - - xhr = CouchDB.request( - "POST", "/_replicate", { - body: JSON.stringify({"replication_id": repId, "cancel": true}), - headers: {"Content-Type": "application/json"} - }); - TEquals(404, xhr.status, "2nd replication cancel failed"); - - // Non-admin user can not cancel replications triggered by other users - var userDoc = CouchDB.prepareUserDoc({ - name: "tony", - roles: ["mafia"] - }, "soprano"); - // again, due doe _security not there, we use the default users DB - defaultUsersDb = new CouchDB("_users", {"X-Couch-Full-Commit":"false"}); - //usersDb = new CouchDB("test_suite_auth", {"X-Couch-Full-Commit":"false"}); - // (and leave the server alone) - /*server_config = [ - { - section: "couch_httpd_auth", - key: "authentication_db", - value: usersDb.name - } - ];*/ - - //run_on_modified_server(server_config, function() { - populateSourceDb(makeDocs(1, 6)); - populateTargetDb([]); - var prevUserDoc = defaultUsersDb.open(userDoc._id); - if(prevUserDoc) { - userDoc._rev = prevUserDoc._rev; - } - TEquals(true, defaultUsersDb.save(userDoc).ok); - - repResult = CouchDB.replicate( - CouchDB.protocol + host + "/" + sourceDb.name, - targetDb.name, - { - body: { - continuous: true - } - } - ); - TEquals(true, repResult.ok); - TEquals('string', typeof repResult._local_id); - - TEquals(true, CouchDB.login("tony", "soprano").ok); - TEquals('tony', CouchDB.session().userCtx.name); - - xhr = CouchDB.request( - "POST", "/_replicate", { - body: JSON.stringify({"replication_id": repResult._local_id, "cancel": true}), - headers: {"Content-Type": "application/json"} - }); - TEquals(401, xhr.status, "Unauthorized to cancel replication"); - TEquals("unauthorized", JSON.parse(xhr.responseText).error); - - TEquals(true, CouchDB.logout().ok); - - xhr = CouchDB.request( - "POST", "/_replicate", { - body: JSON.stringify({"replication_id": repResult._local_id, "cancel": true}), - headers: {"Content-Type": "application/json"} - }); - TEquals(200, xhr.status, "Authorized to cancel replication"); - //}); - - // cleanup - //usersDb.deleteDb(); - sourceDb.deleteDb(); - targetDb.deleteDb(); - // (not sure what this is - cleanup after 'file not found tests' poss. - not harmful anyway) - (new CouchDB("test_suite_db")).deleteDb(); -}; -- cgit v1.2.1 From d5ae1bf416a6b2a06575c3a1e7206261d857d345 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Tue, 6 Aug 2019 11:25:54 -0400 Subject: Fix bash-ism in EUnit retry logic Bash has `let` but other shells might not have it. 
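
The two forms side by side (the `tries` counter is the one used in the
eunit retry loop below); `$((...))` is POSIX arithmetic expansion and
works in any /bin/sh, including dash:

```
let "tries=tries+1"    # bash builtin; dash and other sh's choke on it
tries=$((tries+1))     # portable POSIX equivalent
```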
--- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ed22509e2..f5d15d47b 100644 --- a/Makefile +++ b/Makefile @@ -176,7 +176,7 @@ eunit: couch if [ $$? -eq 0 ]; then \ break; \ else \ - let "tries=tries+1"; \ + tries=$$((tries+1)); \ [ $$tries -gt 2 ] && exit 1; \ fi \ done \ -- cgit v1.2.1 From 98a702c4a8784e3037f40f8d201fe0387a836428 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Wed, 7 Aug 2019 01:52:47 -0400 Subject: Fix cpse_test_purge_replication eunit test It doesn't work on Jenkins but worked locally. Noticed that we started chttpd even though the clustered port was never used. Add a wait function in `db_url/1` to make sure to wait until the db is available via the HTTP interface before continuing. --- src/couch_pse_tests/src/cpse_test_purge_replication.erl | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/couch_pse_tests/src/cpse_test_purge_replication.erl b/src/couch_pse_tests/src/cpse_test_purge_replication.erl index 4a0822525..20dcc2f81 100644 --- a/src/couch_pse_tests/src/cpse_test_purge_replication.erl +++ b/src/couch_pse_tests/src/cpse_test_purge_replication.erl @@ -21,7 +21,7 @@ setup_all() -> - cpse_util:setup_all([mem3, fabric, chttpd, couch_replicator]). + cpse_util:setup_all([mem3, fabric, couch_replicator]). setup_each() -> @@ -205,4 +205,11 @@ make_shard(DbName) -> db_url(DbName) -> Addr = config:get("httpd", "bind_address", "127.0.0.1"), Port = mochiweb_socket_server:get(couch_httpd, port), - ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])). + Url = ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])), + test_util:wait(fun() -> + case test_request:get(?b2l(Url)) of + {ok, 200, _, _} -> ok; + _ -> wait + end + end), + Url. -- cgit v1.2.1 From 41e3461a3331ec61f27fa234c383f222652718fc Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 6 Aug 2019 16:48:24 -0400 Subject: Fix copy/paste errors in platform naming --- Jenkinsfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index c57574824..3c49b7672 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -246,7 +246,7 @@ pipeline { timeout(time: 90, unit: "MINUTES") } environment { - platform = 'xenial' + platform = 'bionic' } steps { sh 'rm -f apache-couchdb-*.tar.gz' @@ -300,7 +300,7 @@ pipeline { timeout(time: 90, unit: "MINUTES") } environment { - platform = 'jessie' + platform = 'stretch' } steps { sh 'rm -f apache-couchdb-*.tar.gz' @@ -327,7 +327,7 @@ pipeline { timeout(time: 90, unit: "MINUTES") } environment { - platform = 'jessie' + platform = 'aarch64-debian-stretch' } steps { sh 'rm -f apache-couchdb-*.tar.gz' -- cgit v1.2.1 From c4125a8635d0c15db67019e5fa5d84486eadc4d7 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 6 Aug 2019 16:49:52 -0400 Subject: Refactor using sequential stages, in workspace This work moves the builds back into the workspace, using a separate sub-directory per platform to avoid clashes between builds caused by JENKINS-57454. It also breaks out the steps into a pair of sequential stages within each each parallel stage of the build, which gives us better visibility into the progress of the build, and also sets us up to capture test results and expose them directly via Jenkins UI for faster problem determination. 
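
In outline, each platform branch of the parallel block now nests two
sequential stages; a trimmed sketch of the pattern (the full stages are
in the diff below):

```
stage('CentOS 7') {
  agent {
    docker {
      image 'couchdbdev/centos-7-erlang-19.3.6:latest'
      // per-platform workspace keeps co-located builds from clashing
      customWorkspace pwd() + '/centos7'
    }
  }
  stages {
    stage('Build from tarball & test') {
      steps {
        unstash 'tarball'
        sh( script: build_and_test )
      }
    }
    stage('Build CouchDB packages') {
      steps {
        sh( script: make_packages )
        sh( script: cleanup_and_save )
      }
    }
  }
}
```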
--- Jenkinsfile | 249 ++++++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 177 insertions(+), 72 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 3c49b7672..b2deb8ef8 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -14,35 +14,33 @@ // the License. // DRYing out the Jenkinsfile... -build_script = ''' -mkdir -p ${COUCHDB_IO_LOG_DIR} -echo -echo "Build CouchDB from tarball & test" -builddir=$(mktemp -d) -cd ${builddir} +build_and_test = ''' +mkdir -p ${COUCHDB_IO_LOG_DIR} +rm -rf build +mkdir build +cd build tar -xf ${WORKSPACE}/apache-couchdb-*.tar.gz cd apache-couchdb-* ./configure --with-curl make check || (build-aux/logfile-uploader.py && false) +''' -echo -echo "Build CouchDB packages" -cd ${builddir} +make_packages = ''' git clone https://github.com/apache/couchdb-pkg +rm -rf couchdb mkdir couchdb cp ${WORKSPACE}/apache-couchdb-*.tar.gz couchdb tar -xf ${WORKSPACE}/apache-couchdb-*.tar.gz -C couchdb cd couchdb-pkg make ${platform} PLATFORM=${platform} +''' -echo -echo "Cleanup & save for posterity" +cleanup_and_save = ''' rm -rf ${WORKSPACE}/pkgs/${platform} mkdir -p ${WORKSPACE}/pkgs/${platform} -mv ../rpmbuild/RPMS/$(arch)/*rpm ${WORKSPACE}/pkgs/${platform} || true -mv ../couchdb/*.deb ${WORKSPACE}/pkgs/${platform} || true -rm -rf ${builddir} ${COUCHDB_IO_LOG_DIR} +mv ${WORKSPACE}/rpmbuild/RPMS/$(arch)/*rpm ${WORKSPACE}/pkgs/${platform} || true +mv ${WORKSPACE}/couchdb/*.deb ${WORKSPACE}/pkgs/${platform} || true ''' pipeline { @@ -113,11 +111,7 @@ pipeline { // https://issues.jenkins-ci.org/browse/JENKINS-47962 // https://issues.jenkins-ci.org/browse/JENKINS-48050 - // The builddir stuff is to prevent all the builds from live syncing - // their build results to each other during the build, which ACTUALLY - // HAPPENS. Ugh. 
- - stage('make check') { + stage('Test and Package') { parallel { @@ -138,18 +132,22 @@ pipeline { mkdir -p $COUCHDB_IO_LOG_DIR # Build CouchDB from tarball & test - builddir=$(mktemp -d) - cd $builddir + mkdir build + cd build tar -xf $WORKSPACE/apache-couchdb-*.tar.gz cd apache-couchdb-* ./configure --with-curl gmake check || (build-aux/logfile-uploader.py && false) # No package build for FreeBSD at this time - rm -rf $builddir $COUCHDB_IO_LOG_DIR ''' } // withEnv } // steps + post { + cleanup { + sh 'rm -rf $COUCHDB_IO_LOG_DIR' + } + } // post } // stage FreeBSD stage('CentOS 6') { @@ -158,6 +156,8 @@ pipeline { image 'couchdbdev/centos-6-erlang-19.3.6:latest' alwaysPull true label 'ubuntu' + // this keeps builds landing on the same host from clashing with each other + customWorkspace pwd() + '/centos6' } } options { @@ -167,14 +167,28 @@ pipeline { environment { platform = 'centos6' } - steps { - sh 'rm -f apache-couchdb-*.tar.gz' - unstash 'tarball' - sh( script: build_script ) - } // steps + stages { + stage('Build from tarball & test') { + steps { + unstash 'tarball' + sh( script: build_and_test ) + } + } + stage('Build CouchDB packages') { + steps { + sh( script: make_packages ) + sh( script: cleanup_and_save ) + } + post { + success { + archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + } + } + } + } // stages post { - success { - archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + cleanup { + sh 'rm -rf ${WORKSPACE}/*' } } // post } // stage @@ -185,6 +199,7 @@ pipeline { image 'couchdbdev/centos-7-erlang-19.3.6:latest' alwaysPull true label 'ubuntu' + customWorkspace pwd() + '/centos7' } } options { @@ -194,14 +209,29 @@ pipeline { environment { platform = 'centos7' } - steps { - sh 'rm -f apache-couchdb-*.tar.gz' - unstash 'tarball' - sh( script: build_script ) - } // steps + stages { + stage('Build from tarball & test') { + steps { + unstash 'tarball' + sh( script: build_and_test ) + } + } + stage('Build CouchDB packages') { + steps { + unstash 'tarball' + sh( script: make_packages ) + sh( script: cleanup_and_save ) + } + post { + success { + archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + } + } + } + } // stages post { - success { - archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + cleanup { + sh 'rm -rf ${WORKSPACE}/*' } } // post } // stage @@ -212,6 +242,7 @@ pipeline { image 'couchdbdev/ubuntu-xenial-erlang-19.3.6:latest' alwaysPull true label 'ubuntu' + customWorkspace pwd() + '/xenial' } } options { @@ -221,14 +252,28 @@ pipeline { environment { platform = 'xenial' } - steps { - sh 'rm -f apache-couchdb-*.tar.gz' - unstash 'tarball' - sh( script: build_script ) - } // steps + stages { + stage('Build from tarball & test') { + steps { + unstash 'tarball' + sh( script: build_and_test ) + } + } + stage('Build CouchDB packages') { + steps { + sh( script: make_packages ) + sh( script: cleanup_and_save ) + } + post { + success { + archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + } + } + } + } // stages post { - success { - archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + cleanup { + sh 'rm -rf ${WORKSPACE}/*' } } // post } // stage @@ -239,6 +284,7 @@ pipeline { image 'couchdbdev/ubuntu-bionic-erlang-19.3.6:latest' alwaysPull true label 'ubuntu' + customWorkspace pwd() + '/bionic' } } options { @@ -248,14 +294,28 @@ pipeline { environment { platform = 'bionic' } - steps { - sh 'rm -f apache-couchdb-*.tar.gz' - unstash 'tarball' - sh( script: build_script ) - } // steps + stages { + stage('Build from tarball & test') { + 
steps { + unstash 'tarball' + sh( script: build_and_test ) + } + } + stage('Build CouchDB packages') { + steps { + sh( script: make_packages ) + sh( script: cleanup_and_save ) + } + post { + success { + archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + } + } + } + } // stages post { - success { - archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + cleanup { + sh 'rm -rf ${WORKSPACE}/*' } } // post } // stage @@ -266,6 +326,7 @@ pipeline { image 'couchdbdev/debian-jessie-erlang-19.3.6:latest' alwaysPull true label 'ubuntu' + customWorkspace pwd() + '/jessie' } } options { @@ -275,14 +336,28 @@ pipeline { environment { platform = 'jessie' } - steps { - sh 'rm -f apache-couchdb-*.tar.gz' - unstash 'tarball' - sh( script: build_script ) - } // steps + stages { + stage('Build from tarball & test') { + steps { + unstash 'tarball' + sh( script: build_and_test ) + } + } + stage('Build CouchDB packages') { + steps { + sh( script: make_packages ) + sh( script: cleanup_and_save ) + } + post { + success { + archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + } + } + } + } // stages post { - success { - archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + cleanup { + sh 'rm -rf ${WORKSPACE}/*' } } // post } // stage @@ -293,6 +368,7 @@ pipeline { image 'couchdbdev/debian-stretch-erlang-19.3.6:latest' alwaysPull true label 'ubuntu' + customWorkspace pwd() + '/stretch' } } options { @@ -302,14 +378,28 @@ pipeline { environment { platform = 'stretch' } - steps { - sh 'rm -f apache-couchdb-*.tar.gz' - unstash 'tarball' - sh( script: build_script ) - } // steps + stages { + stage('Build from tarball & test') { + steps { + unstash 'tarball' + sh( script: build_and_test ) + } + } + stage('Build CouchDB packages') { + steps { + sh( script: make_packages ) + sh( script: cleanup_and_save ) + } + post { + success { + archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + } + } + } + } // stages post { - success { - archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + cleanup { + sh 'rm -rf ${WORKSPACE}/*' } } // post } // stage @@ -320,6 +410,7 @@ pipeline { image 'couchdbdev/aarch64-debian-stretch-erlang-20.3.8.20:latest' alwaysPull true label 'arm' + customWorkspace pwd() + '/arm' } } options { @@ -329,14 +420,28 @@ pipeline { environment { platform = 'aarch64-debian-stretch' } - steps { - sh 'rm -f apache-couchdb-*.tar.gz' - unstash 'tarball' - sh( script: build_script ) - } // steps + stages { + stage('Build from tarball & test') { + steps { + unstash 'tarball' + sh( script: build_and_test ) + } + } + stage('Build CouchDB packages') { + steps { + sh( script: make_packages ) + sh( script: cleanup_and_save ) + } + post { + success { + archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + } + } + } + } // stages post { - success { - archiveArtifacts artifacts: 'pkgs/**', fingerprint: true + cleanup { + sh 'rm -rf ${WORKSPACE}/*' } } // post } // stage @@ -442,7 +547,7 @@ pipeline { body: "Boo, we failed. 
${env.RUN_DISPLAY_URL}" } cleanup { - sh 'rm -rf ${WORKSPACE}/*' + sh 'rm -rf ${COUCHDB_IO_LOG_DIR}' } } -- cgit v1.2.1 From 6b410aabc0e0cb740fd24e9a4478882aefea7220 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 6 Aug 2019 21:12:18 -0400 Subject: Capture EUnit and ExUnit test results for Jenkins --- Jenkinsfile | 38 ++++++++++++++++++++++++++++++++++++++ mix.exs | 1 + rebar.config.script | 2 +- 3 files changed, 40 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index b2deb8ef8..f37e9bf14 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -144,6 +144,9 @@ pipeline { } // withEnv } // steps post { + always { + junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml' + } cleanup { sh 'rm -rf $COUCHDB_IO_LOG_DIR' } @@ -173,6 +176,11 @@ pipeline { unstash 'tarball' sh( script: build_and_test ) } + post { + always { + junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml' + } + } } stage('Build CouchDB packages') { steps { @@ -215,6 +223,11 @@ pipeline { unstash 'tarball' sh( script: build_and_test ) } + post { + always { + junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml' + } + } } stage('Build CouchDB packages') { steps { @@ -258,6 +271,11 @@ pipeline { unstash 'tarball' sh( script: build_and_test ) } + post { + always { + junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml' + } + } } stage('Build CouchDB packages') { steps { @@ -300,6 +318,11 @@ pipeline { unstash 'tarball' sh( script: build_and_test ) } + post { + always { + junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml' + } + } } stage('Build CouchDB packages') { steps { @@ -342,6 +365,11 @@ pipeline { unstash 'tarball' sh( script: build_and_test ) } + post { + always { + junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml' + } + } } stage('Build CouchDB packages') { steps { @@ -384,6 +412,11 @@ pipeline { unstash 'tarball' sh( script: build_and_test ) } + post { + always { + junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml' + } + } } stage('Build CouchDB packages') { steps { @@ -426,6 +459,11 @@ pipeline { unstash 'tarball' sh( script: build_and_test ) } + post { + always { + junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml' + } + } } stage('Build CouchDB packages') { steps { diff --git a/mix.exs b/mix.exs index 2e213aeb1..43c8b6d93 100644 --- a/mix.exs +++ b/mix.exs @@ -35,6 +35,7 @@ defmodule CouchDBTest.Mixfile do # Run "mix help deps" to learn about dependencies. defp deps() do [ + {:junit_formatter, "~> 3.0", only: [:dev, :test, :integration]}, {:httpotion, "~> 3.0", only: [:dev, :test, :integration], runtime: false}, {:jiffy, path: Path.expand("src/jiffy", __DIR__)}, {:ibrowse, diff --git a/rebar.config.script b/rebar.config.script index 6445057e7..c38b6e235 100644 --- a/rebar.config.script +++ b/rebar.config.script @@ -156,7 +156,7 @@ AddConfig = [ {sub_dirs, SubDirs}, {lib_dirs, ["src"]}, {erl_opts, [{i, "../"} | ErlOpts]}, - {eunit_opts, [verbose]}, + {eunit_opts, [verbose, {report,{eunit_surefire,[{dir,"."}]}}]}, {plugins, [eunit_plugin]}, {dialyzer, [ {plt_location, local}, -- cgit v1.2.1 From 7f144950f14cd2d3a42888420fa99079e66f402f Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 7 Aug 2019 11:18:15 -0400 Subject: Avoid shebang length limits on jenkins The `pip3` and `nosetest` executables are scripts, and on jenkins the specified interpreter can exceed the 128 character length limit because of the deeply-nested workspace. 
Invoking these as modules seems the preferred workaround per pypa/pip#1773 --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index f5d15d47b..8f85ab8de 100644 --- a/Makefile +++ b/Makefile @@ -370,8 +370,8 @@ build-test: mango-test: devclean all @cd src/mango && \ python3 -m venv .venv && \ - .venv/bin/pip3 install -r requirements.txt - @cd src/mango && ../../dev/run -n 1 --admin=testuser:testpass .venv/bin/nosetests + .venv/bin/python3 -m pip install -r requirements.txt + @cd src/mango && ../../dev/run -n 1 --admin=testuser:testpass '.venv/bin/python3 -m nose' ################################################################################ # Developing -- cgit v1.2.1 From 2b3e2a02a5d7e106d70438173b1d9abaffc2dca1 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 7 Aug 2019 11:37:12 -0400 Subject: Configure environment for Elixir on ARM These settings are required to prevent Mix & Hex from trying to install packages into / on the ARM host. --- Jenkinsfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index f37e9bf14..f8f6a2f8b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -457,7 +457,9 @@ pipeline { stage('Build from tarball & test') { steps { unstash 'tarball' - sh( script: build_and_test ) + withEnv(['MIX_HOME='+pwd(), 'HEX_HOME='+pwd()]) { + sh( script: build_and_test ) + } } post { always { -- cgit v1.2.1 From 0bdf74d6dcfdbca8f6b405e2f3e10e011e8d9191 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 6 Aug 2019 13:28:47 -0400 Subject: Add timeout for couch_db_split_tests The "Should copy local docs after split in four" test was occasionally timing out in CI. --- src/couch/test/eunit/couch_db_split_tests.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/couch/test/eunit/couch_db_split_tests.erl b/src/couch/test/eunit/couch_db_split_tests.erl index 7d2bb4006..c44f47ca3 100644 --- a/src/couch/test/eunit/couch_db_split_tests.erl +++ b/src/couch/test/eunit/couch_db_split_tests.erl @@ -16,6 +16,7 @@ -include_lib("couch/include/couch_db.hrl"). -define(RINGTOP, 2 bsl 31). +-define(TIMEOUT, 60). % seconds setup() -> @@ -68,7 +69,7 @@ should_split_shard({Desc, TotalDocs, Q}, DbName) -> TMap = make_targets(Ranges), DocsPerRange = TotalDocs div Q, PickFun = make_pickfun(DocsPerRange), - {Desc, ?_test(begin + {Desc, timeout, ?TIMEOUT, ?_test(begin {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun), ?assertEqual(ExpectSeq, UpdateSeq), maps:map(fun(Range, Name) -> -- cgit v1.2.1 From aab0c51d8dca39d6556d4636edec20d14f65e36d Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 6 Aug 2019 22:46:43 -0400 Subject: Move couch startup to a fixture This improves reliability because that time isn't charged to the test, and also speeds up the test. --- .../test/eunit/ddoc_cache_no_cache_test.erl | 76 +++++++++++----------- 1 file changed, 37 insertions(+), 39 deletions(-) diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl index 637a6e872..a1937a088 100644 --- a/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl +++ b/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl @@ -35,45 +35,43 @@ return_error(_DDocId) -> {error, timeout}. 
-start(Resp) -> - Ctx = ddoc_cache_tutil:start_couch(), +no_cache_test_() -> + { + "ddoc_cache no cache test", + { + setup, + fun ddoc_cache_tutil:start_couch/0, fun ddoc_cache_tutil:stop_couch/1, + { + foreachx, + fun setup/1, fun teardown/2, + [ + {fun ddoc/1, fun no_cache_open_ok_test/2}, + {fun not_found/1, fun no_cache_open_not_found_test/2}, + {fun return_error/1, fun no_cache_open_error_test/2} + ] + } + } + }. + +setup(Resp) -> meck:new(fabric), meck:expect(fabric, open_doc, fun(_, DDocId, _) -> Resp(DDocId) - end), - Ctx. - - -stop(Ctx) -> - meck:unload(), - ddoc_cache_tutil:stop_couch(Ctx). - - -no_cache_open_ok_test() -> - Ctx = start(fun ddoc/1), - try - Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>), - ?assertEqual(ddoc(<<"bar">>), Resp) - after - stop(Ctx) - end. - - -no_cache_open_not_found_test() -> - Ctx = start(fun not_found/1), - try - Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>), - ?assertEqual(not_found(<<"bar">>), Resp) - after - stop(Ctx) - end. - - -no_cache_open_error_test() -> - Ctx = start(fun return_error/1), - try - Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>), - ?assertEqual(return_error(<<"bar">>), Resp) - after - stop(Ctx) - end. + end). + +teardown(_, _) -> + meck:unload(). + +no_cache_open_ok_test(_, _) -> + Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>), + ?_assertEqual(ddoc(<<"bar">>), Resp). + + +no_cache_open_not_found_test(_, _) -> + Resp = ddoc_cache:open_doc(<<"foo">>, <<"baz">>), + ?_assertEqual(not_found(<<"baz">>), Resp). + + +no_cache_open_error_test(_, _) -> + Resp = ddoc_cache:open_doc(<<"foo">>, <<"bif">>), + ?_assertEqual(return_error(<<"bif">>), Resp). -- cgit v1.2.1 From 608caaf12904effc104fc86a8525eb51425e2311 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Mon, 29 Jul 2019 14:48:40 -0400 Subject: Increase default HTTP timeouts These are needed to avoid timeouts on ASF Jenkins build farm. The httpotion client uses ibrowse underneath, and ibrowse has three separate timeouts. We are configuring two of them here: the overall request timeout, and one that detects inactivity on the connection. We set them slightly differently just to be able to differentiate which one fired from the logs. --- test/elixir/lib/couch.ex | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/test/elixir/lib/couch.ex b/test/elixir/lib/couch.ex index 58581b2fd..3c43ab152 100644 --- a/test/elixir/lib/couch.ex +++ b/test/elixir/lib/couch.ex @@ -50,6 +50,14 @@ defmodule Couch do CouchDB library to power test suite. """ + # These constants are supplied to the underlying HTTP client and control + # how long we will wait before timing out a test. The inactivity timeout + # specifically fires during an active HTTP response and defaults to 10_000 + # if not specified. We're defining it to a different value than the + # request_timeout largely just so we know which timeout fired. 
+ @request_timeout 60_000 + @inactivity_timeout 55_000 + def process_url("http://" <> _ = url) do url end @@ -179,13 +187,13 @@ defmodule Couch do Keyword.get( options, :timeout, - Application.get_env(:httpotion, :default_timeout, 5000) + Application.get_env(:httpotion, :default_timeout, @request_timeout) ) ib_options = Keyword.merge( Application.get_env(:httpotion, :default_ibrowse, []), - Keyword.get(options, :ibrowse, []) + Keyword.get(options, :ibrowse, [{:inactivity_timeout, @inactivity_timeout}]) ) follow_redirects = -- cgit v1.2.1 From dcae3e7a7ad3a1365f7294b0df488ec2990e4f8b Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 7 Aug 2019 17:49:05 -0400 Subject: Extend timeout for mrview_purge_docs_fabric --- .../test/eunit/couch_mrview_purge_docs_fabric_tests.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl index 213acac0b..a593f54e3 100644 --- a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl +++ b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl @@ -17,7 +17,7 @@ -include_lib("mem3/include/mem3.hrl"). -include_lib("couch_mrview/include/couch_mrview.hrl"). --define(TIMEOUT, 1000). +-define(TIMEOUT, 60). % seconds setup() -> @@ -56,7 +56,7 @@ view_purge_fabric_test_() -> test_purge_verify_index(DbName) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Docs1 = couch_mrview_test_util:make_docs(normal, 5), {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]), {ok, _} = fabric:update_doc( @@ -99,11 +99,11 @@ test_purge_verify_index(DbName) -> ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)), ?assertEqual(true, couch_mrview_index:verify_index_exists( ShardDbName, Props2)) - end). + end)}. test_purge_hook_before_compaction(DbName) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Docs1 = couch_mrview_test_util:make_docs(normal, 5), {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]), {ok, _} = fabric:update_doc( @@ -198,7 +198,7 @@ test_purge_hook_before_compaction(DbName) -> {ok, #doc{body = {Props4}}} = get_local_purge_doc(DbName), ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props4)) - end). + end)}. 
get_local_purge_doc(DbName) -> -- cgit v1.2.1 From 721f4fde975f045997e81bd33242c99f42d2c244 Mon Sep 17 00:00:00 2001 From: jiangph Date: Tue, 13 Aug 2019 16:42:21 +0800 Subject: fixup for dreyfus_fabric_cleanup:go/1 --- src/dreyfus/src/dreyfus_fabric_cleanup.erl | 8 ++++---- src/dreyfus/src/dreyfus_util.erl | 5 ++++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/dreyfus/src/dreyfus_fabric_cleanup.erl b/src/dreyfus/src/dreyfus_fabric_cleanup.erl index b5e030db0..2840a2f2d 100644 --- a/src/dreyfus/src/dreyfus_fabric_cleanup.erl +++ b/src/dreyfus/src/dreyfus_fabric_cleanup.erl @@ -53,12 +53,12 @@ cleanup_local_purge_doc(DbName, ActiveSigs) -> end, [], LocalShards), DeadDirs = DirList -- ActiveDirs, - lists:foldl(fun(IdxDir) -> + lists:foreach(fun(IdxDir) -> Sig = dreyfus_util:get_signature_from_idxdir(IdxDir), case Sig of undefined -> ok; _ -> DocId = dreyfus_util:get_local_purge_doc_id(Sig), LocalShards = mem3:local_shards(DbName), - lists:foldl(fun(LS, _AccOuter) -> + lists:foreach(fun(LS) -> ShardDbName = LS#shard.name, {ok, ShardDb} = couch_db:open_int(ShardDbName, []), case couch_db:open_doc(ShardDb, DocId, []) of @@ -69,6 +69,6 @@ cleanup_local_purge_doc(DbName, ActiveSigs) -> ok end, couch_db:close(ShardDb) - end, [], LocalShards) + end, LocalShards) end - end, [], DeadDirs). + end, DeadDirs). diff --git a/src/dreyfus/src/dreyfus_util.erl b/src/dreyfus/src/dreyfus_util.erl index ae3133e7d..6832299db 100644 --- a/src/dreyfus/src/dreyfus_util.erl +++ b/src/dreyfus/src/dreyfus_util.erl @@ -332,7 +332,10 @@ get_local_purge_doc_id(Sig) -> get_signature_from_idxdir(IdxDir) -> IdxDirList = filename:split(IdxDir), Sig = lists:last(IdxDirList), - case [Ch || Ch <- Sig, not (((Ch >= $0) and (Ch =< $9)) + Sig2 = if not is_binary(Sig) -> Sig; true -> + binary_to_list(Sig) + end, + case [Ch || Ch <- Sig2, not (((Ch >= $0) and (Ch =< $9)) orelse ((Ch >= $a) and (Ch =< $f)) orelse ((Ch >= $A) and (Ch =< $F)))] == [] of true -> Sig; -- cgit v1.2.1 From 36fd9dab64814475221e72022bd9227cfbd034d2 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Tue, 13 Aug 2019 16:53:44 -0400 Subject: Fix replication rescheduling Running < MaxJobs corner case Previously, when total number of replication jobs exceed `MaxJobs`, if some jobs crashed, additional jobs didn't start immediately to bring the running total up to the `MaxJobs` limit. Then, during rescheduling, the `Running == MaxJobs, Pending > 0` guard would fail and jobs would not rotate. In other words, if at least one job crashed, rotation didn't happen. The fix is to simplify the rotation logic to handle the `Running < MaxJobs` case. First, up to `Churn` number of jobs are stopped, then enough jobs are started to reach the `MaxJobs` limit. The rotation logic case handles the `start_pending_jobs/3` case so there is no need to call that separately before rotation happens. --- .../src/couch_replicator_scheduler.erl | 82 +++++++++++++--------- 1 file changed, 49 insertions(+), 33 deletions(-) diff --git a/src/couch_replicator/src/couch_replicator_scheduler.erl b/src/couch_replicator/src/couch_replicator_scheduler.erl index e3dbede83..7fe417a53 100644 --- a/src/couch_replicator/src/couch_replicator_scheduler.erl +++ b/src/couch_replicator/src/couch_replicator_scheduler.erl @@ -494,7 +494,10 @@ start_jobs(Count, State) -> -spec stop_jobs(non_neg_integer(), boolean(), #state{}) -> non_neg_integer(). 
-stop_jobs(Count, IsContinuous, State) -> +stop_jobs(Count, _, _) when is_integer(Count), Count =< 0 -> + 0; + +stop_jobs(Count, IsContinuous, State) when is_integer(Count) -> Running0 = running_jobs(), ContinuousPred = fun(Job) -> is_continuous(Job) =:= IsContinuous end, Running1 = lists:filter(ContinuousPred, Running0), @@ -723,35 +726,25 @@ reset_job_process(#job{} = Job) -> -spec reschedule(#state{}) -> ok. reschedule(State) -> - Running = running_job_count(), - Pending = pending_job_count(), - stop_excess_jobs(State, Running), - start_pending_jobs(State, Running, Pending), - rotate_jobs(State, Running, Pending), - update_running_jobs_stats(State#state.stats_pid), - ok. + StopCount = stop_excess_jobs(State, running_job_count()), + rotate_jobs(State, StopCount), + update_running_jobs_stats(State#state.stats_pid). --spec stop_excess_jobs(#state{}, non_neg_integer()) -> ok. +-spec stop_excess_jobs(#state{}, non_neg_integer()) -> non_neg_integer(). stop_excess_jobs(State, Running) -> #state{max_jobs=MaxJobs} = State, - StopCount = Running - MaxJobs, - if StopCount =< 0 -> ok; true -> - Stopped = stop_jobs(StopCount, true, State), - OneshotLeft = StopCount - Stopped, - if OneshotLeft =< 0 -> ok; true -> - stop_jobs(OneshotLeft, false, State), - ok - end - end. + StopCount = max(0, Running - MaxJobs), + Stopped = stop_jobs(StopCount, true, State), + OneshotLeft = StopCount - Stopped, + stop_jobs(OneshotLeft, false, State), + StopCount. start_pending_jobs(State) -> - start_pending_jobs(State, running_job_count(), pending_job_count()). - - -start_pending_jobs(State, Running, Pending) -> #state{max_jobs=MaxJobs} = State, + Running = running_job_count(), + Pending = pending_job_count(), if Running < MaxJobs, Pending > 0 -> start_jobs(MaxJobs - Running, State); true -> @@ -759,13 +752,19 @@ start_pending_jobs(State, Running, Pending) -> end. --spec rotate_jobs(#state{}, non_neg_integer(), non_neg_integer()) -> ok. -rotate_jobs(State, Running, Pending) -> +-spec rotate_jobs(#state{}, non_neg_integer()) -> ok. +rotate_jobs(State, ChurnSoFar) -> #state{max_jobs=MaxJobs, max_churn=MaxChurn} = State, - if Running == MaxJobs, Pending > 0 -> - RotateCount = lists:min([Pending, Running, MaxChurn]), - StopCount = stop_jobs(RotateCount, true, State), - start_jobs(StopCount, State); + Running = running_job_count(), + Pending = pending_job_count(), + % Reduce MaxChurn by the number of already stopped jobs in the + % current rescheduling cycle. + Churn = max(0, MaxChurn - ChurnSoFar), + if Running =< MaxJobs -> + StopCount = lists:min([Pending, Running, Churn]), + stop_jobs(StopCount, true, State), + StartCount = max(0, MaxJobs - running_job_count()), + start_jobs(StartCount, State); true -> ok end. 
@@ -1047,6 +1046,7 @@ scheduler_test_() -> t_excess_prefer_continuous_first(), t_stop_oldest_first(), t_start_oldest_first(), + t_jobs_churn_even_if_not_all_max_jobs_are_running(), t_dont_stop_if_nothing_pending(), t_max_churn_limits_number_of_rotated_jobs(), t_existing_jobs(), @@ -1056,7 +1056,7 @@ scheduler_test_() -> t_rotate_continuous_only_if_mixed(), t_oneshot_dont_get_starting_priority(), t_oneshot_will_hog_the_scheduler(), - t_if_excess_is_trimmed_rotation_doesnt_happen(), + t_if_excess_is_trimmed_rotation_still_happens(), t_if_transient_job_crashes_it_gets_removed(), t_if_permanent_job_crashes_it_stays_in_ets(), t_job_summary_running(), @@ -1177,10 +1177,10 @@ t_stop_oldest_first() -> continuous_running(5) ], setup_jobs(Jobs), - reschedule(mock_state(2)), + reschedule(mock_state(2, 1)), ?assertEqual({2, 1}, run_stop_count()), ?assertEqual([4], jobs_stopped()), - reschedule(mock_state(1)), + reschedule(mock_state(1, 1)), ?assertEqual([7], jobs_running()) end). @@ -1192,6 +1192,22 @@ t_start_oldest_first() -> ?assertEqual({1, 2}, run_stop_count()), ?assertEqual([2], jobs_running()), reschedule(mock_state(2)), + ?assertEqual({2, 1}, run_stop_count()), + % After rescheduling with max_jobs = 2, 2 was stopped and 5, 7 should + % be running. + ?assertEqual([2], jobs_stopped()) + end). + + +t_jobs_churn_even_if_not_all_max_jobs_are_running() -> + ?_test(begin + setup_jobs([ + continuous_running(7), + continuous(2), + continuous(5) + ]), + reschedule(mock_state(2, 2)), + ?assertEqual({2, 1}, run_stop_count()), ?assertEqual([7], jobs_stopped()) end). @@ -1289,7 +1305,7 @@ t_oneshot_will_hog_the_scheduler() -> end). -t_if_excess_is_trimmed_rotation_doesnt_happen() -> +t_if_excess_is_trimmed_rotation_still_happens() -> ?_test(begin Jobs = [ continuous(1), @@ -1298,7 +1314,7 @@ t_if_excess_is_trimmed_rotation_doesnt_happen() -> ], setup_jobs(Jobs), reschedule(mock_state(1)), - ?assertEqual([3], jobs_running()) + ?assertEqual([1], jobs_running()) end). -- cgit v1.2.1 From 1db0294eb1093066773760b75a200d99aa453be8 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Thu, 15 Aug 2019 10:07:07 -0300 Subject: Support map childspecs in couch_epi supervisor's children replacement --- src/couch_epi/src/couch_epi_sup.erl | 5 +++- src/couch_epi/test/eunit/couch_epi_basic_test.erl | 34 +++++++++++++++-------- 2 files changed, 27 insertions(+), 12 deletions(-) diff --git a/src/couch_epi/src/couch_epi_sup.erl b/src/couch_epi/src/couch_epi_sup.erl index 218db5445..477cbe79e 100644 --- a/src/couch_epi/src/couch_epi_sup.erl +++ b/src/couch_epi/src/couch_epi_sup.erl @@ -136,4 +136,7 @@ modules(#couch_epi_spec{kind = data_subscriptions, behaviour = Module}) -> merge([], Children) -> Children; merge([{Id, _, _, _, _, _} = Spec | Rest], Children) -> - merge(Rest, lists:keystore(Id, 1, Children, Spec)). + merge(Rest, lists:keystore(Id, 1, Children, Spec)); +merge([#{id := Id} = Spec | Rest], Children) -> + Replace = fun(#{id := I}) when I == Id -> Spec; (E) -> E end, + merge(Rest, lists:map(Replace, Children)). 
diff --git a/src/couch_epi/test/eunit/couch_epi_basic_test.erl b/src/couch_epi/test/eunit/couch_epi_basic_test.erl index 587d1564e..5ba6c9f87 100644 --- a/src/couch_epi/test/eunit/couch_epi_basic_test.erl +++ b/src/couch_epi/test/eunit/couch_epi_basic_test.erl @@ -67,7 +67,9 @@ processes() -> [ {?MODULE, [?CHILD(extra_process, worker)]}, {?MODULE, [{to_replace, {new, start_link, [bar]}, - permanent, 5000, worker, [bar]}]} + permanent, 5000, worker, [bar]}]}, + {?MODULE, [#{id => to_replace_map, + start => {new, start_link, [bar]}, modules => [bar]}]} ]. @@ -95,9 +97,10 @@ parse_child_id(Id) -> -include_lib("eunit/include/eunit.hrl"). basic_test() -> - Expected = lists:sort([ + Expected = [ {extra_process, [], [extra_process]}, {to_replace, [bar], [bar]}, + {to_replace_map, [bar], [bar]}, {{my_service, providers}, [couch_epi_functions_gen_my_service], [couch_epi_codechange_monitor, couch_epi_functions_gen_my_service, @@ -114,18 +117,23 @@ basic_test() -> [couch_epi_data_gen_test_app_descriptions], lists:sort([couch_epi_codechange_monitor, couch_epi_data_gen_test_app_descriptions, ?MODULE])} - ]), + ], - ToReplace = {to_replace, - {old, start_link, [foo]}, permanent, 5000, worker, [foo]}, - Children = lists:sort(couch_epi_sup:plugin_childspecs( - ?MODULE, [?MODULE], [ToReplace])), - Results = [ - {parse_child_id(Id), Args, lists:sort(Modules)} - || {Id, {_M, _F, Args}, _, _, _, Modules} <- Children + ToReplace = [ + {to_replace, {old, start_link, [foo]}, permanent, 5000, worker, [foo]}, + #{id => to_replace_map, start => {old, start_link, [foo]}} ], + Children = lists:sort(couch_epi_sup:plugin_childspecs( + ?MODULE, [?MODULE], ToReplace)), + + Results = lists:map(fun + ({Id, {_M, _F, Args}, _, _, _, Modules}) -> + {parse_child_id(Id), Args, lists:sort(Modules)}; + (#{id := Id, start := {_M, _F, Args}, modules := Modules}) -> + {parse_child_id(Id), Args, lists:sort(Modules)} + end, Children), - Tests = lists:zip(Expected, Results), + Tests = lists:zip(lists:sort(Expected), lists:sort(Results)), [?assertEqual(Expect, Result) || {Expect, Result} <- Tests], ExpectedChild = {to_replace, {new, start_link, [bar]}, @@ -134,4 +142,8 @@ basic_test() -> ExpectedChild, lists:keyfind(to_replace, 1, Children)), + ExpectedMapChildSpec = #{id => to_replace_map, + start => {new, start_link, [bar]}, modules => [bar]}, + [MapChildSpec] = [E || #{id := to_replace_map} = E <- Children], + ?assertEqual(ExpectedMapChildSpec, MapChildSpec), ok. -- cgit v1.2.1 From 69d15cd10652de1895b7750dd86b37ba04267f1d Mon Sep 17 00:00:00 2001 From: ILYA Khlopotov Date: Wed, 7 Aug 2019 16:45:15 +0000 Subject: Refactor fabric:cleanup_index_files Previous implementation assembled a regexp by concatenating active signatures. The approach caused regexp to exceed system limit in the case of huge number of them. 
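
The shape of the change, on hypothetical `Sigs` and `Path` values:

```
%% before: one regexp concatenating every active signature; its size
%% grows with the number of signatures and can exceed re's limits
{ok, RegExp} = re:compile([$(, string:join(Sigs, "|"), $)]),
WasInactive = re:run(Path, RegExp, [{capture, none}]) == nomatch,

%% after: constant-size membership test against a map of signatures
Active = maps:from_list([{Sig, nil} || Sig <- Sigs]),
IsInactive = not maps:is_key(filename:basename(Path, ".view"), Active).
```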
--- src/couch/test/exunit/fabric_test.exs | 101 ++++++++++++++++++++++++++++++++++ src/fabric/src/fabric.erl | 25 +++++---- test/elixir/lib/setup/common.ex | 5 ++ 3 files changed, 121 insertions(+), 10 deletions(-) create mode 100644 src/couch/test/exunit/fabric_test.exs diff --git a/src/couch/test/exunit/fabric_test.exs b/src/couch/test/exunit/fabric_test.exs new file mode 100644 index 000000000..bdb84e9a2 --- /dev/null +++ b/src/couch/test/exunit/fabric_test.exs @@ -0,0 +1,101 @@ +defmodule Couch.Test.Fabric do + use Couch.Test.ExUnit.Case + alias Couch.Test.Utils + + alias Couch.Test.Setup + + alias Couch.Test.Setup.Step + + import Couch.DBTest + + import Utils + + @admin {:user_ctx, user_ctx(roles: ["_admin"])} + + def with_db(context, setup) do + setup = + setup + |> Setup.Common.with_db() + |> Setup.run() + + context = + Map.merge(context, %{ + db_name: setup |> Setup.get(:db) |> Step.Create.DB.name() + }) + + {context, setup} + end + + describe "Fabric miscellaneous API" do + @describetag setup: &__MODULE__.with_db/2 + test "Get inactive_index_files", ctx do + {:ok, _rev} = update_doc(ctx.db_name, %{"_id" => "doc1"}) + + design_doc = %{ + "_id" => "_design/test", + "language" => "javascript", + "views" => %{ + "view" => %{ + "map" => "function(doc){emit(doc._id, doc._rev)}" + } + } + } + + {:ok, rev1} = update_doc(ctx.db_name, design_doc) + wait_sig_update(ctx.db_name, "test", "") + prev_active = get_active_sig(ctx.db_name, "test") + + updated_design_doc = + put_in(design_doc, ["views", "view", "map"], "function(doc){emit(doc._id, null)}") + + {:ok, rev2} = + update_doc( + ctx.db_name, + Map.put(updated_design_doc, "_rev", rev1) + ) + + assert rev1 != rev2 + wait_sig_update(ctx.db_name, "test", prev_active) + + {:ok, info} = :fabric.get_view_group_info(ctx.db_name, "_design/test") + active = info[:signature] + + files = Enum.map(:fabric.inactive_index_files(ctx.db_name), &List.to_string/1) + + assert [] != files, "We should have some inactive" + + assert not Enum.any?(files, fn + file_path -> String.contains?(file_path, active) + end), + "We are not suppose to return active views" + + assert Enum.all?(files, fn + file_path -> String.contains?(file_path, prev_active) + end), + "We expect all files to contain previous active signature" + end + end + + defp update_doc(db_name, body) do + json_body = :jiffy.decode(:jiffy.encode(body)) + + case :fabric.update_doc(db_name, json_body, [@admin]) do + {:ok, rev} -> + {:ok, :couch_doc.rev_to_str(rev)} + + error -> + error + end + end + + defp get_active_sig(db_name, ddoc_id) do + {:ok, info} = :fabric.get_view_group_info(db_name, "_design/#{ddoc_id}") + info[:signature] + end + + defp wait_sig_update(db_name, ddoc_id, prev_active) do + retry_until(fn -> + get_active_sig(db_name, ddoc_id) != prev_active + end) + end +end diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl index 6d04184e6..d98ffc978 100644 --- a/src/fabric/src/fabric.erl +++ b/src/fabric/src/fabric.erl @@ -36,7 +36,8 @@ % miscellany -export([design_docs/1, reset_validation_funs/1, cleanup_index_files/0, - cleanup_index_files/1, cleanup_index_files_all_nodes/1, dbname/1]). + cleanup_index_files/1, cleanup_index_files_all_nodes/1, dbname/1, + inactive_index_files/1]). -include_lib("fabric/include/fabric.hrl"). @@ -503,26 +504,30 @@ cleanup_index_files() -> %% @doc clean up index files for a specific db -spec cleanup_index_files(dbname()) -> ok. cleanup_index_files(DbName) -> + lists:foreach(fun(File) -> + file:delete(File) + end, inactive_index_files(DbName)). 
+ +%% @doc inactive index files for a specific db +-spec inactive_index_files(dbname()) -> ok. +inactive_index_files(DbName) -> {ok, DesignDocs} = fabric:design_docs(DbName), - ActiveSigs = lists:map(fun(#doc{id = GroupId}) -> + ActiveSigs = maps:from_list(lists:map(fun(#doc{id = GroupId}) -> {ok, Info} = fabric:get_view_group_info(DbName, GroupId), - binary_to_list(couch_util:get_value(signature, Info)) - end, [couch_doc:from_json_obj(DD) || DD <- DesignDocs]), + {binary_to_list(couch_util:get_value(signature, Info)), nil} + end, [couch_doc:from_json_obj(DD) || DD <- DesignDocs])), FileList = lists:flatmap(fun(#shard{name = ShardName}) -> IndexDir = couch_index_util:index_dir(mrview, ShardName), filelib:wildcard([IndexDir, "/*"]) end, mem3:local_shards(dbname(DbName))), - DeleteFiles = if ActiveSigs =:= [] -> FileList; true -> - {ok, RegExp} = re:compile([$(, string:join(ActiveSigs, "|"), $)]), + if ActiveSigs =:= [] -> FileList; true -> lists:filter(fun(FilePath) -> - re:run(FilePath, RegExp, [{capture, none}]) == nomatch + not maps:is_key(filename:basename(FilePath, ".view"), ActiveSigs) end, FileList) - end, - [file:delete(File) || File <- DeleteFiles], - ok. + end. %% @doc clean up index files for a specific db on all nodes -spec cleanup_index_files_all_nodes(dbname()) -> [reference()]. diff --git a/test/elixir/lib/setup/common.ex b/test/elixir/lib/setup/common.ex index 3b59e9476..e81f109c9 100644 --- a/test/elixir/lib/setup/common.ex +++ b/test/elixir/lib/setup/common.ex @@ -19,4 +19,9 @@ defmodule Couch.Test.Setup.Common do |> Step.Create.DB.new(:db) end + def with_db(setup) do + setup + |> Step.Start.new(:start, extra_apps: [:fabric]) + |> Step.Create.DB.new(:db) + end end \ No newline at end of file -- cgit v1.2.1 From f2724d3fe19f58bca8c81b2ae610c6dd1c7e44c8 Mon Sep 17 00:00:00 2001 From: ILYA Khlopotov Date: Mon, 12 Aug 2019 05:18:55 -0700 Subject: Do not fail 'dev/run' on connection close Sometimes admin party mode causes the 'dev/run' to fail with ``` http.client.RemoteDisconnected: Remote end closed connection without response ``` This PR makes this use case more robust. 
--- dev/run | 41 +++++++++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/dev/run b/dev/run index 60e7d5c35..10351eb86 100755 --- a/dev/run +++ b/dev/run @@ -690,27 +690,37 @@ def generate_cookie(): def cluster_setup_with_admin_party(ctx): + connect_nodes(ctx) + host, port = "127.0.0.1", cluster_port(ctx, 1) + create_system_databases(host, port) + + +def connect_nodes(ctx): host, port = "127.0.0.1", backend_port(ctx, 1) for node in ctx["nodes"]: - body = "{}" - conn = httpclient.HTTPConnection(host, port) - conn.request("PUT", "/_nodes/%s@127.0.0.1" % node, body) - resp = conn.getresponse() - if resp.status not in (200, 201, 202, 409): - print(("Failed to join %s into cluster: %s" % (node, resp.read()))) - sys.exit(1) - create_system_databases(host, cluster_port(ctx, 1)) + path = "/_nodes/%s@127.0.0.1" % node + try_request( + host, + port, + "PUT", + path, + (200, 201, 202, 409), + body="{}", + error="Failed to join %s into cluster:\n" % node, + ) -def try_request(host, port, meth, path, success_codes, retries=10, retry_dt=1): +def try_request( + host, port, meth, path, success_codes, body=None, retries=10, retry_dt=1, error="" +): while True: conn = httpclient.HTTPConnection(host, port) - conn.request(meth, path) + conn.request(meth, path, body=body) resp = conn.getresponse() if resp.status in success_codes: return resp.status, resp.read() elif retries <= 0: - assert resp.status in success_codes, resp.read() + assert resp.status in success_codes, "%s%s" % (error, resp.read()) retries -= 1 time.sleep(retry_dt) @@ -721,7 +731,14 @@ def create_system_databases(host, port): conn.request("HEAD", "/" + dbname) resp = conn.getresponse() if resp.status == 404: - try_request(host, port, "PUT", "/" + dbname, (201, 202, 412)) + try_request( + host, + port, + "PUT", + "/" + dbname, + (201, 202, 412), + error="Failed to create '%s' database:\n" % dbname, + ) @log( -- cgit v1.2.1 From 119ca034abed834cf9ad400310dbe397c6d8ea9e Mon Sep 17 00:00:00 2001 From: ILYA Khlopotov Date: Fri, 16 Aug 2019 12:24:01 +0000 Subject: Call :meck.unload() automatically after every test --- test/elixir/lib/ex_unit.ex | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/elixir/lib/ex_unit.ex b/test/elixir/lib/ex_unit.ex index 7abba07ef..8503cd991 100644 --- a/test/elixir/lib/ex_unit.ex +++ b/test/elixir/lib/ex_unit.ex @@ -35,6 +35,10 @@ defmodule Couch.Test.ExUnit.Case do end setup context do + on_exit(fn -> + :meck.unload() + end) + case context do %{:setup => setup_fun} -> {:ok, Setup.setup(context, setup_fun)} -- cgit v1.2.1 From 54fe9525b92e0b0ba94692d637a1d711f31b1621 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Fri, 9 Aug 2019 16:34:43 -0400 Subject: Ensure EUnit inherits appropriate env vars Omitting COUCHDB_VERSION caused the EUnit build of the replicator to have a corrupted User-Agent header. It tried to construct a version using git, but when building from a release tarball there is no git repo so the UA had a git error message in it. This error message contained a newline, which plausibly confused some part of the HTTP stack and caused replicator HTTP requests to hang. Related to #2098. 
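
Schematically, the eunit recipe now passes the version variables
explicitly on every rebar invocation (trimmed to one line for
illustration):

```
COUCHDB_VERSION=$(COUCHDB_VERSION) COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) \
    $(REBAR) -r eunit $(EUNIT_OPTS) apps=$$dir
```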
--- Makefile | 4 ++-- Makefile.win | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 8f85ab8de..f09ae326a 100644 --- a/Makefile +++ b/Makefile @@ -168,11 +168,11 @@ eunit: export BUILDDIR = $(shell pwd) eunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config eunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell pwd)/bin/couchjs $(shell pwd)/share/server/main.js eunit: couch - @$(REBAR) setup_eunit 2> /dev/null + @COUCHDB_VERSION=$(COUCHDB_VERSION) COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) $(REBAR) setup_eunit 2> /dev/null @for dir in $(subdirs); do \ tries=0; \ while true; do \ - $(REBAR) -r eunit $(EUNIT_OPTS) apps=$$dir ; \ + COUCHDB_VERSION=$(COUCHDB_VERSION) COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) $(REBAR) -r eunit $(EUNIT_OPTS) apps=$$dir ; \ if [ $$? -eq 0 ]; then \ break; \ else \ diff --git a/Makefile.win b/Makefile.win index a5e23d498..eda27a02a 100644 --- a/Makefile.win +++ b/Makefile.win @@ -141,8 +141,8 @@ eunit: export ERL_AFLAGS = $(shell echo "-config rel/files/eunit.config") eunit: export BUILDDIR = $(shell echo %cd%) eunit: export COUCHDB_QUERY_SERVER_JAVASCRIPT = $(shell echo %cd%)/bin/couchjs $(shell echo %cd%)/share/server/main.js eunit: couch - @$(REBAR) setup_eunit 2> nul - @$(REBAR) -r eunit $(EUNIT_OPTS) + @set COUCHDB_VERSION=$(COUCHDB_VERSION) && set COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) && $(REBAR) setup_eunit 2> nul + @set COUCHDB_VERSION=$(COUCHDB_VERSION) && set COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) && $(REBAR) -r eunit $(EUNIT_OPTS) .PHONY: exunit # target: exunit - Run ExUnit tests -- cgit v1.2.1 From e17f93393a3e6e1948f306786bd85889aec1805b Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 14 Aug 2019 12:07:38 -0400 Subject: Don't try to publish trusty packages We aren't building them anymore. --- Jenkinsfile | 2 -- 1 file changed, 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index f8f6a2f8b..342ac6c83 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -529,8 +529,6 @@ pipeline { reprepro -b couchdb-pkg/repo includedeb jessie pkgs/jessie/*.deb cp js/debian-stretch/*.deb pkgs/stretch reprepro -b couchdb-pkg/repo includedeb stretch pkgs/stretch/*.deb - cp js/ubuntu-trusty/*.deb pkgs/trusty - reprepro -b couchdb-pkg/repo includedeb trusty pkgs/trusty/*.deb cp js/ubuntu-xenial/*.deb pkgs/xenial reprepro -b couchdb-pkg/repo includedeb xenial pkgs/xenial/*.deb cp js/ubuntu-bionic/*.deb pkgs/bionic -- cgit v1.2.1 From 964afcdd7ad627b5f0e0f46653f48a1963c8001d Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 14 Aug 2019 13:19:23 -0400 Subject: Extend timeouts for couch_bt_engine_upgrade_tests Jenkins flaked out on one of these today. --- src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl index 3a516f8f7..a2a972caf 100644 --- a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl +++ b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl @@ -15,6 +15,7 @@ -include_lib("couch/include/couch_eunit.hrl"). -include_lib("couch/include/couch_db.hrl"). +-define(TIMEOUT, 60). 
% seconds setup(_) -> Ctx = test_util:start_couch(), @@ -63,7 +64,7 @@ upgrade_test_() -> t_upgrade_without_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin % There are three documents in the fixture % db with zero purge entries DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) @@ -99,11 +100,11 @@ t_upgrade_without_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)), ?assertEqual(1, couch_db:get_purge_seq(Db)) end) - end). + end)}. t_upgrade_with_1_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin % There are two documents in the fixture database % with a single purge entry DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) @@ -140,11 +141,11 @@ t_upgrade_with_1_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), ?assertEqual(2, couch_db:get_purge_seq(Db)) end) - end). + end)}. t_upgrade_with_N_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin % There is one document in the fixture database % with two docs that have been purged DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom) @@ -179,11 +180,11 @@ t_upgrade_with_N_purge_req(VersionFrom, {_Ctx, _NewPaths}) -> ?assertEqual({ok, 2}, couch_db:get_doc_count(Db)), ?assertEqual(3, couch_db:get_purge_seq(Db)) end) - end). + end)}. t_upgrade_with_1_purge_req_for_2_docs(VersionFrom, {_Ctx, _NewPaths}) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin % There are two documents (Doc4 and Doc5) in the fixture database % with three docs (Doc1, Doc2 and Doc3) that have been purged, and % with one purge req for Doc1 and another purge req for Doc 2 and Doc3 @@ -219,7 +220,7 @@ t_upgrade_with_1_purge_req_for_2_docs(VersionFrom, {_Ctx, _NewPaths}) -> ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)), ?assertEqual(4, couch_db:get_purge_seq(Db)) end) - end). + end)}. save_doc(DbName, Json) -> -- cgit v1.2.1 From 9ca6ab2d093869844c6aa591423e82f2d6988a9c Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 14 Aug 2019 14:33:53 -0400 Subject: Extend timeouts for chttpd_db_test suite The last 9 tests take a few hundred milliseconds locally and flaked a bit on Jenkins. For consistency's sake we bump the timeout from 5 to 60 seconds across the board. --- src/chttpd/test/eunit/chttpd_db_test.erl | 85 ++++++++++++++++---------------- 1 file changed, 43 insertions(+), 42 deletions(-) diff --git a/src/chttpd/test/eunit/chttpd_db_test.erl b/src/chttpd/test/eunit/chttpd_db_test.erl index 2708aa033..17250199a 100644 --- a/src/chttpd/test/eunit/chttpd_db_test.erl +++ b/src/chttpd/test/eunit/chttpd_db_test.erl @@ -23,6 +23,7 @@ -define(DESTHEADER2, {"Destination", "foo%2Fbar%23baz%3Fpow%3Afiz"}). -define(FIXTURE_TXT, ?ABS_PATH(?FILE)). -define(i2l(I), integer_to_list(I)). +-define(TIMEOUT, 60). % seconds setup() -> Hashed = couch_passwords:hash_admin_password(?PASS), @@ -88,7 +89,7 @@ all_test_() -> should_return_ok_true_on_bulk_update(Url) -> - ?_assertEqual(true, + {timeout, ?TIMEOUT, ?_assertEqual(true, begin {ok, _, _, Body} = create_doc(Url, "testdoc"), {Json} = ?JSON_DECODE(Body), @@ -99,27 +100,27 @@ should_return_ok_true_on_bulk_update(Url) -> ResultJson = ?JSON_DECODE(ResultBody), {InnerJson} = lists:nth(1, ResultJson), couch_util:get_value(<<"ok">>, InnerJson, undefined) - end). + end)}. 
should_return_ok_true_on_ensure_full_commit(Url0) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Url = Url0 ++ "/_ensure_full_commit", {ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []), {Json} = ?JSON_DECODE(Body), ?assertEqual(201, RC), ?assert(couch_util:get_value(<<"ok">>, Json)) - end). + end)}. should_return_404_for_ensure_full_commit_on_no_db(Url0) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin Url = Url0 ++ "-missing-db" ++ "/_ensure_full_commit", {ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []), {Json} = ?JSON_DECODE(Body), ?assertEqual(404, RC), ?assertEqual(<<"not_found">>, couch_util:get_value(<<"error">>, Json)) - end). + end)}. should_accept_live_as_an_alias_for_continuous(Url) -> @@ -135,7 +136,7 @@ should_accept_live_as_an_alias_for_continuous(Url) -> end, couch_util:get_value(<<"last_seq">>, Result, undefined) end, - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin {ok, _, _, ResultBody1} = test_request:get(Url ++ "/_changes?feed=live&timeout=1", [?AUTH]), LastSeq1 = GetLastSeq(ResultBody1), @@ -146,11 +147,11 @@ should_accept_live_as_an_alias_for_continuous(Url) -> LastSeq2 = GetLastSeq(ResultBody2), ?assertNotEqual(LastSeq1, LastSeq2) - end). + end)}. should_return_404_for_delete_att_on_notadoc(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin {ok, RC, _, RespBody} = test_request:delete( Url ++ "/notadoc/att.pdf", [?CONTENT_JSON, ?AUTH], @@ -168,11 +169,11 @@ should_return_404_for_delete_att_on_notadoc(Url) -> [] ), ?assertEqual(404, RC1) - end). + end)}. should_return_409_for_del_att_without_rev(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin {ok, RC, _, _} = test_request:put( Url ++ "/testdoc3", [?CONTENT_JSON, ?AUTH], @@ -186,11 +187,11 @@ should_return_409_for_del_att_without_rev(Url) -> [] ), ?assertEqual(409, RC1) - end). + end)}. should_return_200_for_del_att_with_rev(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin {ok, RC, _Headers, RespBody} = test_request:put( Url ++ "/testdoc4", [?CONTENT_JSON, ?AUTH], @@ -207,11 +208,11 @@ should_return_200_for_del_att_with_rev(Url) -> [] ), ?assertEqual(200, RC1) - end). + end)}. should_return_409_for_put_att_nonexistent_rev(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin {ok, RC, _Headers, RespBody} = test_request:put( Url ++ "/should_return_404/file.erl?rev=1-000", [?CONTENT_JSON, ?AUTH], @@ -222,11 +223,11 @@ should_return_409_for_put_att_nonexistent_rev(Url) -> {<<"error">>,<<"not_found">>}, {<<"reason">>,<<"missing_rev">>}]}, ?JSON_DECODE(RespBody)) - end). + end)}. should_return_update_seq_when_set_on_all_docs(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)], {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/" ++ "?update_seq=true&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]), @@ -236,11 +237,11 @@ should_return_update_seq_when_set_on_all_docs(Url) -> couch_util:get_value(<<"update_seq">>, ResultJson)), ?assertNotEqual(undefined, couch_util:get_value(<<"offset">>, ResultJson)) - end). + end)}. 
should_not_return_update_seq_when_unset_on_all_docs(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)], {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/" ++ "?update_seq=false&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]), @@ -250,11 +251,11 @@ should_not_return_update_seq_when_unset_on_all_docs(Url) -> couch_util:get_value(<<"update_seq">>, ResultJson)), ?assertNotEqual(undefined, couch_util:get_value(<<"offset">>, ResultJson)) - end). + end)}. should_return_correct_id_on_doc_copy(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin {ok, _, _, _} = create_doc(Url, "testdoc"), {_, _, _, ResultBody1} = test_request:copy(Url ++ "/testdoc/", [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]), @@ -269,7 +270,7 @@ should_return_correct_id_on_doc_copy(Url) -> ?assertEqual(<<102,111,111,229,149,138,98,97,114>>, Id1), ?assertEqual(<<"foo/bar#baz?pow:fiz">>, Id2) ] - end). + end)}. attachment_doc() -> @@ -285,7 +286,7 @@ attachment_doc() -> should_return_400_for_bad_engine(_) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin TmpDb = ?tempdb(), Addr = config:get("chttpd", "bind_address", "127.0.0.1"), Port = mochiweb_socket_server:get(chttpd, port), @@ -293,11 +294,11 @@ should_return_400_for_bad_engine(_) -> Url = BaseUrl ++ "?engine=cowabunga", {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), ?assertEqual(400, Status) - end). + end)}. should_succeed_on_all_docs_with_queries_keys(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}]}", {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/", @@ -307,11 +308,11 @@ should_succeed_on_all_docs_with_queries_keys(Url) -> ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), {InnerJson} = lists:nth(1, ResultJsonBody), ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). + end)}. should_succeed_on_all_docs_with_queries_limit_skip(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/", @@ -322,11 +323,11 @@ should_succeed_on_all_docs_with_queries_limit_skip(Url) -> {InnerJson} = lists:nth(1, ResultJsonBody), ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)), ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). + end)}. should_succeed_on_all_docs_with_multiple_queries(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}, {\"limit\": 5, \"skip\": 2}]}", @@ -340,11 +341,11 @@ should_succeed_on_all_docs_with_multiple_queries(Url) -> {InnerJson2} = lists:nth(2, ResultJsonBody), ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) - end). + end)}. 
should_succeed_on_design_docs_with_queries_keys(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\", \"_design/ddoc8\"]}]}", @@ -355,11 +356,11 @@ should_succeed_on_design_docs_with_queries_keys(Url) -> ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), {InnerJson} = lists:nth(1, ResultJsonBody), ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). + end)}. should_succeed_on_design_docs_with_queries_limit_skip(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", {ok, RC, _, RespBody} = test_request:post(Url ++ @@ -370,11 +371,11 @@ should_succeed_on_design_docs_with_queries_limit_skip(Url) -> {InnerJson} = lists:nth(1, ResultJsonBody), ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)), ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). + end)}. should_succeed_on_design_docs_with_multiple_queries(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\", \"_design/ddoc8\"]}, {\"limit\": 5, \"skip\": 2}]}", @@ -388,11 +389,11 @@ should_succeed_on_design_docs_with_multiple_queries(Url) -> {InnerJson2} = lists:nth(2, ResultJsonBody), ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) - end). + end)}. should_succeed_on_local_docs_with_queries_keys(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], QueryDoc = "{\"queries\": [{\"keys\": [ \"_local/doc3\", \"_local/doc8\"]}]}", @@ -403,11 +404,11 @@ should_succeed_on_local_docs_with_queries_keys(Url) -> ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), {InnerJson} = lists:nth(1, ResultJsonBody), ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). + end)}. should_succeed_on_local_docs_with_queries_limit_skip(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", {ok, RC, _, RespBody} = test_request:post(Url ++ @@ -417,11 +418,11 @@ should_succeed_on_local_docs_with_queries_limit_skip(Url) -> ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), {InnerJson} = lists:nth(1, ResultJsonBody), ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). + end)}. should_succeed_on_local_docs_with_multiple_queries(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], QueryDoc = "{\"queries\": [{\"keys\": [ \"_local/doc3\", \"_local/doc8\"]}, {\"limit\": 5, \"skip\": 2}]}", @@ -434,4 +435,4 @@ should_succeed_on_local_docs_with_multiple_queries(Url) -> ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), {InnerJson2} = lists:nth(2, ResultJsonBody), ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) - end). + end)}. 
-- cgit v1.2.1 From ae62936dbbb5792bb832093667fe1f78524a2ff8 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 14 Aug 2019 16:30:02 -0400 Subject: Extend timeouts for chttpd_view_test suite More occasional flakiness on Jenkins. --- src/chttpd/test/eunit/chttpd_view_test.erl | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/chttpd/test/eunit/chttpd_view_test.erl b/src/chttpd/test/eunit/chttpd_view_test.erl index 3457c6f30..4c224bb4e 100644 --- a/src/chttpd/test/eunit/chttpd_view_test.erl +++ b/src/chttpd/test/eunit/chttpd_view_test.erl @@ -24,6 +24,7 @@ -define(FIXTURE_TXT, ?ABS_PATH(?FILE)). -define(i2l(I), integer_to_list(I)). +-define(TIMEOUT, 60). % seconds setup() -> Hashed = couch_passwords:hash_admin_password(?PASS), @@ -71,7 +72,7 @@ all_view_test_() -> should_succeed_on_view_with_queries_keys(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], {ok, _, _, _} = test_request:put(Url ++ "/_design/bar", [?CONTENT_JSON, ?AUTH], ?DDOC), @@ -83,11 +84,11 @@ should_succeed_on_view_with_queries_keys(Url) -> ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), {InnerJson} = lists:nth(1, ResultJsonBody), ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). + end)}. should_succeed_on_view_with_queries_limit_skip(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], {ok, _, _, _} = test_request:put(Url ++ "/_design/bar", [?CONTENT_JSON, ?AUTH], ?DDOC), @@ -100,11 +101,11 @@ should_succeed_on_view_with_queries_limit_skip(Url) -> {InnerJson} = lists:nth(1, ResultJsonBody), ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)), ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). + end)}. should_succeed_on_view_with_multiple_queries(Url) -> - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], {ok, _, _, _} = test_request:put(Url ++ "/_design/bar", [?CONTENT_JSON, ?AUTH], ?DDOC), @@ -120,4 +121,4 @@ should_succeed_on_view_with_multiple_queries(Url) -> {InnerJson2} = lists:nth(2, ResultJsonBody), ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) - end). + end)}. 
-- cgit v1.2.1 From 08075fd3f0e5477e892678874cfc2319b21de951 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Sat, 17 Aug 2019 09:26:25 -0400 Subject: Give resharding more time to complete --- test/elixir/test/reshard_helpers.exs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/elixir/test/reshard_helpers.exs b/test/elixir/test/reshard_helpers.exs index 52ce301df..282d98c82 100644 --- a/test/elixir/test/reshard_helpers.exs +++ b/test/elixir/test/reshard_helpers.exs @@ -92,7 +92,7 @@ defmodule ReshardHelpers do end def wait_job_removed(id) do - retry_until(fn -> get_job(id).status_code == 404 end, 200, 10_000) + retry_until(fn -> get_job(id).status_code == 404 end, 200, 60_000) end def wait_job_completed(id) do @@ -100,7 +100,7 @@ defmodule ReshardHelpers do end def wait_job_state(id, state) do - retry_until(fn -> get_job_state(id) == state end, 200, 10_000) + retry_until(fn -> get_job_state(id) == state end, 200, 60_000) end def reset_reshard_state do -- cgit v1.2.1 From 8b613bbd7bfcf606f8344b519f68bf47f346a267 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Sat, 17 Aug 2019 10:45:26 -0400 Subject: Bump default timeout for retry_until --- test/elixir/lib/couch/db_test.ex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/elixir/lib/couch/db_test.ex b/test/elixir/lib/couch/db_test.ex index 990173a13..f98376c94 100644 --- a/test/elixir/lib/couch/db_test.ex +++ b/test/elixir/lib/couch/db_test.ex @@ -290,7 +290,7 @@ defmodule Couch.DBTest do end end - def retry_until(condition, sleep \\ 100, timeout \\ 5000) do + def retry_until(condition, sleep \\ 100, timeout \\ 30_000) do retry_until(condition, now(:ms), sleep, timeout) end -- cgit v1.2.1 From 69275859c6a4840e127c5ba30e756a7640d5cfd5 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Sat, 17 Aug 2019 19:03:32 -0400 Subject: Extend timeout on shard splitting test --- src/couch/test/eunit/couch_db_split_tests.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/couch/test/eunit/couch_db_split_tests.erl b/src/couch/test/eunit/couch_db_split_tests.erl index c44f47ca3..312b5f84d 100644 --- a/src/couch/test/eunit/couch_db_split_tests.erl +++ b/src/couch/test/eunit/couch_db_split_tests.erl @@ -155,7 +155,7 @@ should_copy_local_docs({Desc, TotalDocs, Q}, DbName) -> TMap = make_targets(Ranges), DocsPerRange = TotalDocs div Q, PickFun = make_pickfun(DocsPerRange), - {Desc, ?_test(begin + {Desc, timeout, ?TIMEOUT, ?_test(begin {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun), ?assertEqual(ExpectSeq, UpdateSeq), Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun), -- cgit v1.2.1 From adc0315ecdb37a30ebb3eeaada70143ebe7dbd70 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Sun, 18 Aug 2019 09:33:25 -0400 Subject: Refactor test to keep setup/teardown out of timer --- src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl index e37f1c090..9a5391587 100644 --- a/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl +++ b/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl @@ -47,14 +47,17 @@ stop_couch(Ctx) -> check_not_started_test() -> % Starting couch, but not ddoc_cache - Ctx = test_util:start_couch(), - try - Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}}, - ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key)) - after - test_util:stop_couch(Ctx) - end. 
-
+    {
+        setup,
+        fun test_util:start_couch/0,
+        fun test_util:stop_couch/1,
+        [
+            fun(_) ->
+                Key = {ddoc_cache_entry_custom, {<<"dbname">>, ?MODULE}},
+                ?assertEqual({ok, <<"dbname">>}, ddoc_cache_lru:open(Key))
+            end
+        ]
+    }.
 
 check_lru_test_() ->
     {
-- cgit v1.2.1


From 29b3569e7d23dc96adc30fe60aa903e59a4f7e90 Mon Sep 17 00:00:00 2001
From: ILYA Khlopotov
Date: Mon, 19 Aug 2019 14:52:17 +0000
Subject: Update httpotion to 3.1.3

There were a couple of hacks in test/elixir/lib/couch.ex. The changes
needed to remove them landed in httpotion 3.1.3.

The changes were introduced in:
- https://github.com/myfreeweb/httpotion/pull/118
- https://github.com/myfreeweb/httpotion/pull/130
---
 mix.exs | 2 +-
 mix.lock | 2 +-
 test/elixir/lib/couch.ex | 103 +----------------------------------------------
 3 files changed, 3 insertions(+), 104 deletions(-)

diff --git a/mix.exs b/mix.exs
index 43c8b6d93..2859da3d1 100644
--- a/mix.exs
+++ b/mix.exs
@@ -36,7 +36,7 @@ defmodule CouchDBTest.Mixfile do
 defp deps() do
 [
 {:junit_formatter, "~> 3.0", only: [:dev, :test, :integration]},
- {:httpotion, "~> 3.0", only: [:dev, :test, :integration], runtime: false},
+ {:httpotion, ">= 3.1.3", only: [:dev, :test, :integration], runtime: false},
 {:jiffy, path: Path.expand("src/jiffy", __DIR__)},
 {:ibrowse, path: Path.expand("src/ibrowse", __DIR__), override: true, compile: false},
diff --git a/mix.lock b/mix.lock
index 34c0ea961..343215fdc 100644
--- a/mix.lock
+++ b/mix.lock
@@ -1,7 +1,7 @@
 %{
 "bunt": {:hex, :bunt, "0.2.0", "951c6e801e8b1d2cbe58ebbd3e616a869061ddadcc4863d0a2182541acae9a38", [:mix], [], "hexpm"},
 "credo": {:hex, :credo, "1.0.5", "fdea745579f8845315fe6a3b43e2f9f8866839cfbc8562bb72778e9fdaa94214", [:mix], [{:bunt, "~> 0.2.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm"},
- "httpotion": {:hex, :httpotion, "3.1.2", "50e3e559c2ffe8c8908c97e4ffb01efc1c18e8547cc7ce5dd173c9cf0a573a3b", [:mix], [{:ibrowse, "== 4.4.0", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm"},
+ "httpotion": {:hex, :httpotion, "3.1.3", "fdaf1e16b9318dcb722de57e75ac368c93d4c6e3c9125f93e960f953a750fb77", [:mix], [{:ibrowse, "== 4.4.0", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm"},
 "ibrowse": {:hex, :ibrowse, "4.4.0", "2d923325efe0d2cb09b9c6a047b2835a5eda69d8a47ed6ff8bc03628b764e991", [:rebar3], [], "hexpm"},
 "jason": {:hex, :jason, "1.1.2", "b03dedea67a99223a2eaf9f1264ce37154564de899fd3d8b9a21b1a6fd64afe7", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm"},
 "jiffy": {:hex, :jiffy, "0.15.2", "de266c390111fd4ea28b9302f0bc3d7472468f3b8e0aceabfbefa26d08cd73b7", [:rebar3], [], "hexpm"},
diff --git a/test/elixir/lib/couch.ex b/test/elixir/lib/couch.ex
index 3c43ab152..65da13d75 100644
--- a/test/elixir/lib/couch.ex
+++ b/test/elixir/lib/couch.ex
@@ -67,7 +67,7 @@ defmodule Couch do
 base_url <> url
 end
 
- def process_request_headers(headers, options) do
+ def process_request_headers(headers, _body, options) do
 headers = Keyword.put(headers, :"User-Agent", "couch-potion")
 
 headers =
@@ -133,105 +133,4 @@ defmodule Couch do
 %Couch.Session{cookie: token}
 end
 
- # HACK: this is here until this commit lands in a release
- # https://github.com/myfreeweb/httpotion/commit/f3fa2f0bc3b9b400573942b3ba4628b48bc3c614
- def handle_response(response) do
- case response do
- {:ok, status_code, headers, body, _} ->
- processed_headers = process_response_headers(headers)
-
- %HTTPotion.Response{
- status_code: process_status_code(status_code),
- headers: processed_headers,
- body: process_response_body(processed_headers, body)
- }
-
- {:ok, status_code, headers, body} ->
- processed_headers = process_response_headers(headers)
-
- %HTTPotion.Response{
- status_code: process_status_code(status_code),
- headers: processed_headers,
- body: process_response_body(processed_headers, body)
- }
-
- {:ibrowse_req_id, id} ->
- %HTTPotion.AsyncResponse{id: id}
-
- {:error, {:conn_failed, {:error, reason}}} ->
- %HTTPotion.ErrorResponse{message: error_to_string(reason)}
-
- {:error, :conn_failed} ->
- %HTTPotion.ErrorResponse{message: "conn_failed"}
-
- {:error, reason} ->
- %HTTPotion.ErrorResponse{message: error_to_string(reason)}
- end
- end
-
- # Anther HACK: Until we can get process_request_headers/2 merged
- # upstream.
- @spec process_arguments(atom, String.t(), [{atom(), any()}]) :: %{}
- defp process_arguments(method, url, options) do
- options = process_options(options)
-
- body = Keyword.get(options, :body, "")
-
- headers =
- Keyword.merge(
- Application.get_env(:httpotion, :default_headers, []),
- Keyword.get(options, :headers, [])
- )
-
- timeout =
- Keyword.get(
- options,
- :timeout,
- Application.get_env(:httpotion, :default_timeout, @request_timeout)
- )
-
- ib_options =
- Keyword.merge(
- Application.get_env(:httpotion, :default_ibrowse, []),
- Keyword.get(options, :ibrowse, [{:inactivity_timeout, @inactivity_timeout}])
- )
-
- follow_redirects =
- Keyword.get(
- options,
- :follow_redirects,
- Application.get_env(:httpotion, :default_follow_redirects, false)
- )
-
- ib_options =
- if stream_to = Keyword.get(options, :stream_to),
- do:
- Keyword.put(
- ib_options,
- :stream_to,
- spawn(__MODULE__, :transformer, [stream_to, method, url, options])
- ),
- else: ib_options
-
- ib_options =
- if user_password = Keyword.get(options, :basic_auth) do
- {user, password} = user_password
- Keyword.put(ib_options, :basic_auth, {to_charlist(user), to_charlist(password)})
- else
- ib_options
- end
-
- %{
- method: method,
- url: url |> to_string |> process_url(options) |> to_charlist,
- body: body |> process_request_body,
- headers:
- headers
- |> process_request_headers(options)
- |> Enum.map(fn {k, v} -> {to_charlist(k), to_charlist(v)} end),
- timeout: timeout,
- ib_options: ib_options,
- follow_redirects: follow_redirects
- }
- end
 end
-- cgit v1.2.1


From ac4d4fcc7d161725196437520e3b5cfee8dc9954 Mon Sep 17 00:00:00 2001
From: ILYA Khlopotov
Date: Mon, 19 Aug 2019 16:34:23 +0000
Subject: Increase default HTTP timeouts

This commit ports `couch.ex`-related changes from
https://github.com/apache/couchdb/pull/2104 into the world where we
don't override `process_arguments/3`.
---
 test/elixir/lib/couch.ex | 44 +++++++++++++++++++++++++++++++-------------
 1 file changed, 31 insertions(+), 13 deletions(-)

diff --git a/test/elixir/lib/couch.ex b/test/elixir/lib/couch.ex
index 65da13d75..6c7310d56 100644
--- a/test/elixir/lib/couch.ex
+++ b/test/elixir/lib/couch.ex
@@ -87,19 +87,10 @@ defmodule Couch do
 end
 
 def process_options(options) do
- if Keyword.get(options, :cookie) == nil do
- headers = Keyword.get(options, :headers, [])
-
- if headers[:basic_auth] != nil or headers[:authorization] != nil do
- options
- else
- username = System.get_env("EX_USERNAME") || "adm"
- password = System.get_env("EX_PASSWORD") || "pass"
- Keyword.put(options, :basic_auth, {username, password})
- end
- else
- options
- end
+ options
+ |> set_auth_options()
+ |> set_inactivity_timeout()
+ |> set_request_timeout()
 end
 
 def
process_request_body(body) do @@ -120,6 +111,33 @@ defmodule Couch do end end + def set_auth_options(options) do + if Keyword.get(options, :cookie) == nil do + headers = Keyword.get(options, :headers, []) + + if headers[:basic_auth] != nil or headers[:authorization] != nil do + options + else + username = System.get_env("EX_USERNAME") || "adm" + password = System.get_env("EX_PASSWORD") || "pass" + Keyword.put(options, :basic_auth, {username, password}) + end + else + options + end + end + + def set_inactivity_timeout(options) do + Keyword.update(options, :ibrowse, [{:inactivity_timeout, @inactivity_timeout}], fn(ibrowse) -> + Keyword.put_new(ibrowse, :inactivity_timeout, @inactivity_timeout) + end) + end + + def set_request_timeout(options) do + timeout = Application.get_env(:httpotion, :default_timeout, @request_timeout) + Keyword.put_new(options, :timeout, timeout) + end + def login(userinfo) do [user, pass] = String.split(userinfo, ":", parts: 2) login(user, pass) -- cgit v1.2.1 From e5baa1f30b6427b74f5afd4bc8f773fcbd497cbd Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Tue, 20 Aug 2019 12:18:34 -0400 Subject: Fix _scheduler/docs response for local replication endpoints When the `_scheduler/docs` response is generated, the replication docs are parsed and credentials are stripped by `couch_replicator:strip_url_creds/1`. When local endpoint support was removed, that function didn't properly catch the parsing error for local endpoints and as a result was error-ing out the whole response. The fix is to catch the error and return the endpoint as is. The catch is specific to that error, so turned the long binary message into a shorter, but hopefully still obvious atom. `_scheduler/docs` response would look like: ``` { "docs": [ { "database": "_replicator", "doc_id": "r", "error_count": 1, "id": null, "info": "local_endpoints_not_supported", "last_updated": "2019-08-20T16:09:53Z", "source": "http://adm:*****@127.0.0.1:15984/s/", "start_time": "2019-08-20T16:09:53Z", "state": "failed", "target": "t" } ], "offset": 0, "total_rows": 1 } ``` Interestingly, there was already a test for this case, except it wasn't included in the EUnit test suite list. --- src/couch_replicator/src/couch_replicator.erl | 15 +++++++++------ src/couch_replicator/src/couch_replicator_docs.erl | 4 ++-- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl index e4fa31cee..9c7e318b6 100644 --- a/src/couch_replicator/src/couch_replicator.erl +++ b/src/couch_replicator/src/couch_replicator.erl @@ -144,11 +144,13 @@ replication_states() -> -spec strip_url_creds(binary() | {[_]}) -> binary(). strip_url_creds(Endpoint) -> - case couch_replicator_docs:parse_rep_db(Endpoint, [], []) of - #httpdb{url=Url} -> - iolist_to_binary(couch_util:url_strip_password(Url)); - LocalDb when is_binary(LocalDb) -> - LocalDb + try + couch_replicator_docs:parse_rep_db(Endpoint, [], []) of + #httpdb{url = Url} -> + iolist_to_binary(couch_util:url_strip_password(Url)) + catch + throw:{error, local_endpoints_not_supported} -> + Endpoint end. @@ -359,7 +361,8 @@ strip_url_creds_test_() -> fun (_) -> meck:unload() end, [ t_strip_http_basic_creds(), - t_strip_http_props_creds() + t_strip_http_props_creds(), + t_strip_local_db_creds() ] }. 
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl index c07caa1aa..2d6db1b73 100644 --- a/src/couch_replicator/src/couch_replicator_docs.erl +++ b/src/couch_replicator/src/couch_replicator_docs.erl @@ -424,7 +424,7 @@ parse_rep_db(<<"https://", _/binary>> = Url, Proxy, Options) -> parse_rep_db({[{<<"url">>, Url}]}, Proxy, Options); parse_rep_db(<<_/binary>>, _Proxy, _Options) -> - throw({error, <<"Local endpoints not supported since CouchDB 3.x">>}); + throw({error, local_endpoints_not_supported}); parse_rep_db(undefined, _Proxy, _Options) -> throw({error, <<"Missing replicator database">>}). @@ -843,7 +843,7 @@ t_error_on_local_endpoint() -> {<<"source">>, <<"localdb">>}, {<<"target">>, <<"http://somehost.local/tgt">>} ]}, - Expect = <<"Local endpoints not supported since CouchDB 3.x">>, + Expect = local_endpoints_not_supported, ?assertThrow({bad_rep_doc, Expect}, parse_rep_doc_without_id(RepDoc)) end). -- cgit v1.2.1 From 258f89e13903f34c8248041cef114c6bfd229a78 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Fri, 23 Aug 2019 10:09:52 +0100 Subject: Send a 500, not a 400, for unknown search errors We should only send a 400 Bad Request if there is genuinely something wrong with the request, otherwise we mislead users and sysadmins. --- src/mango/src/mango_error.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mango/src/mango_error.erl b/src/mango/src/mango_error.erl index 2f22552c9..bb545ad67 100644 --- a/src/mango/src/mango_error.erl +++ b/src/mango/src/mango_error.erl @@ -69,7 +69,7 @@ info(mango_cursor_text, {text_search_error, {error, {bad_request, Msg}}}) }; info(mango_cursor_text, {text_search_error, {error, Error}}) -> { - 400, + 500, <<"text_search_error">>, fmt("~p", [Error]) }; -- cgit v1.2.1 From e9e8ae5a0f4bffdb1109f9a008a7d43624ee9c63 Mon Sep 17 00:00:00 2001 From: jiangph Date: Fri, 23 Aug 2019 18:24:01 +0800 Subject: Keep database property after overwriting shard map --- src/chttpd/test/eunit/chttpd_db_test.erl | 24 ++++++++++++++++++++++++ src/mem3/src/mem3_util.erl | 16 +++++++++++++++- 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/src/chttpd/test/eunit/chttpd_db_test.erl b/src/chttpd/test/eunit/chttpd_db_test.erl index 17250199a..c819bdf6e 100644 --- a/src/chttpd/test/eunit/chttpd_db_test.erl +++ b/src/chttpd/test/eunit/chttpd_db_test.erl @@ -73,6 +73,7 @@ all_test_() -> fun should_not_return_update_seq_when_unset_on_all_docs/1, fun should_return_correct_id_on_doc_copy/1, fun should_return_400_for_bad_engine/1, + fun should_not_change_db_proper_after_rewriting_shardmap/1, fun should_succeed_on_all_docs_with_queries_keys/1, fun should_succeed_on_all_docs_with_queries_limit_skip/1, fun should_succeed_on_all_docs_with_multiple_queries/1, @@ -297,6 +298,29 @@ should_return_400_for_bad_engine(_) -> end)}. 
+should_not_change_db_proper_after_rewriting_shardmap(_) ->
+    {timeout, ?TIMEOUT, ?_test(begin
+        TmpDb = ?tempdb(),
+        Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+        Port = mochiweb_socket_server:get(chttpd, port),
+        AdmPort = mochiweb_socket_server:get(couch_httpd, port),
+
+        BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
+        Url = BaseUrl ++ "?partitioned=true&q=1",
+        {ok, 201, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
+
+        ShardDbName = ?l2b(config:get("mem3", "shards_db", "_dbs")),
+        {ok, ShardDb} = mem3_util:ensure_exists(ShardDbName),
+        {ok, #doc{body = {Props}}} = couch_db:open_doc(
+            ShardDb, TmpDb, [ejson_body]),
+        Shards = mem3_util:build_shards(TmpDb, Props),
+
+        {Prop2} = ?JSON_DECODE(?JSON_ENCODE({Props})),
+        Shards2 = mem3_util:build_shards(TmpDb, Prop2),
+        ?assertEqual(Shards2, Shards)
+    end)}.
+
+
 should_succeed_on_all_docs_with_queries_keys(Url) ->
     {timeout, ?TIMEOUT, ?_test(begin
     [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
diff --git a/src/mem3/src/mem3_util.erl b/src/mem3/src/mem3_util.erl
index e8cba5d7b..3fc9b4f8e 100644
--- a/src/mem3/src/mem3_util.erl
+++ b/src/mem3/src/mem3_util.erl
@@ -221,11 +221,25 @@ get_engine_opt(DocProps) ->
 get_props_opt(DocProps) ->
 case couch_util:get_value(<<"props">>, DocProps) of
 {Props} when is_list(Props) ->
- [{props, Props}];
+ [{props, db_props_from_json(Props)}];
 _ ->
 []
 end.
 
+db_props_from_json([]) ->
+ [];
+
+db_props_from_json([{<<"partitioned">>, Value} | Rest]) ->
+ [{partitioned, Value} | db_props_from_json(Rest)];
+
+db_props_from_json([{<<"hash">>, [MBin, FBin, A]} | Rest]) ->
+ M = binary_to_existing_atom(MBin, utf8),
+ F = binary_to_existing_atom(FBin, utf8),
+ [{hash, [M, F, A]} | db_props_from_json(Rest)];
+
+db_props_from_json([{K, V} | Rest]) ->
+ [{K, V} | db_props_from_json(Rest)].
+
 n_val(undefined, NodeCount) ->
 n_val(config:get("cluster", "n", "3"), NodeCount);
 n_val(N, NodeCount) when is_list(N) ->
-- cgit v1.2.1


From e9e8ae5a0f4bffdb1109f9a008a7d43624ee9c63 Mon Sep 17 00:00:00 2001
From: Eric Avdey
Date: Wed, 28 Aug 2019 19:33:33 -0300
Subject: Fix badmatch in fabric_view_all_docs

In a query for all_docs with a passed keys array it is possible for
`doc_receive_loop/6` to time out and trigger a `badmatch` exception.

The fix changes the code to accept `timeout` as a possible response
and passes it to Callback to process.
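A minimal standalone sketch of the pattern this fix adopts (illustrative names
only, not the fabric code itself): surface `timeout` as an ordinary return value
so the caller can branch on it instead of crashing in an `{ok, _} = ...` match:

```
% Illustrative only; module, function, and message shapes are made up.
-module(recv_sketch).
-export([await/2]).

await(Ref, TimeoutMs) ->
    receive
        {Ref, Result} ->
            {ok, Result}
    after TimeoutMs ->
        % a plain value the caller can match on, rather than a crash
        timeout
    end.
```

The caller then handles both shapes, e.g.
`case recv_sketch:await(Ref, 5000) of {ok, R} -> R; timeout -> Callback(timeout, Acc0) end`,
which is the same branching the diff below introduces around `doc_receive_loop/6`.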
--- src/fabric/src/fabric_view_all_docs.erl | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl index 1d87e3ddd..5fe143731 100644 --- a/src/fabric/src/fabric_view_all_docs.erl +++ b/src/fabric/src/fabric_view_all_docs.erl @@ -104,10 +104,15 @@ go(DbName, Options, QueryArgs, Callback, Acc0) -> [{total, TotalRows}, {offset, null}, {update_seq, null}] end, {ok, Acc1} = Callback({meta, Meta}, Acc0), - {ok, Acc2} = doc_receive_loop( + Resp = doc_receive_loop( Keys3, queue:new(), SpawnFun, MaxJobs, Callback, Acc1 ), - Callback(complete, Acc2); + case Resp of + {ok, Acc2} -> + Callback(complete, Acc2); + timeout -> + Callback(timeout, Acc0) + end; {'DOWN', Ref, _, _, Error} -> Callback({error, Error}, Acc0) after Timeout -> -- cgit v1.2.1 From 59067d5bcb37f6697608f82c45c1cedbc7b8810c Mon Sep 17 00:00:00 2001 From: sarveshtamba Date: Fri, 30 Aug 2019 17:04:51 +0530 Subject: Skipping CompactTest due to compaction failure --- test/elixir/test/compact_test.exs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/elixir/test/compact_test.exs b/test/elixir/test/compact_test.exs index 4b0a5a07a..4c6657ea9 100644 --- a/test/elixir/test/compact_test.exs +++ b/test/elixir/test/compact_test.exs @@ -12,6 +12,9 @@ defmodule CompactTest do @att_name "foo.txt" @att_plaintext "This is plain text" + # Need to investigate why compaction is not compacting (or compactor cannot complete) + # Refer:- https://github.com/apache/couchdb/pull/2127 + @tag :pending @tag :skip_on_jenkins @tag :with_db test "compaction reduces size of deleted docs", context do -- cgit v1.2.1 From 8a062686fac0a29af2d6f3c7da81ff3ff1e484fb Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Mon, 9 Sep 2019 12:26:09 +0200 Subject: feat: less scary error message --- src/chttpd/src/chttpd_auth_cache.erl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/chttpd/src/chttpd_auth_cache.erl b/src/chttpd/src/chttpd_auth_cache.erl index 638d8c748..fdae27b79 100644 --- a/src/chttpd/src/chttpd_auth_cache.erl +++ b/src/chttpd/src/chttpd_auth_cache.erl @@ -113,6 +113,9 @@ handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) -> Seq = case Reason of {seq, EndSeq} -> EndSeq; + {database_does_not_exist, _} -> + couch_log:notice("~p changes listener died because the _users database does not exist. Create the database to silence this notice.", [?MODULE]), + 0; _ -> couch_log:notice("~p changes listener died ~r", [?MODULE, Reason]), 0 -- cgit v1.2.1 From b79f19757434b407060d044979af82b7123b5dc4 Mon Sep 17 00:00:00 2001 From: jiangph Date: Mon, 9 Sep 2019 14:31:51 +0800 Subject: fixup fabric:cleanup_index_files --- src/fabric/src/fabric.erl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl index d98ffc978..27fa8c045 100644 --- a/src/fabric/src/fabric.erl +++ b/src/fabric/src/fabric.erl @@ -524,8 +524,11 @@ inactive_index_files(DbName) -> end, mem3:local_shards(dbname(DbName))), if ActiveSigs =:= [] -> FileList; true -> + %% .view and .compact.view where is in ActiveSigs + %% will be excluded from FileList because they are active view + %% files and should not be deleted. lists:filter(fun(FilePath) -> - not maps:is_key(filename:basename(FilePath, ".view"), ActiveSigs) + not maps:is_key(get_view_sig_from_filename(FilePath), ActiveSigs) end, FileList) end. 
@@ -662,6 +665,8 @@ kl_to_record(KeyList,RecName) -> set_namespace(NS, #mrargs{extra = Extra} = Args) -> Args#mrargs{extra = [{namespace, NS} | Extra]}. +get_view_sig_from_filename(FilePath) -> + filename:basename(filename:basename(FilePath, ".view"), ".compact"). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). -- cgit v1.2.1 From dd5ac138ecbdee76ff3ba68664f25c2a5cdda7cc Mon Sep 17 00:00:00 2001 From: ILYA Khlopotov Date: Tue, 10 Sep 2019 11:30:46 +0000 Subject: Support `--extra_args` parameter in `dev/run` Sometimes there is a need to specify additional arguments for the beam process we start from dev/run. In particular the feature is handy for: - changing emulator flags - simulate OOM via available RAM restrictions - enable module loading tracing - configure number of schedulers - modify applications configuration - run customization script to add extra development deps (such as automatic code reload) Historically developers had to edit dev/run to do it. This PR adds an ability to specify additional arguments via `--extra_args` argument. In order to run customization script create `customization.erl` which exports `start/0` and run it using: ``` dev/run --extra_args='-run customization' ``` --- dev/run | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/dev/run b/dev/run index 10351eb86..9209c1129 100755 --- a/dev/run +++ b/dev/run @@ -110,6 +110,11 @@ def setup_logging(ctx): def setup_argparse(): + parser = get_args_parser() + return parser.parse_args() + + +def get_args_parser(): parser = optparse.OptionParser(description="Runs CouchDB 2.0 dev cluster") parser.add_option( "-a", @@ -200,7 +205,13 @@ def setup_argparse(): action="store_true", help="Select available ports for nodes automatically", ) - return parser.parse_args() + parser.add_option( + "--extra_args", + dest="extra_args", + default=None, + help="Extra arguments to pass to beam process", + ) + return parser def setup_context(opts, args): @@ -223,6 +234,7 @@ def setup_context(opts, args): "haproxy_port": opts.haproxy_port, "config_overrides": opts.config_overrides, "no_eval": opts.no_eval, + "extra_args": opts.extra_args, "reset_logs": True, "procs": [], "auto_ports": opts.auto_ports, @@ -578,6 +590,8 @@ def boot_node(ctx, node): mode = "r+b" logfname = os.path.join(ctx["devdir"], "logs", "%s.log" % node) log = open(logfname, mode) + if "extra_args" in ctx and ctx["extra_args"]: + cmd += ctx["extra_args"].split(" ") cmd = [toposixpath(x) for x in cmd] return sp.Popen(cmd, stdin=sp.PIPE, stdout=log, stderr=sp.STDOUT, env=env) -- cgit v1.2.1 From 196532f6847c195d1b7e614c8842c25962772b57 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Thu, 12 Sep 2019 12:53:30 -0400 Subject: Improve credential stripping for replication document reads Allow a special field for plugin writers to stash endpoint credentials, which gets the same treatment as headers and user:pass combinations for already existing plugins (session, noop aka basic auth). Instead of complicating the plugin API, use a simple convention of just calling it "auth" for now. 
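For illustration (hypothetical endpoint values), a sketch of the intended effect,
mirroring the `lists:keydelete/3` approach the diff below takes for the "headers"
and "auth" fields:

```
% Illustrative sketch only; the real code lives in
% couch_replicator_docs:strip_credentials/1 below.
-module(strip_sketch).
-export([strip/1]).

strip({Props0}) ->
    Props1 = lists:keydelete(<<"headers">>, 1, Props0),
    {lists:keydelete(<<"auth">>, 1, Props1)}.

% strip({[{<<"url">>, <<"http://db.example.org/src">>},
%         {<<"auth">>, {[{<<"session">>, <<"secret">>}]}}]})
% => {[{<<"url">>, <<"http://db.example.org/src">>}]}
```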
--- src/couch_replicator/src/couch_replicator_auth.erl | 6 ++++++ src/couch_replicator/src/couch_replicator_docs.erl | 12 ++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/couch_replicator/src/couch_replicator_auth.erl b/src/couch_replicator/src/couch_replicator_auth.erl index 7f51cdd1c..272e10af5 100644 --- a/src/couch_replicator/src/couch_replicator_auth.erl +++ b/src/couch_replicator/src/couch_replicator_auth.erl @@ -33,6 +33,12 @@ % Behavior API +% Note for plugin developers: consider using the "auth" field in the source and +% target objects to store credentials. In that case non-owner and non-admin +% users will have those credentials stripped when they read the replication +% document, which mimicks the behavior for "headers" and user and pass fields +% in endpoint URLs". + -callback initialize(#httpdb{}) -> {ok, #httpdb{}, term()} | {error, term()} | ignore. diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl index 2d6db1b73..81685cd48 100644 --- a/src/couch_replicator/src/couch_replicator_docs.erl +++ b/src/couch_replicator/src/couch_replicator_docs.erl @@ -683,8 +683,12 @@ strip_credentials(Url) when is_binary(Url) -> "http(s)?://(?:[^:]+):[^@]+@(.*)$", "http\\1://\\2", [{return, binary}]); -strip_credentials({Props}) -> - {lists:keydelete(<<"headers">>, 1, Props)}. +strip_credentials({Props0}) -> + Props1 = lists:keydelete(<<"headers">>, 1, Props0), + % Strip "auth" just like headers, for replication plugins it can be a place + % to stash credential that are not necessarily in headers + Props2 = lists:keydelete(<<"auth">>, 1, Props1), + {Props2}. error_reason({shutdown, Error}) -> @@ -773,6 +777,10 @@ check_strip_credentials_test() -> { {[{<<"_id">>, <<"foo">>}]}, {[{<<"_id">>, <<"foo">>}, {<<"headers">>, <<"baz">>}]} + }, + { + {[{<<"_id">>, <<"foo">>}]}, + {[{<<"_id">>, <<"foo">>}, {<<"auth">>, <<"pluginsecret">>}]} } ]]. -- cgit v1.2.1 From 3302ffaa20101438c7c04691d9fc87a99d86aefb Mon Sep 17 00:00:00 2001 From: Jamie Luckett Date: Mon, 16 Sep 2019 11:09:36 +0100 Subject: Fix typo in couch_mrview comment --- src/couch_mrview/src/couch_mrview.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl index ae1d8d6f5..d549568ec 100644 --- a/src/couch_mrview/src/couch_mrview.erl +++ b/src/couch_mrview/src/couch_mrview.erl @@ -224,7 +224,7 @@ validate(Db, DDoc) -> couch_query_servers:ret_os_process(Proc) end catch {unknown_query_language, _Lang} -> - %% Allow users to save ddocs written in uknown languages + %% Allow users to save ddocs written in unknown languages ok end. -- cgit v1.2.1 From 9da6c7ae8658273d32da1a805d3549670f118d51 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 18 Sep 2019 14:16:57 -0400 Subject: Remove deprecated dbinfo fields (#2163) These fields are all marked as deprecated in the current documentation and they have more specific replacements in the `sizes` object. 
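Clients that still read the removed top-level fields can bridge old and new
servers with a small shim; a hedged sketch (hypothetical helper, stdlib
`proplists` only, assuming the decoded info is a proplist with atom keys):

```
% Hypothetical client-side shim: prefer the sizes object, fall back to
% the legacy field when talking to a pre-3.0 server.
-module(dbinfo_compat).
-export([file_size/1]).

file_size(InfoProps) ->
    case proplists:get_value(sizes, InfoProps) of
        {Sizes} -> proplists:get_value(file, Sizes);
        undefined -> proplists:get_value(disk_size, InfoProps)
    end.
```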
--- src/couch/src/couch_db.erl | 11 ----------- src/couch_mrview/src/couch_mrview_index.erl | 2 -- .../test/eunit/couch_mrview_index_info_tests.erl | 10 ---------- src/fabric/src/fabric_db_info.erl | 17 ----------------- src/fabric/src/fabric_group_info.erl | 4 ---- test/elixir/test/compact_test.exs | 8 ++++---- test/javascript/tests/attachments.js | 4 ++-- test/javascript/tests/compact.js | 16 ++++++++-------- test/javascript/tests/design_docs.js | 4 ++-- test/javascript/tests/view_compaction.js | 10 +++++----- 10 files changed, 21 insertions(+), 65 deletions(-) diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index 52c3fbb76..6135813ae 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -594,9 +594,6 @@ get_db_info(Db) -> {ok, DocCount} = get_doc_count(Db), {ok, DelDocCount} = get_del_doc_count(Db), SizeInfo = couch_db_engine:get_size_info(Db), - FileSize = couch_util:get_value(file, SizeInfo, null), - ActiveSize = couch_util:get_value(active, SizeInfo, null), - ExternalSize = couch_util:get_value(external, SizeInfo, null), DiskVersion = couch_db_engine:get_disk_version(Db), Uuid = case get_uuid(Db) of undefined -> null; @@ -619,14 +616,6 @@ get_db_info(Db) -> {purge_seq, couch_db_engine:get_purge_seq(Db)}, {compact_running, Compactor /= nil}, {sizes, {SizeInfo}}, - % TODO: Remove this in 3.0 - % These are legacy and have been duplicated under - % the sizes key since 2.0. We should make a note - % in our release notes that we'll remove these - % old versions in 3.0 - {disk_size, FileSize}, % legacy - {data_size, ActiveSize}, - {other, {[{data_size, ExternalSize}]}}, {instance_start_time, StartTime}, {disk_format_version, DiskVersion}, {committed_update_seq, CommittedUpdateSeq}, diff --git a/src/couch_mrview/src/couch_mrview_index.erl b/src/couch_mrview/src/couch_mrview_index.erl index 03701693f..91703bd4d 100644 --- a/src/couch_mrview/src/couch_mrview_index.erl +++ b/src/couch_mrview/src/couch_mrview_index.erl @@ -80,8 +80,6 @@ get(info, State) -> {ok, [ {signature, list_to_binary(couch_index_util:hexsig(Sig))}, {language, Lang}, - {disk_size, FileSize}, % legacy - {data_size, ExternalSize}, % legacy {sizes, {[ {file, FileSize}, {active, ActiveSize}, diff --git a/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl index efa03e7c0..c4c765feb 100644 --- a/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl +++ b/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl @@ -48,8 +48,6 @@ view_info_test_() -> fun file_size_is_non_neg_int/1, fun active_size_is_non_neg_int/1, fun external_size_is_non_neg_int/1, - fun disk_size_is_file_size/1, - fun data_size_is_external_size/1, fun active_size_less_than_file_size/1, fun update_seq_is_non_neg_int/1, fun purge_seq_is_non_neg_int/1, @@ -80,14 +78,6 @@ external_size_is_non_neg_int({_, Info}) -> ?_assert(check_non_neg_int([sizes, external], Info)). -disk_size_is_file_size({_, Info}) -> - ?_assertEqual(prop([sizes, file], Info), prop(disk_size, Info)). - - -data_size_is_external_size({_, Info}) -> - ?_assertEqual(prop([sizes, external], Info), prop(data_size, Info)). - - active_size_less_than_file_size({_, Info}) -> ?_assert(prop([sizes, active], Info) < prop([sizes, file], Info)). 
diff --git a/src/fabric/src/fabric_db_info.erl b/src/fabric/src/fabric_db_info.erl index bb7a3530e..ed14e3e20 100644 --- a/src/fabric/src/fabric_db_info.erl +++ b/src/fabric/src/fabric_db_info.erl @@ -99,14 +99,8 @@ merge_results(Info) -> [{doc_del_count, lists:sum(X)} | Acc]; (compact_running, X, Acc) -> [{compact_running, lists:member(true, X)} | Acc]; - (disk_size, X, Acc) -> % legacy - [{disk_size, lists:sum(X)} | Acc]; - (data_size, X, Acc) -> % legacy - [{data_size, lists:sum(X)} | Acc]; (sizes, X, Acc) -> [{sizes, {merge_object(X)}} | Acc]; - (other, X, Acc) -> % legacy - [{other, {merge_other_results(X)}} | Acc]; (disk_format_version, X, Acc) -> [{disk_format_version, lists:max(X)} | Acc]; (cluster, [X], Acc) -> @@ -117,17 +111,6 @@ merge_results(Info) -> Acc end, [{instance_start_time, <<"0">>}], Dict). -merge_other_results(Results) -> - Dict = lists:foldl(fun({Props}, D) -> - lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end, D, Props) - end, orddict:new(), Results), - orddict:fold(fun - (data_size, X, Acc) -> - [{data_size, lists:sum(X)} | Acc]; - (_, _, Acc) -> - Acc - end, [], Dict). - merge_object(Objects) -> Dict = lists:foldl(fun({Props}, D) -> lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end, D, Props) diff --git a/src/fabric/src/fabric_group_info.erl b/src/fabric/src/fabric_group_info.erl index be507420e..91bdf01b0 100644 --- a/src/fabric/src/fabric_group_info.erl +++ b/src/fabric/src/fabric_group_info.erl @@ -111,10 +111,6 @@ merge_results(Info) -> [{signature, X} | Acc]; (language, [X | _], Acc) -> [{language, X} | Acc]; - (disk_size, X, Acc) -> % legacy - [{disk_size, lists:sum(X)} | Acc]; - (data_size, X, Acc) -> % legacy - [{data_size, lists:sum(X)} | Acc]; (sizes, X, Acc) -> [{sizes, {merge_object(X)}} | Acc]; (compact_running, X, Acc) -> diff --git a/test/elixir/test/compact_test.exs b/test/elixir/test/compact_test.exs index 4c6657ea9..191267b47 100644 --- a/test/elixir/test/compact_test.exs +++ b/test/elixir/test/compact_test.exs @@ -21,8 +21,8 @@ defmodule CompactTest do db = context[:db_name] docs = populate(db) info = get_info(db) - orig_data_size = info["data_size"] - orig_disk_size = info["disk_size"] + orig_data_size = info["sizes"]["active"] + orig_disk_size = info["sizes"]["file"] start_time = info["instance_start_time"] assert is_integer(orig_data_size) and is_integer(orig_disk_size) assert orig_data_size < orig_disk_size @@ -42,8 +42,8 @@ defmodule CompactTest do assert get_info(db)["instance_start_time"] == start_time assert_attachment_available(db) info = get_info(db) - final_data_size = info["data_size"] - final_disk_size = info["disk_size"] + final_data_size = info["sizes"]["active"] + final_disk_size = info["sizes"]["file"] assert final_data_size < final_disk_size assert is_integer(final_data_size) and is_integer(final_disk_size) assert final_data_size < deleted_data_size diff --git a/test/javascript/tests/attachments.js b/test/javascript/tests/attachments.js index 2e831a731..73de018f6 100644 --- a/test/javascript/tests/attachments.js +++ b/test/javascript/tests/attachments.js @@ -218,7 +218,7 @@ couchTests.attachments= function(debug) { // re-create them var saved3 = db.bulkSave(docs); - var before = db.info().disk_size; + var before = db.info().sizes.file; // Compact it. 
/*T(db.compact().ok); @@ -226,7 +226,7 @@ couchTests.attachments= function(debug) { // compaction isn't instantaneous, loop until done while (db.info().compact_running) {}; - var after = db.info().disk_size; + var after = db.info().sizes.file; // Compaction should reduce the database slightly, but not // orders of magnitude (unless attachments introduce sparseness) diff --git a/test/javascript/tests/compact.js b/test/javascript/tests/compact.js index e0b052a10..8c8beb4b9 100644 --- a/test/javascript/tests/compact.js +++ b/test/javascript/tests/compact.js @@ -30,19 +30,19 @@ couchTests.compact = function(debug) { T(db.save(binAttDoc).ok); - var originalsize = db.info().disk_size; - var originaldatasize = db.info().data_size; + var originalsize = db.info().sizes.file; + var originaldatasize = db.info().sizes.active; var start_time = db.info().instance_start_time; - TEquals("number", typeof originaldatasize, "data_size is a number"); + TEquals("number", typeof originaldatasize, "data size is a number"); T(originaldatasize < originalsize, "data size is < then db file size"); for(var i in docs) { db.deleteDoc(docs[i]); } T(db.ensureFullCommit().ok); - var deletesize = db.info().disk_size; - var deletedatasize = db.info().data_size; + var deletesize = db.info().sizes.file; + var deletedatasize = db.info().sizes.active; T(deletesize > originalsize); T(db.setDbProperty("_revs_limit", 666).ok); @@ -59,9 +59,9 @@ couchTests.compact = function(debug) { T(xhr.responseText == "This is a base64 encoded text"); T(xhr.getResponseHeader("Content-Type") == "text/plain"); T(db.info().doc_count == 1); - // XXX BUGGED! T(db.info().data_size < deletedatasize); - TEquals("number", typeof db.info().data_size, "data_size is a number"); - T(db.info().data_size < db.info().disk_size, "data size is < then db file size"); + // XXX BUGGED! 
T(db.info().sizes.active < deletedatasize);
+  TEquals("number", typeof db.info().sizes.active, "data size is a number");
+  T(db.info().sizes.active < db.info().sizes.file, "data size is < then db file size");
 
   // cleanup
   db.deleteDb();
diff --git a/test/javascript/tests/design_docs.js b/test/javascript/tests/design_docs.js
index ed1e72f3f..780fc1354 100644
--- a/test/javascript/tests/design_docs.js
+++ b/test/javascript/tests/design_docs.js
@@ -247,7 +247,7 @@ couchTests.design_docs = function(debug) {
 
   var prev_view_sig = db.designInfo("_design/test").view_index.signature;
-  var prev_view_size = db.designInfo("_design/test").view_index.disk_size;
+  var prev_view_size = db.designInfo("_design/test").view_index.sizes.file;
 
   db.bulkSave(makeDocs(1, numDocs + 1));
   T(db.ensureFullCommit().ok);
@@ -260,7 +260,7 @@ couchTests.design_docs = function(debug) {
   var dinfo = db.designInfo("_design/test");
   TEquals("test", dinfo.name);
   var vinfo = dinfo.view_index;
-  TEquals(prev_view_size, vinfo.disk_size, "view group disk size didn't change");
+  TEquals(prev_view_size, vinfo.sizes.file, "view group disk size didn't change");
   TEquals(false, vinfo.compact_running);
   TEquals(prev_view_sig, vinfo.signature, 'ddoc sig');
   // wait some time (there were issues where an update
diff --git a/test/javascript/tests/view_compaction.js b/test/javascript/tests/view_compaction.js
index 03e8395c7..d1a1e8790 100644
--- a/test/javascript/tests/view_compaction.js
+++ b/test/javascript/tests/view_compaction.js
@@ -78,8 +78,8 @@ couchTests.view_compaction = function(debug) {
 
   resp = db.designInfo("_design/foo");
   TEquals(30001, resp.view_index.update_seq);
-  var disk_size_before_compact = resp.view_index.disk_size;
-  var data_size_before_compact = resp.view_index.data_size;
+  var disk_size_before_compact = resp.view_index.sizes.file;
+  var data_size_before_compact = resp.view_index.sizes.active;
   TEquals("number", typeof data_size_before_compact, "data size is a number");
   T(data_size_before_compact < disk_size_before_compact, "data size < file size");
@@ -102,9 +102,9 @@ couchTests.view_compaction = function(debug) {
 
   resp = db.designInfo("_design/foo");
   TEquals(30001, resp.view_index.update_seq);
-  T(resp.view_index.disk_size < disk_size_before_compact);
-  TEquals("number", typeof resp.view_index.data_size, "data size is a number");
-  T(resp.view_index.data_size < resp.view_index.disk_size, "data size < file size");
+  T(resp.view_index.sizes.file < disk_size_before_compact);
+  TEquals("number", typeof resp.view_index.sizes.active, "data size is a number");
+  T(resp.view_index.sizes.active < resp.view_index.sizes.file, "data size < file size");
 
   // cleanup
   db.deleteDb();
-- cgit v1.2.1


From aa311b8cd7ac6e2c8cfe8757d2e39e020b153997 Mon Sep 17 00:00:00 2001
From: Jan Lehnardt
Date: Thu, 19 Sep 2019 12:01:53 +0200
Subject: feat: do not run stats aggregations on an interval
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Similar to 448be7996999a706464d8f7429a56dc9e9c87c3a (hello 0.10.1),
`timer:{send,apply}_interval()` will apply functions / send messages
once for every interval that elapsed while a machine was in sleep /
hibernation mode, which is common on desktop systems.

In a typical office scenario, a laptop that sleeps over a weekend will,
when woken up on a Monday, issue thousands of function calls that,
together with other unrelated wake-up activity, make the machine top
out its CPU for no good reason.
The change addresses this by no longer relying on an interval to start
a given task: on startup, the task is started once after a timeout, and
a fresh timer is only started after the task has finished. Unlike the
0.10-era patch, this one does not account for a system waking up before
the timeout. I’m happy to add that behaviour, if a reviewer insists on
it.

As a result, no matter how long the sleep period is, we only run the
desired function _once_ after we wake up again. In the never-sleep
scenario, the existing behaviour is retained.

This might impact metrics that have a time component, but I think
that’s a fair compromise, so I didn’t investigate that further.
---
 src/couch_stats/src/couch_stats_aggregator.erl | 27 +++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/src/couch_stats/src/couch_stats_aggregator.erl b/src/couch_stats/src/couch_stats_aggregator.erl
index 17bd6fc33..8aef3d02d 100644
--- a/src/couch_stats/src/couch_stats_aggregator.erl
+++ b/src/couch_stats/src/couch_stats_aggregator.erl
@@ -55,18 +55,19 @@ start_link() ->
 
 init([]) ->
     {ok, Descs} = reload_metrics(),
-    Interval = config:get_integer("stats", "interval", ?DEFAULT_INTERVAL),
-    {ok, CT} = timer:send_interval(Interval * 1000, self(), collect),
-    {ok, RT} = timer:send_interval(?RELOAD_INTERVAL * 1000, self(), reload),
+    {ok, CT} = timer:send_after(get_interval(collect), self(), collect),
+    {ok, RT} = timer:send_after(get_interval(reload), self(), reload),
     {ok, #st{descriptions=Descs, stats=[], collect_timer=CT, reload_timer=RT}}.
 
 handle_call(fetch, _from, #st{stats = Stats}=State) ->
     {reply, {ok, Stats}, State};
 handle_call(flush, _From, State) ->
     {reply, ok, collect(State)};
-handle_call(reload, _from, State) ->
+handle_call(reload, _from, #st{reload_timer=OldRT} = State) ->
+    timer:cancel(OldRT),
     {ok, Descriptions} = reload_metrics(),
-    {reply, ok, State#st{descriptions=Descriptions}};
+    {ok, RT} = update_timer(reload),
+    {reply, ok, State#st{descriptions=Descriptions, reload_timer=RT}};
 handle_call(Msg, _From, State) ->
     {stop, {unknown_call, Msg}, error, State}.
 
@@ -140,11 +141,23 @@ load_metrics_for_application(AppName) ->
         end
     end.
 
-collect(State) ->
+collect(#st{collect_timer=OldCT} = State) ->
+    timer:cancel(OldCT),
     Stats = lists:map(
         fun({Name, Props}) ->
             {Name, [{value, couch_stats:sample(Name)}|Props]}
         end,
         State#st.descriptions
     ),
-    State#st{stats=Stats}.
+    {ok, CT} = update_timer(collect),
+    State#st{stats=Stats, collect_timer=CT}.
+
+update_timer(collect) ->
+    Interval = get_interval(collect),
+    timer:send_after(Interval, self(), collect);
+update_timer(reload) ->
+    Interval = get_interval(reload),
+    timer:send_after(Interval, self(), reload).
+
+get_interval(reload) -> 1000 * ?RELOAD_INTERVAL;
+get_interval(collect) -> 1000 * config:get_integer("stats", "interval", ?DEFAULT_INTERVAL).
-- cgit v1.2.1 From a5b30e7828f16eb524eb58b53b9d0f48175fefcf Mon Sep 17 00:00:00 2001 From: Joan Touzet Date: Tue, 17 Sep 2019 19:38:06 -0400 Subject: Bump to 3.0.0 --- rel/reltool.config | 2 +- version.mk | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/rel/reltool.config b/rel/reltool.config index da85f36bc..512e45c44 100644 --- a/rel/reltool.config +++ b/rel/reltool.config @@ -12,7 +12,7 @@ {sys, [ {lib_dirs, ["../src"]}, - {rel, "couchdb", "2.3.0", [ + {rel, "couchdb", "3.0.0", [ %% stdlib asn1, compiler, diff --git a/version.mk b/version.mk index a93c9b198..9ca09ab11 100644 --- a/version.mk +++ b/version.mk @@ -1,3 +1,3 @@ -vsn_major=2 -vsn_minor=3 +vsn_major=3 +vsn_minor=0 vsn_patch=0 -- cgit v1.2.1 From 7196a3c4b721158045af36e592ffdc7cc50abb3d Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 24 Sep 2019 16:58:42 -0400 Subject: Remove old multi-query path (#2173) Users should send requests with multiple queries to the new endpoint: /db/_design/{ddoc}/_view/{view}/queries Closes #2168 --- src/chttpd/src/chttpd_view.erl | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl index 26107d7c5..0d3d86d1a 100644 --- a/src/chttpd/src/chttpd_view.erl +++ b/src/chttpd/src/chttpd_view.erl @@ -88,22 +88,16 @@ handle_view_req(#httpd{method='POST', path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) -> chttpd:validate_ctype(Req, "application/json"), Props = couch_httpd:json_body_obj(Req), - Keys = couch_mrview_util:get_view_keys(Props), - Queries = couch_mrview_util:get_view_queries(Props), - case {Queries, Keys} of - {Queries, undefined} when is_list(Queries) -> - [couch_stats:increment_counter([couchdb, httpd, view_reads]) || _I <- Queries], - multi_query_view(Req, Db, DDoc, ViewName, Queries); - {undefined, Keys} when is_list(Keys) -> + assert_no_queries_param(couch_mrview_util:get_view_queries(Props)), + case couch_mrview_util:get_view_keys(Props) of + Keys when is_list(Keys) -> couch_stats:increment_counter([couchdb, httpd, view_reads]), design_doc_view(Req, Db, DDoc, ViewName, Keys); - {undefined, undefined} -> + _ -> throw({ bad_request, - "POST body must contain `keys` or `queries` field" - }); - {_, _} -> - throw({bad_request, "`keys` and `queries` are mutually exclusive"}) + "POST body must contain an array called `keys`" + }) end; handle_view_req(Req, _Db, _DDoc) -> @@ -113,6 +107,14 @@ handle_temp_view_req(Req, _Db) -> Msg = <<"Temporary views are not supported in CouchDB">>, chttpd:send_error(Req, 410, gone, Msg). +% See https://github.com/apache/couchdb/issues/2168 +assert_no_queries_param(undefined) -> + ok; +assert_no_queries_param(_) -> + throw({ + bad_request, + "The `queries` parameter is no longer supported at this endpoint" + }). -ifdef(TEST). -- cgit v1.2.1
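As a migration sketch for API consumers (hypothetical URL, database, and view
names; authentication omitted; stdlib `httpc` only), a multi-query request now
targets the dedicated endpoint:

```
% Illustrative only: the old POST of a "queries" body to the plain view
% endpoint is rejected; the same body goes to .../queries instead.
-module(queries_migration).
-export([run/0]).

run() ->
    {ok, _} = application:ensure_all_started(inets),
    Url = "http://127.0.0.1:5984/db/_design/bar/_view/baz/queries",
    Body = "{\"queries\": [{\"keys\": [\"doc3\", \"doc8\"]}, {\"limit\": 5}]}",
    httpc:request(post, {Url, [], "application/json", Body}, [], []).
```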