Diffstat (limited to 'src/couch/test/eunit/couchdb_views_tests.erl')
 src/couch/test/eunit/couchdb_views_tests.erl | 410
 1 file changed, 247 insertions(+), 163 deletions(-)
diff --git a/src/couch/test/eunit/couchdb_views_tests.erl b/src/couch/test/eunit/couchdb_views_tests.erl
index 06e2f03eb..f4d51bdd0 100644
--- a/src/couch/test/eunit/couchdb_views_tests.erl
+++ b/src/couch/test/eunit/couchdb_views_tests.erl
@@ -48,10 +48,18 @@ setup_legacy() ->
     DbDir = config:get("couchdb", "database_dir"),
     ViewDir = config:get("couchdb", "view_index_dir"),
-    OldViewFilePath = filename:join([ViewDir, ".test_design", "mrview",
-        OldViewName]),
-    NewViewFilePath = filename:join([ViewDir, ".test_design", "mrview",
-        NewViewName]),
+    OldViewFilePath = filename:join([
+        ViewDir,
+        ".test_design",
+        "mrview",
+        OldViewName
+    ]),
+    NewViewFilePath = filename:join([
+        ViewDir,
+        ".test_design",
+        "mrview",
+        NewViewName
+    ]),
 
     NewDbFilePath = filename:join([DbDir, DbFileName]),
 
@@ -84,10 +92,12 @@ view_indexes_cleanup_test_() ->
         "View indexes cleanup",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            fun test_util:start_couch/0,
+            fun test_util:stop_couch/1,
             {
                 foreach,
-                fun setup/0, fun teardown/1,
+                fun setup/0,
+                fun teardown/1,
                 [
                     fun should_have_two_indexes_alive_before_deletion/1,
                     fun should_cleanup_index_file_after_ddoc_deletion/1,
@@ -102,10 +112,12 @@ view_group_db_leaks_test_() ->
         "View group db leaks",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            fun test_util:start_couch/0,
+            fun test_util:stop_couch/1,
             {
                 foreach,
-                fun setup_with_docs/0, fun teardown/1,
+                fun setup_with_docs/0,
+                fun teardown/1,
                 [
                     fun couchdb_1138/1,
                     fun couchdb_1309/1
@@ -136,10 +148,12 @@ backup_restore_test_() ->
         "Upgrade and bugs related tests",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            fun test_util:start_couch/0,
+            fun test_util:stop_couch/1,
             {
                 foreach,
-                fun setup_with_docs/0, fun teardown/1,
+                fun setup_with_docs/0,
+                fun teardown/1,
                 [
                     fun should_not_remember_docs_in_index_after_backup_restore/1
                 ]
@@ -147,16 +161,17 @@ backup_restore_test_() ->
         }
     }.
 
-
 upgrade_test_() ->
     {
         "Upgrade tests",
         {
             setup,
-            fun test_util:start_couch/0, fun test_util:stop_couch/1,
+            fun test_util:start_couch/0,
+            fun test_util:stop_couch/1,
             {
                 foreach,
-                fun setup_legacy/0, fun teardown_legacy/1,
+                fun setup_legacy/0,
+                fun teardown_legacy/1,
                 [
                     fun should_upgrade_legacy_view_files/1
                 ]
@@ -184,7 +199,7 @@ should_not_remember_docs_in_index_after_backup_restore(DbName) ->
         ?assert(has_doc("doc2", Rows1)),
         ?assert(has_doc("doc3", Rows1)),
         ?assertNot(has_doc("doc666", Rows1))
-     end).
+    end).
 
 should_upgrade_legacy_view_files({DbName, Files}) ->
     ?_test(begin
@@ -206,21 +221,23 @@ should_upgrade_legacy_view_files({DbName, Files}) ->
         % add doc to trigger update
         DocUrl = db_url(DbName) ++ "/bar",
         {ok, _, _, _} = test_request:put(
-            DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":4}">>),
+            DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":4}">>
+        ),
 
         % query view for expected results
         Rows1 = query_view(DbName, "test", "test"),
         ?assertEqual(4, length(Rows1)),
 
         % ensure new header
-        timer:sleep(2000), % have to wait for awhile to upgrade the index
+
+        % have to wait for awhile to upgrade the index
+        timer:sleep(2000),
         NewHeader = read_header(NewViewFilePath),
         ?assertMatch(#mrheader{}, NewHeader),
         NewViewStatus = hd(NewHeader#mrheader.view_states),
         ?assertEqual(3, tuple_size(NewViewStatus))
     end).
 
-
 should_have_two_indexes_alive_before_deletion({DbName, _}) ->
     view_cleanup(DbName),
     ?_assertEqual(2, count_index_files(DbName)).
@@ -230,7 +247,7 @@ should_cleanup_index_file_after_ddoc_deletion({DbName, {FooRev, _}}) ->
     view_cleanup(DbName),
     ?_assertEqual(1, count_index_files(DbName)).
 
-should_cleanup_all_index_files({DbName, {FooRev, BooRev}})->
+should_cleanup_all_index_files({DbName, {FooRev, BooRev}}) ->
     delete_design_doc(DbName, <<"_design/foo">>, FooRev),
     delete_design_doc(DbName, <<"_design/boo">>, BooRev),
     view_cleanup(DbName),
@@ -239,7 +256,8 @@ should_cleanup_all_index_files({DbName, {FooRev, BooRev}})->
 couchdb_1138(DbName) ->
     ?_test(begin
         {ok, IndexerPid} = couch_index_server:get_index(
-            couch_mrview_index, DbName, <<"_design/foo">>),
+            couch_mrview_index, DbName, <<"_design/foo">>
+        ),
         ?assert(is_pid(IndexerPid)),
         ?assert(is_process_alive(IndexerPid)),
         ?assertEqual(2, count_users(DbName)),
@@ -277,7 +295,8 @@ couchdb_1138(DbName) ->
 couchdb_1309(DbName) ->
     ?_test(begin
         {ok, IndexerPid} = couch_index_server:get_index(
-            couch_mrview_index, DbName, <<"_design/foo">>),
+            couch_mrview_index, DbName, <<"_design/foo">>
+        ),
         ?assert(is_pid(IndexerPid)),
         ?assert(is_process_alive(IndexerPid)),
         ?assertEqual(2, count_users(DbName)),
@@ -292,18 +311,21 @@ couchdb_1309(DbName) ->
 
         ?assert(is_process_alive(IndexerPid)),
 
-        update_design_doc(DbName,  <<"_design/foo">>, <<"bar">>),
+        update_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
         {ok, NewIndexerPid} = couch_index_server:get_index(
-            couch_mrview_index, DbName, <<"_design/foo">>),
+            couch_mrview_index, DbName, <<"_design/foo">>
+        ),
         ?assert(is_pid(NewIndexerPid)),
         ?assert(is_process_alive(NewIndexerPid)),
         ?assertNotEqual(IndexerPid, NewIndexerPid),
-        UserCnt = case count_users(DbName) of
-            N when N > 2 ->
-                timer:sleep(1000),
-                count_users(DbName);
-            N -> N
-        end,
+        UserCnt =
+            case count_users(DbName) of
+                N when N > 2 ->
+                    timer:sleep(1000),
+                    count_users(DbName);
+                N ->
+                    N
+            end,
         ?assertEqual(2, UserCnt),
 
         Rows1 = query_view(DbName, "foo", "bar", ok),
@@ -312,15 +334,20 @@ couchdb_1309(DbName) ->
         check_rows_value(Rows2, 1),
         ?assertEqual(4, length(Rows2)),
 
-        ok = stop_indexer( %% FIXME we need to grab monitor earlier
-            fun() -> ok end,
-            IndexerPid, ?LINE,
-            "old view group is not dead after ddoc update"),
+        %% FIXME we need to grab monitor earlier
+        ok = stop_indexer(
+            fun() -> ok end,
+            IndexerPid,
+            ?LINE,
+            "old view group is not dead after ddoc update"
+        ),
 
         ok = stop_indexer(
-            fun() -> couch_server:delete(DbName, [?ADMIN_USER]) end,
-            NewIndexerPid, ?LINE,
-            "new view group did not die after DB deletion")
+            fun() -> couch_server:delete(DbName, [?ADMIN_USER]) end,
+            NewIndexerPid,
+            ?LINE,
+            "new view group did not die after DB deletion"
+        )
     end).
 
 couchdb_1283() ->
@@ -328,41 +355,54 @@ couchdb_1283() ->
         ok = config:set("couchdb", "max_dbs_open", "3", false),
 
         {ok, MDb1} = couch_db:create(?tempdb(), [?ADMIN_CTX]),
-        DDoc = couch_doc:from_json_obj({[
-            {<<"_id">>, <<"_design/foo">>},
-            {<<"language">>, <<"javascript">>},
-            {<<"views">>, {[
-                {<<"foo">>, {[
-                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
-                ]}},
-                {<<"foo2">>, {[
-                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
-                ]}},
-                {<<"foo3">>, {[
-                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
-                ]}},
-                {<<"foo4">>, {[
-                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
-                ]}},
-                {<<"foo5">>, {[
-                    {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
-                ]}}
-            ]}}
-        ]}),
+        DDoc = couch_doc:from_json_obj(
+            {[
+                {<<"_id">>, <<"_design/foo">>},
+                {<<"language">>, <<"javascript">>},
+                {<<"views">>,
+                    {[
+                        {<<"foo">>,
+                            {[
+                                {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                            ]}},
+                        {<<"foo2">>,
+                            {[
+                                {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                            ]}},
+                        {<<"foo3">>,
+                            {[
+                                {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                            ]}},
+                        {<<"foo4">>,
+                            {[
+                                {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                            ]}},
+                        {<<"foo5">>,
+                            {[
+                                {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+                            ]}}
+                    ]}}
+            ]}
+        ),
         {ok, _} = couch_db:update_doc(MDb1, DDoc, []),
         ok = populate_db(MDb1, 100, 100),
         query_view(couch_db:name(MDb1), "foo", "foo"),
         ok = couch_db:close(MDb1),
 
         {ok, Pid} = couch_index_server:get_index(
-            couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>),
+            couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>
+        ),
 
         % Start and pause compacton
         WaitRef = erlang:make_ref(),
         meck:expect(couch_mrview_index, compact, fun(Db, State, Opts) ->
-            receive {WaitRef, From, init} -> ok end,
+            receive
+                {WaitRef, From, init} -> ok
+            end,
             From ! {WaitRef, inited},
-            receive {WaitRef, go} -> ok end,
+            receive
+                {WaitRef, go} -> ok
+            end,
             meck:passthrough([Db, State, Opts])
         end),
 
@@ -373,7 +413,9 @@ couchdb_1283() ->
         % Make sure that our compactor is waiting for us
         % before we continue our assertions
        CPid ! {WaitRef, self(), init},
-        receive {WaitRef, inited} -> ok end,
+        receive
+            {WaitRef, inited} -> ok
+        end,
 
         % Make sure that a compaction process takes a monitor
         % on the database's main_pid
@@ -382,64 +424,74 @@ couchdb_1283() ->
         % Finish compaction to and make sure the monitor
         % disappears
         CPid ! {WaitRef, go},
-        wait_for_process_shutdown(CRef, normal,
-            {reason, "Failure compacting view group"}),
+        wait_for_process_shutdown(
+            CRef,
+            normal,
+            {reason, "Failure compacting view group"}
+        ),
 
         % Make sure that the monitor was removed
         ?assertEqual(false, lists:member(CPid, couch_db:monitored_by(MDb1)))
     end).
 
 wait_for_process_shutdown(Pid, ExpectedReason, Error) ->
-        receive
-            {'DOWN', Pid, process, _, Reason} ->
-                ?assertEqual(ExpectedReason, Reason)
-        after ?TIMEOUT ->
-            erlang:error(
-                {assertion_failed,
-                 [{module, ?MODULE}, {line, ?LINE}, Error]})
-        end.
-
+    receive
+        {'DOWN', Pid, process, _, Reason} ->
+            ?assertEqual(ExpectedReason, Reason)
+    after ?TIMEOUT ->
+        erlang:error(
+            {assertion_failed, [{module, ?MODULE}, {line, ?LINE}, Error]}
+        )
+    end.
 
 create_doc(DbName, DocId) when is_list(DocId) ->
     create_doc(DbName, ?l2b(DocId));
 create_doc(DbName, DocId) when is_binary(DocId) ->
     {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
-    Doc666 = couch_doc:from_json_obj({[
-        {<<"_id">>, DocId},
-        {<<"value">>, 999}
-    ]}),
+    Doc666 = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, DocId},
+            {<<"value">>, 999}
+        ]}
+    ),
     {ok, _} = couch_db:update_docs(Db, [Doc666]),
     couch_db:close(Db).
 
 create_docs(DbName) ->
     {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
-    Doc1 = couch_doc:from_json_obj({[
-        {<<"_id">>, <<"doc1">>},
-        {<<"value">>, 1}
-
-    ]}),
-    Doc2 = couch_doc:from_json_obj({[
-        {<<"_id">>, <<"doc2">>},
-        {<<"value">>, 2}
-
-    ]}),
-    Doc3 = couch_doc:from_json_obj({[
-        {<<"_id">>, <<"doc3">>},
-        {<<"value">>, 3}
-
-    ]}),
+    Doc1 = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, <<"doc1">>},
+            {<<"value">>, 1}
+        ]}
+    ),
+    Doc2 = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, <<"doc2">>},
+            {<<"value">>, 2}
+        ]}
+    ),
+    Doc3 = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, <<"doc3">>},
+            {<<"value">>, 3}
+        ]}
+    ),
     {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
     couch_db:close(Db).
 
 populate_db(Db, BatchSize, N) when N > 0 ->
     Docs = lists:map(
         fun(_) ->
-            couch_doc:from_json_obj({[
-                {<<"_id">>, couch_uuids:new()},
-                {<<"value">>, base64:encode(crypto:strong_rand_bytes(1000))}
-            ]})
+            couch_doc:from_json_obj(
+                {[
+                    {<<"_id">>, couch_uuids:new()},
+                    {<<"value">>, base64:encode(crypto:strong_rand_bytes(1000))}
+                ]}
+            )
         end,
-        lists:seq(1, BatchSize)),
+        lists:seq(1, BatchSize)
+    ),
     {ok, _} = couch_db:update_docs(Db, Docs, []),
     populate_db(Db, BatchSize, N - length(Docs));
 populate_db(_Db, _, _) ->
@@ -447,15 +499,19 @@ populate_db(_Db, _, _) ->
 
 create_design_doc(DbName, DDName, ViewName) ->
     {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
-    DDoc = couch_doc:from_json_obj({[
-        {<<"_id">>, DDName},
-        {<<"language">>, <<"javascript">>},
-        {<<"views">>, {[
-            {ViewName, {[
-                {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
-            ]}}
-        ]}}
-    ]}),
+    DDoc = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, DDName},
+            {<<"language">>, <<"javascript">>},
+            {<<"views">>,
+                {[
+                    {ViewName,
+                        {[
+                            {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
+                        ]}}
+                ]}}
+        ]}
+    ),
     {ok, Rev} = couch_db:update_doc(Db, DDoc, []),
     couch_db:close(Db),
     Rev.
@@ -465,27 +521,33 @@ update_design_doc(DbName, DDName, ViewName) ->
     {ok, Doc} = couch_db:open_doc(Db, DDName, [?ADMIN_CTX]),
     {Props} = couch_doc:to_json_obj(Doc, []),
     Rev = couch_util:get_value(<<"_rev">>, Props),
-    DDoc = couch_doc:from_json_obj({[
-        {<<"_id">>, DDName},
-        {<<"_rev">>, Rev},
-        {<<"language">>, <<"javascript">>},
-        {<<"views">>, {[
-            {ViewName, {[
-                {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
-            ]}}
-        ]}}
-    ]}),
+    DDoc = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, DDName},
+            {<<"_rev">>, Rev},
+            {<<"language">>, <<"javascript">>},
+            {<<"views">>,
+                {[
+                    {ViewName,
+                        {[
+                            {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
+                        ]}}
+                ]}}
+        ]}
+    ),
     {ok, NewRev} = couch_db:update_doc(Db, DDoc, [?ADMIN_CTX]),
     couch_db:close(Db),
     NewRev.
 
 delete_design_doc(DbName, DDName, Rev) ->
     {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
-    DDoc = couch_doc:from_json_obj({[
-        {<<"_id">>, DDName},
-        {<<"_rev">>, couch_doc:rev_to_str(Rev)},
-        {<<"_deleted">>, true}
-    ]}),
+    DDoc = couch_doc:from_json_obj(
+        {[
+            {<<"_id">>, DDName},
+            {<<"_rev">>, couch_doc:rev_to_str(Rev)},
+            {<<"_deleted">>, true}
+        ]}
+    ),
     {ok, _} = couch_db:update_doc(Db, DDoc, [Rev]),
     couch_db:close(Db).
@@ -499,11 +561,12 @@ query_view(DbName, DDoc, View) ->
 
 query_view(DbName, DDoc, View, Stale) ->
     {ok, Code, _Headers, Body} = test_request:get(
-        db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View
-        ++ case Stale of
-               false -> [];
-               _ -> "?stale=" ++ atom_to_list(Stale)
-           end),
+        db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View ++
+            case Stale of
+                false -> [];
+                _ -> "?stale=" ++ atom_to_list(Stale)
+            end
+    ),
     ?assertEqual(200, Code),
     {Props} = jiffy:decode(Body),
     couch_util:get_value(<<"rows">>, Props, []).
@@ -512,7 +575,9 @@ check_rows_value(Rows, Value) ->
     lists:foreach(
         fun({Row}) ->
             ?assertEqual(Value, couch_util:get_value(<<"value">>, Row))
-        end, Rows).
+        end,
+        Rows
+    ).
 
 view_cleanup(DbName) ->
     {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
@@ -531,8 +596,12 @@ count_users(DbName) ->
 count_index_files(DbName) ->
     % call server to fetch the index files
     RootDir = config:get("couchdb", "view_index_dir"),
-    length(filelib:wildcard(RootDir ++ "/." ++
-        binary_to_list(DbName) ++ "_design"++"/mrview/*")).
+    length(
+        filelib:wildcard(
+            RootDir ++ "/." ++
+                binary_to_list(DbName) ++ "_design" ++ "/mrview/*"
+        )
+    ).
 
 has_doc(DocId1, Rows) ->
     DocId = iolist_to_binary(DocId1),
@@ -542,10 +611,11 @@ backup_db_file(DbName) ->
     {ok, Db} = couch_db:open_int(DbName, []),
     try
         SrcPath = couch_db:get_filepath(Db),
-        Src = if
-            is_list(SrcPath) -> SrcPath;
-            true -> binary_to_list(SrcPath)
-        end,
+        Src =
+            if
+                is_list(SrcPath) -> SrcPath;
+                true -> binary_to_list(SrcPath)
+            end,
         ok = copy_tree(Src, Src ++ ".backup")
     after
         couch_db:close(Db)
@@ -559,17 +629,21 @@ restore_backup_db_file(DbName) ->
     exit(DbPid, shutdown),
     ok = copy_tree(Src ++ ".backup", Src),
 
-    test_util:wait(fun() ->
-        case couch_server:open(DbName, [{timeout, ?TIMEOUT}]) of
-            {ok, WaitDb} ->
-                case couch_db:get_pid(WaitDb) == DbPid of
-                    true -> wait;
-                    false -> ok
-                end;
-            Else ->
-                Else
-        end
-    end, ?TIMEOUT, ?DELAY).
+    test_util:wait(
+        fun() ->
+            case couch_server:open(DbName, [{timeout, ?TIMEOUT}]) of
+                {ok, WaitDb} ->
+                    case couch_db:get_pid(WaitDb) == DbPid of
+                        true -> wait;
+                        false -> ok
+                    end;
+                Else ->
+                    Else
+            end
+        end,
+        ?TIMEOUT,
+        ?DELAY
+    ).
 
 compact_db(DbName) ->
     {ok, Db} = couch_db:open_int(DbName, []),
@@ -578,20 +652,23 @@ compact_db(DbName) ->
     wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT).
 
 wait_db_compact_done(_DbName, 0) ->
-    erlang:error({assertion_failed,
-                  [{module, ?MODULE},
-                   {line, ?LINE},
-                   {reason, "DB compaction failed to finish"}]});
+    erlang:error(
+        {assertion_failed, [
+            {module, ?MODULE},
+            {line, ?LINE},
+            {reason, "DB compaction failed to finish"}
+        ]}
+    );
 wait_db_compact_done(DbName, N) ->
     {ok, Db} = couch_db:open_int(DbName, []),
     ok = couch_db:close(Db),
     CompactorPid = couch_db:get_compactor_pid(Db),
     case is_pid(CompactorPid) of
-    false ->
-        ok;
-    true ->
-        ok = timer:sleep(?DELAY),
-        wait_db_compact_done(DbName, N - 1)
+        false ->
+            ok;
+        true ->
+            ok = timer:sleep(?DELAY),
+            wait_db_compact_done(DbName, N - 1)
     end.
 
 compact_view_group(DbName, DDocId) when is_list(DDocId) ->
@@ -601,13 +678,17 @@ compact_view_group(DbName, DDocId) when is_binary(DDocId) ->
     wait_view_compact_done(DbName, DDocId, 10).
 
 wait_view_compact_done(_DbName, _DDocId, 0) ->
-    erlang:error({assertion_failed,
-                  [{module, ?MODULE},
-                   {line, ?LINE},
-                   {reason, "DB compaction failed to finish"}]});
+    erlang:error(
+        {assertion_failed, [
+            {module, ?MODULE},
+            {line, ?LINE},
+            {reason, "DB compaction failed to finish"}
+        ]}
+    );
 wait_view_compact_done(DbName, DDocId, N) ->
     {ok, Code, _Headers, Body} = test_request:get(
-        db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"),
+        db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"
+    ),
     ?assertEqual(200, Code),
     {Info} = jiffy:decode(Body),
     {IndexInfo} = couch_util:get_value(<<"view_index">>, Info),
@@ -628,13 +709,16 @@ read_header(File) ->
 
 stop_indexer(StopFun, Pid, Line, Reason) ->
     case test_util:stop_sync(Pid, StopFun) of
-    timeout ->
-        erlang:error(
-            {assertion_failed,
-             [{module, ?MODULE}, {line, Line},
-              {reason, Reason}]});
-    ok ->
-        ok
+        timeout ->
+            erlang:error(
+                {assertion_failed, [
+                    {module, ?MODULE},
+                    {line, Line},
+                    {reason, Reason}
+                ]}
+            );
+        ok ->
+            ok
     end.
 
 wait_indexer(IndexerPid) ->
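
Note: every hunk above is mechanical reformatting in the erlfmt style — one argument per line in broken-up calls, closing parentheses on their own lines, uniform indentation for case/receive/if expressions — with no behavior change. As a reader aid (not part of the diff), here is a minimal sketch of the nested EUnit fixture shape that the *_test_() generators above use: a suite-level {setup, Start, Stop, ...} wrapping a per-test {foreach, Setup, Teardown, Tests}. The module, suite, and test names are hypothetical, and the sketch assumes only stock OTP and EUnit:

-module(fixture_sketch_tests).
-include_lib("eunit/include/eunit.hrl").

suite_test_() ->
    {
        "Example suite",
        {
            setup,
            % Start/0 runs once before the whole suite; whatever it
            % returns is handed to Stop/1 after all tests have run.
            fun() -> application:ensure_all_started(crypto) end,
            fun(_) -> ok end,
            {
                foreach,
                % Setup/0 runs before each test, Teardown/1 after it;
                % each instantiator below receives Setup's return value.
                fun() -> [] end,
                fun(_) -> ok end,
                [
                    fun should_start_empty/1
                ]
            }
        }
    }.

% Instantiators return a test object; the underscore macros
% (?_assertEqual and friends) wrap the assertion in a fun for EUnit.
should_start_empty(State) ->
    ?_assertEqual([], State).

The reformatted file keeps exactly this nesting; the diff only moves each paired Start/Stop and Setup/Teardown fun from a shared line onto its own line.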