diff options
author | Joan Touzet <wohali@users.noreply.github.com> | 2018-11-07 21:40:44 -0500 |
---|---|---|
committer | GitHub <noreply@github.com> | 2018-11-07 21:40:44 -0500 |
commit | cd2b130674856caa6ce725ccfca539202fae8cc2 (patch) | |
tree | 77db6b998be4ee6c89aa1c906f90d4a21e8f16c7 | |
parent | 2f0ad1b0ce07ad27becdfada74b03b2b14a686a3 (diff) | |
parent | c157c968b349112c5b3e69a3ded7aca7996e761d (diff) | |
download | couchdb-cd2b130674856caa6ce725ccfca539202fae8cc2.tar.gz |
Merge branch 'master' into elixir-suiteelixir-suite
61 files changed, 907 insertions, 301 deletions
diff --git a/.travis.yml b/.travis.yml index acb0b5102..01b2862a3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,6 +3,7 @@ sudo: false os: linux otp_release: + - 21.1 - 20.3 - 19.3 - 18.3 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e345ea487..cd3a4437c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -279,7 +279,7 @@ without needing any other steps like setting git upstreams! :sparkles: ## Thanks -Special thanks to [Hoodie][https://github.com/hoodiehq/hoodie] for the great +Special thanks to [Hoodie](https://github.com/hoodiehq/hoodie) for the great CONTRIBUTING.md template. [1]: http://mail-archives.apache.org/mod_mbox/couchdb-user/ diff --git a/build-aux/logfile-uploader.py b/build-aux/logfile-uploader.py index c84a96a97..a1ff7e4a7 100755 --- a/build-aux/logfile-uploader.py +++ b/build-aux/logfile-uploader.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of @@ -209,7 +209,7 @@ EOF install_local_rebar() { if [ ! -x "${rootdir}/bin/rebar" ]; then if [ ! -d "${rootdir}/src/rebar" ]; then - git clone --depth 1 --branch 2.6.0-couchdb https://github.com/apache/couchdb-rebar.git ${rootdir}/src/rebar + git clone --depth 1 https://github.com/apache/couchdb-rebar.git ${rootdir}/src/rebar fi make -C ${rootdir}/src/rebar mv ${rootdir}/src/rebar/rebar ${rootdir}/bin/rebar diff --git a/rebar.config.script b/rebar.config.script index 22dde7cde..4d1649347 100644 --- a/rebar.config.script +++ b/rebar.config.script @@ -52,7 +52,7 @@ DepDescs = [ {b64url, "b64url", {tag, "1.0.1"}}, {ets_lru, "ets-lru", {tag, "1.0.0"}}, {khash, "khash", {tag, "1.0.1"}}, -{snappy, "snappy", {tag, "CouchDB-1.0.1"}}, +{snappy, "snappy", {tag, "CouchDB-1.0.2"}}, {ioq, "ioq", {tag, "1.0.1"}}, %% Non-Erlang deps @@ -65,7 +65,7 @@ DepDescs = [ {hyper, "hyper", {tag, "CouchDB-2.2.0-4"}}, {ibrowse, "ibrowse", {tag, "CouchDB-4.0.1"}}, {jiffy, "jiffy", {tag, "CouchDB-0.14.11-2"}}, -{mochiweb, "mochiweb", {tag, "v2.17.0"}}, +{mochiweb, "mochiweb", {tag, "CouchDB-v2.18.0-1"}}, {meck, "meck", {tag, "0.8.8"}} ], @@ -91,7 +91,7 @@ ErlOpts = case os:getenv("ERL_OPTS") of end, AddConfig = [ - {require_otp_vsn, "17|18|19|20"}, + {require_otp_vsn, "17|18|19|20|21"}, {deps_dir, "src"}, {deps, lists:map(MakeDep, DepDescs)}, {sub_dirs, SubDirs}, diff --git a/rel/overlay/bin/couchup b/rel/overlay/bin/couchup index 2d0105107..75b7d7e94 100755 --- a/rel/overlay/bin/couchup +++ b/rel/overlay/bin/couchup @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy of # the License at @@ -172,6 +172,25 @@ def _put_filter(args, db=None): print(exc.response.text) exit(1) +def _do_security(args, db=None): + """Copies the _security object from source to target DB.""" + try: + req = requests.get( + 'http://127.0.0.1:{}/{}/_security'.format( + args['local_port'], db), + auth=args['creds']) + req.raise_for_status() + security_doc = _tojson(req) + req = requests.put( + 'http://127.0.0.1:{}/{}/_security'.format( + args['clustered_port'], db), + data=json.dumps(security_doc), + auth=args['creds']) + req.raise_for_status() + except requests.exceptions.HTTPError as exc: + print(exc.response.text) + exit(1) + def _replicate(args): args = _args(args) if args['all_dbs']: @@ -229,6 +248,11 @@ def _replicate(args): if req.get('no_changes'): if not args['quiet']: print("No changes, replication is caught up.") + + if not args['quiet']: + print('Copying _security object for ' + db + '...') + _do_security(args, db) + if not args['quiet']: print("Replication complete.") @@ -474,7 +498,11 @@ def main(argv): parser_delete.set_defaults(func=_delete) args = parser.parse_args(argv[1:]) - args.func(args) + try: + args.func(args) + except AttributeError: + parser.print_help() + sys.exit(0) if __name__ == '__main__': main(sys.argv) diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index dc2e51cc0..ba2a498eb 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -187,7 +187,7 @@ port = 6984 ; [rexi] ; buffer_count = 2000 -; server_per_node = false +; server_per_node = true ; [global_changes] ; max_event_delay = 25 diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl index a5628396b..b606ad414 100644 --- a/src/chttpd/src/chttpd.erl +++ b/src/chttpd/src/chttpd.erl @@ -11,6 +11,9 @@ % the License. -module(chttpd). + +-compile(tuple_calls). + -include_lib("couch/include/couch_db.hrl"). -include_lib("chttpd/include/chttpd.hrl"). diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl index 49d7b5849..d46b5bbf2 100644 --- a/src/chttpd/src/chttpd_db.erl +++ b/src/chttpd/src/chttpd_db.erl @@ -11,6 +11,9 @@ % the License. -module(chttpd_db). + +-compile(tuple_calls). + -include_lib("couch/include/couch_db.hrl"). -include_lib("couch_mrview/include/couch_mrview.hrl"). 
@@ -417,19 +420,16 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req, _ -> Options = [{user_ctx,Ctx}, {w,W}] end, + Docs = lists:map(fun(JsonObj) -> + Doc = couch_doc:from_json_obj_validate(JsonObj), + validate_attachment_names(Doc), + case Doc#doc.id of + <<>> -> Doc#doc{id = couch_uuids:new()}; + _ -> Doc + end + end, DocsArray), case couch_util:get_value(<<"new_edits">>, JsonProps, true) of true -> - Docs = lists:map( - fun(JsonObj) -> - Doc = couch_doc:from_json_obj_validate(JsonObj), - validate_attachment_names(Doc), - Id = case Doc#doc.id of - <<>> -> couch_uuids:new(); - Id0 -> Id0 - end, - Doc#doc{id=Id} - end, - DocsArray), Options2 = case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of true -> [all_or_nothing|Options]; @@ -452,8 +452,6 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req, send_json(Req, 417, ErrorsJson) end; false -> - Docs = [couch_doc:from_json_obj_validate(JsonObj) || JsonObj <- DocsArray], - [validate_attachment_names(D) || D <- Docs], case fabric:update_docs(Db, Docs, [replicated_changes|Options]) of {ok, Errors} -> ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors), @@ -479,8 +477,9 @@ db_req(#httpd{method='POST', path_parts=[_, <<"_bulk_get">>]}=Req, Db) -> throw({bad_request, <<"Missing JSON list of 'docs'.">>}); Docs -> #doc_query_args{ - options = Options + options = Options0 } = bulk_get_parse_doc_query(Req), + Options = [{user_ctx, Req#httpd.user_ctx} | Options0], {ok, Resp} = start_json_response(Req, 200), send_chunk(Resp, <<"{\"results\": [">>), @@ -519,8 +518,12 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) -> false -> throw({bad_request, "Exceeded maximum number of revisions."}); true -> ok end, - {ok, Results} = fabric:purge_docs(Db, IdsRevs2, Options), - {Code, Json} = purge_results_to_json(IdsRevs2, Results), + couch_stats:increment_counter([couchdb, document_purges, total], length(IdsRevs2)), + Results2 = case fabric:purge_docs(Db, IdsRevs2, Options) of + {ok, Results} -> Results; + {accepted, Results} -> Results + end, + {Code, Json} = purge_results_to_json(IdsRevs2, Results2), send_json(Req, Code, {[{<<"purge_seq">>, null}, {<<"purged">>, {Json}}]}); db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) -> @@ -1023,14 +1026,17 @@ purge_results_to_json([], []) -> {201, []}; purge_results_to_json([{DocId, _Revs} | RIn], [{ok, PRevs} | ROut]) -> {Code, Results} = purge_results_to_json(RIn, ROut), + couch_stats:increment_counter([couchdb, document_purges, success]), {Code, [{DocId, couch_doc:revs_to_strs(PRevs)} | Results]}; purge_results_to_json([{DocId, _Revs} | RIn], [{accepted, PRevs} | ROut]) -> {Code, Results} = purge_results_to_json(RIn, ROut), + couch_stats:increment_counter([couchdb, document_purges, success]), NewResults = [{DocId, couch_doc:revs_to_strs(PRevs)} | Results], {erlang:max(Code, 202), NewResults}; purge_results_to_json([{DocId, _Revs} | RIn], [Error | ROut]) -> {Code, Results} = purge_results_to_json(RIn, ROut), {NewCode, ErrorStr, Reason} = chttpd:error_info(Error), + couch_stats:increment_counter([couchdb, document_purges, failure]), NewResults = [{DocId, {[{error, ErrorStr}, {reason, Reason}]}} | Results], {erlang:max(NewCode, Code), NewResults}. diff --git a/src/chttpd/src/chttpd_external.erl b/src/chttpd/src/chttpd_external.erl index 64664b98e..fa35c6ba2 100644 --- a/src/chttpd/src/chttpd_external.erl +++ b/src/chttpd/src/chttpd_external.erl @@ -12,6 +12,8 @@ -module(chttpd_external). +-compile(tuple_calls). 
+ -export([handle_external_req/2, handle_external_req/3]). -export([send_external_response/2]). -export([json_req_obj_fields/0, json_req_obj/2, json_req_obj/3, json_req_obj/4]). diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl index 89a86aecc..1a6b3cb50 100644 --- a/src/chttpd/src/chttpd_misc.erl +++ b/src/chttpd/src/chttpd_misc.erl @@ -50,6 +50,7 @@ handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) -> {couchdb, WelcomeMessage}, {version, list_to_binary(couch_server:get_version())}, {git_sha, list_to_binary(couch_server:get_git_sha())}, + {uuid, couch_server:get_uuid()}, {features, config:features()} ] ++ case config:get("vendor") of [] -> @@ -178,6 +179,7 @@ handle_dbs_info_req(Req) -> send_method_not_allowed(Req, "POST"). handle_task_status_req(#httpd{method='GET'}=Req) -> + ok = chttpd:verify_is_server_admin(Req), {Replies, _BadNodes} = gen_server:multi_call(couch_task_status, all), Response = lists:flatmap(fun({Node, Tasks}) -> [{[{node,Node} | Task]} || Task <- Tasks] @@ -240,7 +242,10 @@ cancel_replication(PostBody, Ctx) -> [] -> {error, badrpc}; Else -> - hd(Else) + % Unclear what to do here -- pick the first error? + % Except try ignoring any {error, not_found} responses + % because we'll always get two of those + hd(Else -- [{error, not_found}]) end end. diff --git a/src/chttpd/src/chttpd_prefer_header.erl b/src/chttpd/src/chttpd_prefer_header.erl index f550e80e5..1ad1443ea 100644 --- a/src/chttpd/src/chttpd_prefer_header.erl +++ b/src/chttpd/src/chttpd_prefer_header.erl @@ -12,6 +12,7 @@ -module(chttpd_prefer_header). +-compile(tuple_calls). -export([ maybe_return_minimal/2 diff --git a/src/chttpd/src/chttpd_rewrite.erl b/src/chttpd/src/chttpd_rewrite.erl index 039390eed..019651374 100644 --- a/src/chttpd/src/chttpd_rewrite.erl +++ b/src/chttpd/src/chttpd_rewrite.erl @@ -16,6 +16,9 @@ %% @doc Module for URL rewriting by pattern matching. -module(chttpd_rewrite). + +-compile(tuple_calls). + -export([handle_rewrite_req/3]). -include_lib("couch/include/couch_db.hrl"). @@ -64,7 +67,14 @@ do_rewrite(#httpd{mochi_req=MochiReq}=Req, {Props}=Rewrite) when is_list(Props) Path, MochiReq:get(version), Headers), - NewMochiReq:cleanup(), + Body = case couch_util:get_value(<<"body">>, Props) of + undefined -> erlang:get(mochiweb_request_body); + B -> B + end, + case Body of + undefined -> NewMochiReq:cleanup(); + _ -> erlang:put(mochiweb_request_body, Body) + end, couch_log:debug("rewrite to ~p", [Path]), chttpd:handle_request_int(NewMochiReq); Code -> diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl index fe84b67eb..369248ea6 100644 --- a/src/chttpd/src/chttpd_sup.erl +++ b/src/chttpd/src/chttpd_sup.erl @@ -80,21 +80,18 @@ maybe_replace(Key, Value, Settings) -> end. lru_opts() -> - case config:get("chttpd_auth_cache", "max_objects") of - MxObjs when is_integer(MxObjs), MxObjs > 0 -> - [{max_objects, MxObjs}]; - _ -> - [] - end ++ - case config:get("chttpd_auth_cache", "max_size", "104857600") of - MxSize when is_integer(MxSize), MxSize > 0 -> - [{max_size, MxSize}]; - _ -> - [] - end ++ - case config:get("chttpd_auth_cache", "max_lifetime", "600000") of - MxLT when is_integer(MxLT), MxLT > 0 -> - [{max_lifetime, MxLT}]; - _ -> - [] - end. + lists:foldl(fun append_if_set/2, [], [ + {max_objects, config:get_integer("chttpd_auth_cache", "max_objects", 0)}, + {max_size, config:get_integer("chttpd_auth_cache", "max_size", 104857600)}, + {max_lifetime, config:get_integer("chttpd_auth_cache", "max_lifetime", 600000)} + ]). 
+ +append_if_set({Key, Value}, Opts) when Value > 0 -> + [{Key, Value} | Opts]; +append_if_set({Key, 0}, Opts) -> + Opts; +append_if_set({Key, Value}, Opts) -> + couch_log:error( + "The value for `~s` should be string convertable " + "to integer which is >= 0 (got `~p`)", [Key, Value]), + Opts. diff --git a/src/chttpd/test/chttpd_db_bulk_get_test.erl b/src/chttpd/test/chttpd_db_bulk_get_test.erl index f8921311b..908d1f022 100644 --- a/src/chttpd/test/chttpd_db_bulk_get_test.erl +++ b/src/chttpd/test/chttpd_db_bulk_get_test.erl @@ -214,7 +214,8 @@ should_include_attachments_when_atts_since_specified(_) -> ?_assert(meck:called(fabric, open_revs, [nil, DocId, [{1, <<"revorev">>}], - [{atts_since, [{1, <<"abc">>}]}, attachments]])). + [{atts_since, [{1, <<"abc">>}]}, attachments, + {user_ctx, undefined}]])). %% helpers diff --git a/src/chttpd/test/chttpd_prefer_header_test.erl b/src/chttpd/test/chttpd_prefer_header_test.erl index a8a5b3d26..0f43ba437 100644 --- a/src/chttpd/test/chttpd_prefer_header_test.erl +++ b/src/chttpd/test/chttpd_prefer_header_test.erl @@ -11,6 +11,9 @@ % the License. -module(chttpd_prefer_header_test). + +-compile(tuple_calls). + -include_lib("couch/include/couch_db.hrl"). -include_lib("eunit/include/eunit.hrl"). diff --git a/src/chttpd/test/chttpd_purge_tests.erl b/src/chttpd/test/chttpd_purge_tests.erl index 686552590..af1bd0b1c 100644 --- a/src/chttpd/test/chttpd_purge_tests.erl +++ b/src/chttpd/test/chttpd_purge_tests.erl @@ -70,6 +70,7 @@ purge_test_() -> [ fun test_empty_purge_request/1, fun test_ok_purge_request/1, + fun test_accepted_purge_request/1, fun test_partial_purge_request/1, fun test_mixed_purge_request/1, fun test_overmany_ids_or_revs_purge_request/1, @@ -135,6 +136,38 @@ test_ok_purge_request(Url) -> end). +test_accepted_purge_request(Url) -> + ?_test(begin + {ok, _, _, Body} = create_doc(Url, "doc1"), + {Json} = ?JSON_DECODE(Body), + Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), + IdsRevsEJson = {[ + {<<"doc1">>, [Rev1]} + ]}, + IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), + meck:new(fabric, [passthrough]), + meck:expect(fabric, purge_docs, + fun(_, _, _) -> {accepted,[{accepted,[{1, + <<57,27,64,134,152,18,73,243,40,1,141,214,135,104,79,188>>}]}]} + end + ), + {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], IdsRevs), + ResultJson = ?JSON_DECODE(ResultBody), + meck:unload(fabric), + ?assert(Status =:= 202), + ?assertEqual( + {[ + {<<"purge_seq">>, null}, + {<<"purged">>, {[ + {<<"doc1">>, [Rev1]} + ]}} + ]}, + ResultJson + ) + end). + + test_partial_purge_request(Url) -> ?_test(begin {ok, _, _, Body} = create_doc(Url, "doc1"), diff --git a/src/chttpd/test/chttpd_welcome_test.erl b/src/chttpd/test/chttpd_welcome_test.erl index b737abd7a..e427f4dff 100644 --- a/src/chttpd/test/chttpd_welcome_test.erl +++ b/src/chttpd/test/chttpd_welcome_test.erl @@ -45,12 +45,30 @@ welcome_test_() -> fun setup/0, fun teardown/1, [ fun should_have_version/1, - fun should_have_features/1 + fun should_have_features/1, + fun should_have_uuid/1 ] } } }. 
+should_have_uuid(Url) -> + ?_test(begin + {ok, Status, _, Body} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]), + ?assertEqual(200, Status), + {Json} = ?JSON_DECODE(Body), + CouchDB = couch_util:get_value(<<"couchdb">>, Json, undefined), + Uuid = couch_util:get_value(<<"uuid">>, Json, undefined), + Features = couch_util:get_value(<<"features">>, Json, undefined), + Sha = couch_util:get_value(<<"git_sha">>, Json, undefined), + ?assertNotEqual(Sha, undefined), + ?assertEqual(<<"Welcome">>, CouchDB), + RealUuid = couch_server:get_uuid(), + + ?assertEqual(RealUuid, Uuid), + ?assert(is_list(Features)) + end). + should_have_version(Url) -> ?_test(begin diff --git a/src/couch/priv/couch_js/help.h b/src/couch/priv/couch_js/help.h index c6d76b257..678651fd3 100644 --- a/src/couch/priv/couch_js/help.h +++ b/src/couch/priv/couch_js/help.h @@ -52,6 +52,7 @@ static const char USAGE_TEMPLATE[] = " should not be enabled for production systems)\n" " -S SIZE specify that the runtime should allow at\n" " most SIZE bytes of memory to be allocated\n" + " default is 64 MiB\n" " -u FILE path to a .uri file containing the address\n" " (or addresses) of one or more servers\n" " --eval Enable runtime code evaluation (dangerous!)\n" diff --git a/src/couch/priv/stats_descriptions.cfg b/src/couch/priv/stats_descriptions.cfg index bceb0cea8..e5ac9d722 100644 --- a/src/couch/priv/stats_descriptions.cfg +++ b/src/couch/priv/stats_descriptions.cfg @@ -50,9 +50,17 @@ {type, counter}, {desc, <<"number of document write operations">>} ]}. -{[couchdb, document_purges], [ +{[couchdb, document_purges, total], [ {type, counter}, - {desc, <<"number of document purge operations">>} + {desc, <<"number of total document purge operations">>} +]}. +{[couchdb, document_purges, success], [ + {type, counter}, + {desc, <<"number of successful document purge operations">>} +]}. +{[couchdb, document_purges, failure], [ + {type, counter}, + {desc, <<"number of failed document purge operations">>} ]}. {[couchdb, local_document_writes], [ {type, counter}, diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl index 6d858ed49..f856bde8f 100644 --- a/src/couch/src/couch_bt_engine.erl +++ b/src/couch/src/couch_bt_engine.erl @@ -196,7 +196,7 @@ decref(St) -> monitored_by(St) -> case erlang:process_info(St#st.fd, monitored_by) of {monitored_by, Pids} -> - Pids; + lists:filter(fun is_pid/1, Pids); _ -> [] end. diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl index 467bb2ff8..9c8e7adb3 100644 --- a/src/couch/src/couch_bt_engine_header.erl +++ b/src/couch/src/couch_bt_engine_header.erl @@ -234,8 +234,8 @@ upgrade_disk_version(#db_header{}=Header) -> 2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); 3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR}); 4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11) - 5 -> Header; % pre 1.2 - 6 -> Header; % pre clustered purge + 5 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre 1.2 + 6 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre clustered purge ?LATEST_DISK_VERSION -> Header; _ -> Reason = "Incorrect disk header version", @@ -368,12 +368,12 @@ upgrade_v3_test() -> -endif. 
-upgrade_v5_test() -> +upgrade_v5_to_v7_test() -> Vsn5Header = mk_header(5), NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)), ?assert(is_record(NewHeader, db_header)), - ?assertEqual(5, disk_version(NewHeader)), + ?assertEqual(7, disk_version(NewHeader)), % Security ptr isn't changed for v5 headers ?assertEqual(bang, security_ptr(NewHeader)). diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index 55664e964..9d6a5dc45 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -56,6 +56,7 @@ is_db/1, is_system_db/1, is_clustered/1, + is_system_db_name/1, set_revs_limit/2, set_purge_infos_limit/2, @@ -424,20 +425,22 @@ get_minimum_purge_seq(#db{} = Db) -> case DocId of <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> -> ClientSeq = couch_util:get_value(<<"purge_seq">>, Props), + DbName = couch_db:name(Db), + % If there's a broken doc we have to keep every + % purge info until the doc is fixed or removed. + Fmt = "Invalid purge doc '~s' on ~p with purge_seq '~w'", case ClientSeq of CS when is_integer(CS), CS >= PurgeSeq - PurgeInfosLimit -> {ok, SeqAcc}; CS when is_integer(CS) -> - case purge_client_exists(Db, DocId, Props) of - true -> {ok, erlang:min(CS, SeqAcc)}; - false -> {ok, SeqAcc} + case purge_client_exists(DbName, DocId, Props) of + true -> + {ok, erlang:min(CS, SeqAcc)}; + false -> + couch_log:error(Fmt, [DocId, DbName, ClientSeq]), + {ok, SeqAcc} end; _ -> - % If there's a broken doc we have to keep every - % purge info until the doc is fixed or removed. - Fmt = "Invalid purge doc '~s' on database ~p - with purge_seq '~w'", - DbName = couch_db:name(Db), couch_log:error(Fmt, [DocId, DbName, ClientSeq]), {ok, erlang:min(OldestPurgeSeq, SeqAcc)} end; @@ -490,7 +493,7 @@ purge_client_exists(DbName, DocId, Props) -> % it exists. Fmt2 = "Failed to check purge checkpoint using document '~p' in database ~p", - couch_log:error(Fmt2, [DbName, DocId]), + couch_log:error(Fmt2, [DocId, DbName]), true end. @@ -604,8 +607,8 @@ get_db_info(Db) -> ], {ok, InfoList}. -get_design_docs(#db{name = <<"shards/", _:18/binary, DbFullName/binary>>}) -> - DbName = ?l2b(filename:rootname(filename:basename(?b2l(DbFullName)))), +get_design_docs(#db{name = <<"shards/", _/binary>> = ShardDbName}) -> + DbName = mem3:dbname(ShardDbName), {_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end), receive {'DOWN', Ref, _, _, Response} -> Response @@ -1111,69 +1114,35 @@ doc_tag(#doc{meta=Meta}) -> end. 
update_docs(Db, Docs0, Options, replicated_changes) -> - increment_stat(Db, [couchdb, database_writes]), Docs = tag_docs(Docs0), - DocBuckets = before_docs_update(Db, group_alike_docs(Docs)), - - case (Db#db.validate_doc_funs /= []) orelse - lists:any( - fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true; - (#doc{atts=Atts}) -> - Atts /= [] - end, Docs) of - true -> - Ids = [Id || [#doc{id=Id}|_] <- DocBuckets], - ExistingDocs = get_full_doc_infos(Db, Ids), - {DocBuckets2, DocErrors} = - prep_and_validate_replicated_updates(Db, DocBuckets, ExistingDocs, [], []), - DocBuckets3 = [Bucket || [_|_]=Bucket <- DocBuckets2]; % remove empty buckets - false -> - DocErrors = [], - DocBuckets3 = DocBuckets + PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) -> + prep_and_validate_replicated_updates(Db0, DocBuckets0, + ExistingDocInfos, [], []) end, - DocBuckets4 = [[doc_flush_atts(Db, check_dup_atts(Doc)) - || Doc <- Bucket] || Bucket <- DocBuckets3], - {ok, []} = write_and_commit(Db, DocBuckets4, [], [merge_conflicts | Options]), + + {ok, DocBuckets, NonRepDocs, DocErrors} + = before_docs_update(Db, Docs, PrepValidateFun), + + DocBuckets2 = [[doc_flush_atts(Db, check_dup_atts(Doc)) + || Doc <- Bucket] || Bucket <- DocBuckets], + {ok, _} = write_and_commit(Db, DocBuckets2, + NonRepDocs, [merge_conflicts | Options]), {ok, DocErrors}; update_docs(Db, Docs0, Options, interactive_edit) -> - increment_stat(Db, [couchdb, database_writes]), - AllOrNothing = lists:member(all_or_nothing, Options), Docs = tag_docs(Docs0), - % Separate _local docs from normal docs - IsLocal = fun - (#doc{id= <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true; - (_) -> false + AllOrNothing = lists:member(all_or_nothing, Options), + PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) -> + prep_and_validate_updates(Db0, DocBuckets0, ExistingDocInfos, + AllOrNothing, [], []) end, - {NonRepDocs, Docs2} = lists:partition(IsLocal, Docs), - - DocBuckets = before_docs_update(Db, group_alike_docs(Docs2)), - - case (Db#db.validate_doc_funs /= []) orelse - lists:any( - fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) -> - true; - (#doc{atts=Atts}) -> - Atts /= [] - end, Docs2) of - true -> - % lookup the doc by id and get the most recent - Ids = [Id || [#doc{id=Id}|_] <- DocBuckets], - ExistingDocInfos = get_full_doc_infos(Db, Ids), - {DocBucketsPrepped, PreCommitFailures} = prep_and_validate_updates(Db, - DocBuckets, ExistingDocInfos, AllOrNothing, [], []), + {ok, DocBuckets, NonRepDocs, DocErrors} + = before_docs_update(Db, Docs, PrepValidateFun), - % strip out any empty buckets - DocBuckets2 = [Bucket || [_|_] = Bucket <- DocBucketsPrepped]; - false -> - PreCommitFailures = [], - DocBuckets2 = DocBuckets - end, - - if (AllOrNothing) and (PreCommitFailures /= []) -> + if (AllOrNothing) and (DocErrors /= []) -> RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]), {aborted, lists:map(fun({Ref, Error}) -> #doc{id=Id,revs={Start,RevIds}} = dict:fetch(Ref, RefErrorDict), @@ -1181,21 +1150,22 @@ update_docs(Db, Docs0, Options, interactive_edit) -> {Pos, [RevId | _]} -> {{Id, {Pos, RevId}}, Error}; {0, []} -> {{Id, {0, <<>>}}, Error} end - end, PreCommitFailures)}; + end, DocErrors)}; true -> Options2 = if AllOrNothing -> [merge_conflicts]; true -> [] end ++ Options, - DocBuckets3 = [[ + DocBuckets2 = [[ doc_flush_atts(Db, set_new_att_revpos( check_dup_atts(Doc))) - || Doc <- B] || B <- DocBuckets2], - {DocBuckets4, IdRevs} = new_revs(DocBuckets3, [], []), + || Doc <- B] || B <- DocBuckets], + {DocBuckets3, IdRevs} = 
new_revs(DocBuckets2, [], []), - {ok, CommitResults} = write_and_commit(Db, DocBuckets4, NonRepDocs, Options2), + {ok, CommitResults} = write_and_commit(Db, DocBuckets3, + NonRepDocs, Options2), ResultsDict = lists:foldl(fun({Key, Resp}, ResultsAcc) -> dict:store(Key, Resp, ResultsAcc) - end, dict:from_list(IdRevs), CommitResults ++ PreCommitFailures), + end, dict:from_list(IdRevs), CommitResults ++ DocErrors), {ok, lists:map(fun(Doc) -> dict:fetch(doc_tag(Doc), ResultsDict) end, Docs)} @@ -1313,13 +1283,42 @@ prepare_doc_summaries(Db, BucketList) -> Bucket) || Bucket <- BucketList]. -before_docs_update(#db{} = Db, BucketList) -> - [lists:map( - fun(Doc) -> - DocWithBody = couch_doc:with_ejson_body(Doc), - couch_db_plugin:before_doc_update(Db, DocWithBody) - end, - Bucket) || Bucket <- BucketList]. +before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun) -> + increment_stat(Db, [couchdb, database_writes]), + + % Separate _local docs from normal docs + IsLocal = fun + (#doc{id= <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true; + (_) -> false + end, + {NonRepDocs, Docs2} = lists:partition(IsLocal, Docs), + + BucketList = group_alike_docs(Docs2), + + DocBuckets = lists:map(fun(Bucket) -> + lists:map(fun(Doc) -> + DocWithBody = couch_doc:with_ejson_body(Doc), + couch_db_plugin:before_doc_update(Db, DocWithBody) + end, Bucket) + end, BucketList), + + ValidatePred = fun + (#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true; + (#doc{atts = Atts}) -> Atts /= [] + end, + + case (VDFuns /= []) orelse lists:any(ValidatePred, Docs2) of + true -> + % lookup the doc by id and get the most recent + Ids = [Id || [#doc{id = Id} | _] <- DocBuckets], + ExistingDocs = get_full_doc_infos(Db, Ids), + {DocBuckets2, DocErrors} = PVFun(Db, DocBuckets, ExistingDocs), + % remove empty buckets + DocBuckets3 = [Bucket || Bucket <- DocBuckets2, Bucket /= []], + {ok, DocBuckets3, NonRepDocs, DocErrors}; + false -> + {ok, DocBuckets, NonRepDocs, []} + end. set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts0}=Doc) -> @@ -1729,17 +1728,24 @@ validate_dbname_int(DbName, Normalized) when is_binary(DbName) -> match -> ok; nomatch -> - case is_systemdb(Normalized) of + case is_system_db_name(Normalized) of true -> ok; false -> {error, {illegal_database_name, DbName}} end end. -is_systemdb(DbName) when is_list(DbName) -> - is_systemdb(?l2b(DbName)); -is_systemdb(DbName) when is_binary(DbName) -> - lists:member(dbname_suffix(DbName), ?SYSTEM_DATABASES). - +is_system_db_name(DbName) when is_list(DbName) -> + is_system_db_name(?l2b(DbName)); +is_system_db_name(DbName) when is_binary(DbName) -> + Normalized = normalize_dbname(DbName), + Suffix = filename:basename(Normalized), + case {filename:dirname(Normalized), lists:member(Suffix, ?SYSTEM_DATABASES)} of + {<<".">>, Result} -> Result; + {Prefix, false} -> false; + {Prefix, true} -> + ReOpts = [{capture,none}, dollar_endonly], + re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match + end. 
set_design_doc_keys(Options1) -> Dir = case lists:keyfind(dir, 1, Options1) of @@ -1831,7 +1837,9 @@ validate_dbname_fail_test_() -> Cases = generate_cases("_long/co$mplex-/path+/_something") ++ generate_cases("_something") ++ generate_cases_with_shards("long/co$mplex-/path+/_something#") - ++ generate_cases_with_shards("long/co$mplex-/path+/some.thing"), + ++ generate_cases_with_shards("long/co$mplex-/path+/some.thing") + ++ generate_cases("!abcdefg/werwej/_users") + ++ generate_cases_with_shards("!abcdefg/werwej/_users"), { foreach, fun setup/0, fun teardown/1, [should_fail_validate_dbname(A) || {_, A} <- Cases] @@ -1851,7 +1859,7 @@ dbname_suffix_test_() -> [{test_name({Expected, Db}), ?_assertEqual(Expected, dbname_suffix(Db))} || {Expected, Db} <- WithExpected]. -is_systemdb_test_() -> +is_system_db_name_test_() -> Cases = lists:append([ generate_cases_with_shards("long/co$mplex-/path+/" ++ ?b2l(Db)) || Db <- ?SYSTEM_DATABASES] @@ -1860,7 +1868,7 @@ is_systemdb_test_() -> WithExpected = [{?l2b(filename:basename(filename:rootname(Arg))), Db} || {Arg, Db} <- Cases], [{test_name({Expected, Db}) ++ " in ?SYSTEM_DATABASES", - ?_assert(is_systemdb(Db))} || {Expected, Db} <- WithExpected]. + ?_assert(is_system_db_name(Db))} || {Expected, Db} <- WithExpected]. should_pass_validate_dbname(DbName) -> {test_name(DbName), ?_assertEqual(ok, validate_dbname(DbName))}. diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl index 52a4d2f1b..87301d2d8 100644 --- a/src/couch/src/couch_db_updater.erl +++ b/src/couch/src/couch_db_updater.erl @@ -627,28 +627,31 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts, FullCommit) -> update_local_doc_revs(Docs) -> - lists:map(fun({Client, NewDoc}) -> - #doc{ - deleted = Delete, - revs = {0, PrevRevs} - } = NewDoc, - case PrevRevs of - [RevStr | _] -> - PrevRev = binary_to_integer(RevStr); - [] -> - PrevRev = 0 - end, - NewRev = case Delete of - false -> - PrevRev + 1; - true -> - 0 - end, - send_result(Client, NewDoc, {ok, {0, integer_to_binary(NewRev)}}), - NewDoc#doc{ - revs = {0, [NewRev]} - } - end, Docs). + lists:foldl(fun({Client, Doc}, Acc) -> + case increment_local_doc_revs(Doc) of + {ok, #doc{revs = {0, [NewRev]}} = NewDoc} -> + send_result(Client, Doc, {ok, {0, integer_to_binary(NewRev)}}), + [NewDoc | Acc]; + {error, Error} -> + send_result(Client, Doc, {error, Error}), + Acc + end + end, [], Docs). + + +increment_local_doc_revs(#doc{deleted = true} = Doc) -> + {ok, Doc#doc{revs = {0, [0]}}}; +increment_local_doc_revs(#doc{revs = {0, []}} = Doc) -> + {ok, Doc#doc{revs = {0, [1]}}}; +increment_local_doc_revs(#doc{revs = {0, [RevStr | _]}} = Doc) -> + try + PrevRev = binary_to_integer(RevStr), + {ok, Doc#doc{revs = {0, [PrevRev + 1]}}} + catch error:badarg -> + {error, <<"Invalid rev format">>} + end; +increment_local_doc_revs(#doc{}) -> + {error, <<"Invalid rev format">>}. purge_docs(Db, []) -> @@ -808,3 +811,64 @@ hibernate_if_no_idle_limit() -> Timeout when is_integer(Timeout) -> Timeout end. + + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). + + +update_local_doc_revs_test_() -> + {inparallel, [ + {"Test local doc with valid rev", fun t_good_local_doc/0}, + {"Test local doc with invalid rev", fun t_bad_local_doc/0}, + {"Test deleted local doc", fun t_dead_local_doc/0} + ]}. 
+ + +t_good_local_doc() -> + Doc = #doc{ + id = <<"_local/alice">>, + revs = {0, [<<"1">>]}, + meta = [{ref, make_ref()}] + }, + [NewDoc] = update_local_doc_revs([{self(), Doc}]), + ?assertEqual({0, [2]}, NewDoc#doc.revs), + {ok, Result} = receive_result(Doc), + ?assertEqual({ok,{0,<<"2">>}}, Result). + + +t_bad_local_doc() -> + lists:foreach(fun(BadRevs) -> + Doc = #doc{ + id = <<"_local/alice">>, + revs = BadRevs, + meta = [{ref, make_ref()}] + }, + NewDocs = update_local_doc_revs([{self(), Doc}]), + ?assertEqual([], NewDocs), + {ok, Result} = receive_result(Doc), + ?assertEqual({error,<<"Invalid rev format">>}, Result) + end, [{0, [<<"a">>]}, {1, [<<"1">>]}]). + + + +t_dead_local_doc() -> + Doc = #doc{ + id = <<"_local/alice">>, + revs = {0, [<<"122">>]}, + deleted = true, + meta = [{ref, make_ref()}] + }, + [NewDoc] = update_local_doc_revs([{self(), Doc}]), + ?assertEqual({0, [0]}, NewDoc#doc.revs), + {ok, Result} = receive_result(Doc), + ?assertEqual({ok,{0,<<"0">>}}, Result). + + +receive_result(#doc{meta = Meta}) -> + Ref = couch_util:get_value(ref, Meta), + receive + {result, _, {Ref, Result}} -> {ok, Result} + end. + +-endif. diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl index f960ec5c2..e5ad9e9f1 100644 --- a/src/couch/src/couch_doc.erl +++ b/src/couch/src/couch_doc.erl @@ -200,7 +200,7 @@ parse_revs(_) -> validate_docid(DocId, DbName) -> case DbName =:= ?l2b(config:get("mem3", "shards_db", "_dbs")) andalso - lists:member(DocId, ?SYSTEM_DATABASES) of + couch_db:is_system_db_name(DocId) of true -> ok; false -> diff --git a/src/couch/src/couch_file.erl b/src/couch/src/couch_file.erl index 6aa8d0b89..d6e4066db 100644 --- a/src/couch/src/couch_file.erl +++ b/src/couch/src/couch_file.erl @@ -614,18 +614,18 @@ read_raw_iolist_int(Fd, {Pos, _Size}, Len) -> % 0110 UPGRADE CODE read_raw_iolist_int(#file{fd = Fd, pread_limit = Limit} = F, Pos, Len) -> BlockOffset = Pos rem ?SIZE_BLOCK, TotalBytes = calculate_total_read_len(BlockOffset, Len), - case Pos + TotalBytes of - Size when Size > F#file.eof -> - couch_stats:increment_counter([pread, exceed_eof]), - {_Fd, Filepath} = get(couch_file_fd), - throw({read_beyond_eof, Filepath}); - Size when Size > Limit -> - couch_stats:increment_counter([pread, exceed_limit]), - {_Fd, Filepath} = get(couch_file_fd), - throw({exceed_pread_limit, Filepath, Limit}); - Size -> - {ok, <<RawBin:TotalBytes/binary>>} = file:pread(Fd, Pos, TotalBytes), - {remove_block_prefixes(BlockOffset, RawBin), Size} + if + (Pos + TotalBytes) > F#file.eof -> + couch_stats:increment_counter([pread, exceed_eof]), + {_Fd, Filepath} = get(couch_file_fd), + throw({read_beyond_eof, Filepath}); + TotalBytes > Limit -> + couch_stats:increment_counter([pread, exceed_limit]), + {_Fd, Filepath} = get(couch_file_fd), + throw({exceed_pread_limit, Filepath, Limit}); + true -> + {ok, <<RawBin:TotalBytes/binary>>} = file:pread(Fd, Pos, TotalBytes), + {remove_block_prefixes(BlockOffset, RawBin), Pos + TotalBytes} end. -spec extract_md5(iolist()) -> {binary(), iolist()}. @@ -695,19 +695,22 @@ split_iolist([Sublist| Rest], SplitAt, BeginAcc) when is_list(Sublist) -> split_iolist([Byte | Rest], SplitAt, BeginAcc) when is_integer(Byte) -> split_iolist(Rest, SplitAt - 1, [Byte | BeginAcc]). +monitored_by_pids() -> + {monitored_by, PidsAndRefs} = process_info(self(), monitored_by), + lists:filter(fun is_pid/1, PidsAndRefs). 
% System dbs aren't monitored by couch_stats_process_tracker is_idle(#file{is_sys=true}) -> - case process_info(self(), monitored_by) of - {monitored_by, []} -> true; + case monitored_by_pids() of + [] -> true; _ -> false end; is_idle(#file{is_sys=false}) -> Tracker = whereis(couch_stats_process_tracker), - case process_info(self(), monitored_by) of - {monitored_by, []} -> true; - {monitored_by, [Tracker]} -> true; - {monitored_by, [_]} -> exit(tracker_monitoring_failed); + case monitored_by_pids() of + [] -> true; + [Tracker] -> true; + [_] -> exit(tracker_monitoring_failed); _ -> false end. diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl index a8cfca6d2..e66a78e70 100644 --- a/src/couch/src/couch_httpd.erl +++ b/src/couch/src/couch_httpd.erl @@ -11,6 +11,9 @@ % the License. -module(couch_httpd). + +-compile(tuple_calls). + -include_lib("couch/include/couch_db.hrl"). -export([start_link/0, start_link/1, stop/0, handle_request/5]). @@ -37,6 +40,8 @@ -define(HANDLER_NAME_IN_MODULE_POS, 6). +-define(MAX_DRAIN_BYTES, 1048576). +-define(MAX_DRAIN_TIME_MSEC, 1000). start_link() -> start_link(http). @@ -1178,10 +1183,9 @@ respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) -> % just increases the chances of 413 being detected correctly by the client % (rather than getting a brutal TCP reset). erlang:put(mochiweb_request_force_close, true), - Socket = MochiReq:get(socket), - mochiweb_socket:recv(Socket, 0, 0), Result = MochiReq:Type({413, Headers, Args}), - mochiweb_socket:recv(Socket, 0, 0), + Socket = MochiReq:get(socket), + mochiweb_socket:recv(Socket, ?MAX_DRAIN_BYTES, ?MAX_DRAIN_TIME_MSEC), Result; respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) -> MochiReq:Type({Code, Headers, Args}). diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl index 6ac7b75af..660727195 100644 --- a/src/couch/src/couch_httpd_auth.erl +++ b/src/couch/src/couch_httpd_auth.erl @@ -11,6 +11,9 @@ % the License. -module(couch_httpd_auth). + +-compile(tuple_calls). + -include_lib("couch/include/couch_db.hrl"). -export([party_mode_handler/1]). diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl index 81209d9ff..ced146e39 100644 --- a/src/couch/src/couch_httpd_db.erl +++ b/src/couch/src/couch_httpd_db.erl @@ -11,6 +11,9 @@ % the License. -module(couch_httpd_db). + +-compile(tuple_calls). + -include_lib("couch/include/couch_db.hrl"). -export([handle_request/1, handle_compact_req/2, handle_design_req/2, diff --git a/src/couch/src/couch_httpd_external.erl b/src/couch/src/couch_httpd_external.erl index 1f2f1e884..684f90091 100644 --- a/src/couch/src/couch_httpd_external.erl +++ b/src/couch/src/couch_httpd_external.erl @@ -12,6 +12,8 @@ -module(couch_httpd_external). +-compile(tuple_calls). + -export([handle_external_req/2, handle_external_req/3]). -export([send_external_response/2, json_req_obj/2, json_req_obj/3]). -export([default_or_content_type/2, parse_external_response/1]). diff --git a/src/couch/src/couch_httpd_proxy.erl b/src/couch/src/couch_httpd_proxy.erl index 7e9aed721..d2c7acc3a 100644 --- a/src/couch/src/couch_httpd_proxy.erl +++ b/src/couch/src/couch_httpd_proxy.erl @@ -11,6 +11,8 @@ % the License. -module(couch_httpd_proxy). +-compile(tuple_calls). + -export([handle_proxy_req/2]). -include_lib("couch/include/couch_db.hrl"). 
diff --git a/src/couch/src/couch_httpd_rewrite.erl b/src/couch/src/couch_httpd_rewrite.erl index e2a24218b..2845c0b16 100644 --- a/src/couch/src/couch_httpd_rewrite.erl +++ b/src/couch/src/couch_httpd_rewrite.erl @@ -16,6 +16,9 @@ %% @doc Module for URL rewriting by pattern matching. -module(couch_httpd_rewrite). + +-compile(tuple_calls). + -export([handle_rewrite_req/3]). -include_lib("couch/include/couch_db.hrl"). diff --git a/src/couch/src/couch_httpd_vhost.erl b/src/couch/src/couch_httpd_vhost.erl index f23f41da2..d8f952190 100644 --- a/src/couch/src/couch_httpd_vhost.erl +++ b/src/couch/src/couch_httpd_vhost.erl @@ -15,6 +15,8 @@ -vsn(1). -behaviour(config_listener). +-compile(tuple_calls). + -export([start_link/0, reload/0, get_state/0, dispatch_host/1]). -export([urlsplit_netloc/2, redirect_to_vhost/2]). -export([host/1, split_host_port/1]). diff --git a/src/couch/test/couch_bt_engine_upgrade_tests.erl b/src/couch/test/couch_bt_engine_upgrade_tests.erl index 1d2a86d71..8025528fb 100644 --- a/src/couch/test/couch_bt_engine_upgrade_tests.erl +++ b/src/couch/test/couch_bt_engine_upgrade_tests.erl @@ -66,12 +66,13 @@ t_upgrade_without_purge_req() -> % db with zero purge entries DbName = <<"db_without_purge_req">>, + ?assertEqual(6, get_disk_version_from_header(DbName)), {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> ?assertEqual(0, couch_db:get_purge_seq(Db)), couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, []) end), ?assertEqual([], UpgradedPurged), - + ?assertEqual(7, get_disk_version_from_header(DbName)), {ok, Rev} = save_doc( DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]} ), @@ -104,10 +105,12 @@ t_upgrade_with_1_purge_req() -> % with a single purge entry DbName = <<"db_with_1_purge_req">>, + ?assertEqual(6, get_disk_version_from_header(DbName)), {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> ?assertEqual(1, couch_db:get_purge_seq(Db)), couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, []) end), + ?assertEqual(7, get_disk_version_from_header(DbName)), ?assertEqual([{1, <<"doc1">>}], UpgradedPurged), {ok, Rev} = save_doc( @@ -142,10 +145,12 @@ t_upgrade_with_N_purge_req() -> % with two docs that have been purged DbName = <<"db_with_2_purge_req">>, + ?assertEqual(6, get_disk_version_from_header(DbName)), {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> ?assertEqual(2, couch_db:get_purge_seq(Db)), couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, []) end), + ?assertEqual(7, get_disk_version_from_header(DbName)), ?assertEqual([{2, <<"doc2">>}], UpgradedPurged), {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}), @@ -179,10 +184,12 @@ t_upgrade_with_1_purge_req_for_2_docs() -> % with one purge req for Doc1 and another purge req for Doc 2 and Doc3 DbName = <<"db_with_1_purge_req_for_2_docs">>, + ?assertEqual(6, get_disk_version_from_header(DbName)), {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> ?assertEqual(3, couch_db:get_purge_seq(Db)), couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, []) end), + ?assertEqual(7, get_disk_version_from_header(DbName)), ?assertEqual([{3,<<"doc2">>},{2,<<"doc3">>}], UpgradedPurged), {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc6">>}, {<<"v">>, 1}]}), @@ -218,3 +225,13 @@ save_doc(DbName, Json) -> fold_fun({PSeq, _UUID, Id, _Revs}, Acc) -> {ok, [{PSeq, Id} | Acc]}. 
+ + +get_disk_version_from_header(DbFileName) -> + DbDir = config:get("couchdb", "database_dir"), + DbFilePath = filename:join([DbDir, ?l2b(?b2l(DbFileName) ++ ".couch")]), + {ok, Fd} = couch_file:open(DbFilePath, []), + {ok, Header} = couch_file:read_header(Fd), + DiskVerison = couch_bt_engine_header:disk_version(Header), + couch_file:close(Fd), + DiskVerison. diff --git a/src/couch/test/couchdb_http_proxy_tests.erl b/src/couch/test/couchdb_http_proxy_tests.erl index d54ff15c4..f60ba3b08 100644 --- a/src/couch/test/couchdb_http_proxy_tests.erl +++ b/src/couch/test/couchdb_http_proxy_tests.erl @@ -12,6 +12,8 @@ -module(couchdb_http_proxy_tests). +-compile(tuple_calls). + -include_lib("couch/include/couch_eunit.hrl"). -record(req, {method=get, path="", headers=[], body="", opts=[]}). diff --git a/src/couch/test/couchdb_update_conflicts_tests.erl b/src/couch/test/couchdb_update_conflicts_tests.erl index 09c2834a8..e92c73856 100644 --- a/src/couch/test/couchdb_update_conflicts_tests.erl +++ b/src/couch/test/couchdb_update_conflicts_tests.erl @@ -17,6 +17,7 @@ -define(i2l(I), integer_to_list(I)). -define(DOC_ID, <<"foobar">>). +-define(LOCAL_DOC_ID, <<"_local/foobar">>). -define(NUM_CLIENTS, [100, 500, 1000, 2000, 5000, 10000]). -define(TIMEOUT, 20000). @@ -52,7 +53,7 @@ view_indexes_cleanup_test_() -> fun start/0, fun test_util:stop_couch/1, [ concurrent_updates(), - couchdb_188() + bulk_docs_updates() ] } }. @@ -68,13 +69,17 @@ concurrent_updates()-> } }. -couchdb_188()-> +bulk_docs_updates()-> { - "COUCHDB-188", + "Bulk docs updates", { foreach, fun setup/0, fun teardown/1, - [fun should_bulk_create_delete_doc/1] + [ + fun should_bulk_create_delete_doc/1, + fun should_bulk_create_local_doc/1, + fun should_ignore_invalid_local_doc/1 + ] } }. @@ -91,6 +96,12 @@ should_concurrently_update_doc(NumClients, {DbName, InitRev})-> should_bulk_create_delete_doc({DbName, InitRev})-> ?_test(bulk_delete_create(DbName, InitRev)). +should_bulk_create_local_doc({DbName, _})-> + ?_test(bulk_create_local_doc(DbName)). + +should_ignore_invalid_local_doc({DbName, _})-> + ?_test(ignore_invalid_local_doc(DbName)). + concurrent_doc_update(NumClients, DbName, InitRev) -> Clients = lists:map( @@ -157,10 +168,10 @@ ensure_in_single_revision_leaf(DbName) -> [{ok, Doc2}] = Leaves, ?assertEqual(Doc, Doc2). - + bulk_delete_create(DbName, InitRev) -> {ok, Db} = couch_db:open_int(DbName, []), - + DeletedDoc = couch_doc:from_json_obj({[ {<<"_id">>, ?DOC_ID}, {<<"_rev">>, InitRev}, @@ -176,7 +187,7 @@ bulk_delete_create(DbName, InitRev) -> ?assertEqual(2, length([ok || {ok, _} <- Results])), [{ok, Rev1}, {ok, Rev2}] = Results, - + {ok, Db2} = couch_db:open_int(DbName, []), {ok, [{ok, Doc1}]} = couch_db:open_doc_revs( Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]), @@ -214,6 +225,45 @@ bulk_delete_create(DbName, InitRev) -> ?assertEqual(3, element(1, Rev2)). +bulk_create_local_doc(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + + LocalDoc = couch_doc:from_json_obj({[ + {<<"_id">>, ?LOCAL_DOC_ID}, + {<<"_rev">>, <<"0-1">>} + ]}), + + {ok, Results} = couch_db:update_docs(Db, [LocalDoc], + [], replicated_changes), + ok = couch_db:close(Db), + ?assertEqual([], Results), + + {ok, Db2} = couch_db:open_int(DbName, []), + {ok, LocalDoc1} = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []), + ok = couch_db:close(Db2), + ?assertEqual(?LOCAL_DOC_ID, LocalDoc1#doc.id), + ?assertEqual({0, [<<"2">>]}, LocalDoc1#doc.revs). 
+ + +ignore_invalid_local_doc(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + + LocalDoc = couch_doc:from_json_obj({[ + {<<"_id">>, ?LOCAL_DOC_ID}, + {<<"_rev">>, <<"0-abcdef">>} + ]}), + + {ok, Results} = couch_db:update_docs(Db, [LocalDoc], + [], replicated_changes), + ok = couch_db:close(Db), + ?assertEqual([], Results), + + {ok, Db2} = couch_db:open_int(DbName, []), + Result2 = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []), + ok = couch_db:close(Db2), + ?assertEqual({not_found, missing}, Result2). + + spawn_client(DbName, Doc) -> spawn(fun() -> {ok, Db} = couch_db:open_int(DbName, []), diff --git a/src/couch/test/couchdb_views_tests.erl b/src/couch/test/couchdb_views_tests.erl index 1b1a8e56b..60bb5c975 100644 --- a/src/couch/test/couchdb_views_tests.erl +++ b/src/couch/test/couchdb_views_tests.erl @@ -523,7 +523,8 @@ view_cleanup(DbName) -> count_users(DbName) -> {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), DbPid = couch_db:get_pid(Db), - {monitored_by, Monitors} = process_info(DbPid, monitored_by), + {monitored_by, Monitors0} = process_info(DbPid, monitored_by), + Monitors = lists:filter(fun is_pid/1, Monitors0), CouchFiles = [P || P <- Monitors, couch_file:process_info(P) =/= undefined], ok = couch_db:close(Db), length(lists:usort(Monitors) -- [self() | CouchFiles]). diff --git a/src/couch/test/test_web.erl b/src/couch/test/test_web.erl index d568b7e45..b1b3e65c9 100644 --- a/src/couch/test/test_web.erl +++ b/src/couch/test/test_web.erl @@ -13,6 +13,8 @@ -module(test_web). -behaviour(gen_server). +-compile(tuple_calls). + -include_lib("couch/include/couch_eunit.hrl"). -export([start_link/0, stop/0, loop/1, get_port/0, set_assert/1, check_last/0]). diff --git a/src/couch_index/src/couch_index.erl b/src/couch_index/src/couch_index.erl index 8fba0a23f..cae95779e 100644 --- a/src/couch_index/src/couch_index.erl +++ b/src/couch_index/src/couch_index.erl @@ -13,6 +13,8 @@ -module(couch_index). -behaviour(gen_server). +-compile(tuple_calls). + -vsn(3). %% API diff --git a/src/couch_log/src/couch_log_formatter.erl b/src/couch_log/src/couch_log_formatter.erl index 5be3619f2..4d81f184f 100644 --- a/src/couch_log/src/couch_log_formatter.erl +++ b/src/couch_log/src/couch_log_formatter.erl @@ -56,23 +56,33 @@ format(Level, Pid, Msg) -> }. -format({error, _GL, {Pid, "** Generic server " ++ _, Args}}) -> +format(Event) -> + try + do_format(Event) + catch + Tag:Err -> + Msg = "Encountered error ~w when formatting ~w", + format(error, self(), Msg, [{Tag, Err}, Event]) + end. 
+ + +do_format({error, _GL, {Pid, "** Generic server " ++ _, Args}}) -> %% gen_server terminate - [Name, LastMsg, State, Reason] = Args, + [Name, LastMsg, State, Reason | Extra] = Args, MsgFmt = "gen_server ~w terminated with reason: ~s~n" ++ - " last msg: ~p~n state: ~p", - MsgArgs = [Name, format_reason(Reason), LastMsg, State], + " last msg: ~p~n state: ~p~n extra: ~p", + MsgArgs = [Name, format_reason(Reason), LastMsg, State, Extra], format(error, Pid, MsgFmt, MsgArgs); -format({error, _GL, {Pid, "** State machine " ++ _, Args}}) -> +do_format({error, _GL, {Pid, "** State machine " ++ _, Args}}) -> %% gen_fsm terminate - [Name, LastMsg, StateName, State, Reason] = Args, + [Name, LastMsg, StateName, State, Reason | Extra] = Args, MsgFmt = "gen_fsm ~w in state ~w terminated with reason: ~s~n" ++ - " last msg: ~p~n state: ~p", - MsgArgs = [Name, StateName, format_reason(Reason), LastMsg, State], + " last msg: ~p~n state: ~p~n extra: ~p", + MsgArgs = [Name, StateName, format_reason(Reason), LastMsg, State, Extra], format(error, Pid, MsgFmt, MsgArgs); -format({error, _GL, {Pid, "** gen_event handler" ++ _, Args}}) -> +do_format({error, _GL, {Pid, "** gen_event handler" ++ _, Args}}) -> %% gen_event handler terminate [ID, Name, LastMsg, State, Reason] = Args, MsgFmt = "gen_event ~w installed in ~w terminated with reason: ~s~n" ++ @@ -80,20 +90,20 @@ format({error, _GL, {Pid, "** gen_event handler" ++ _, Args}}) -> MsgArgs = [ID, Name, format_reason(Reason), LastMsg, State], format(error, Pid, MsgFmt, MsgArgs); -format({error, _GL, {emulator, "~s~n", [Msg]}}) when is_list(Msg) -> +do_format({error, _GL, {emulator, "~s~n", [Msg]}}) when is_list(Msg) -> % These messages are for whenever any process exits due % to a throw or error. We intercept here to remove the % extra newlines. 
NewMsg = lists:sublist(Msg, length(Msg) - 1), format(error, emulator, NewMsg); -format({error, _GL, {Pid, Fmt, Args}}) -> +do_format({error, _GL, {Pid, Fmt, Args}}) -> format(error, Pid, Fmt, Args); -format({error_report, _GL, {Pid, std_error, D}}) -> +do_format({error_report, _GL, {Pid, std_error, D}}) -> format(error, Pid, print_silly_list(D)); -format({error_report, _GL, {Pid, supervisor_report, D}}) -> +do_format({error_report, _GL, {Pid, supervisor_report, D}}) -> case lists:sort(D) of [{errorContext, Ctx}, {offender, Off}, {reason, Reason}, {supervisor, Name}] -> @@ -111,20 +121,20 @@ format({error_report, _GL, {Pid, supervisor_report, D}}) -> format(error, Pid, "SUPERVISOR REPORT " ++ print_silly_list(D)) end; -format({error_report, _GL, {Pid, crash_report, [Report, Neighbors]}}) -> +do_format({error_report, _GL, {Pid, crash_report, [Report, Neighbors]}}) -> Msg = "CRASH REPORT " ++ format_crash_report(Report, Neighbors), format(error, Pid, Msg); -format({warning_msg, _GL, {Pid, Fmt, Args}}) -> +do_format({warning_msg, _GL, {Pid, Fmt, Args}}) -> format(warning, Pid, Fmt, Args); -format({warning_report, _GL, {Pid, std_warning, Report}}) -> +do_format({warning_report, _GL, {Pid, std_warning, Report}}) -> format(warning, Pid, print_silly_list(Report)); -format({info_msg, _GL, {Pid, Fmt, Args}}) -> +do_format({info_msg, _GL, {Pid, Fmt, Args}}) -> format(info, Pid, Fmt, Args); -format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) -> +do_format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) -> case lists:sort(D) of [{application, App}, {exited, Reason}, {type, _Type}] -> MsgFmt = "Application ~w exited with reason: ~s", @@ -133,10 +143,10 @@ format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) -> format(info, Pid, print_silly_list(D)) end; -format({info_report, _GL, {Pid, std_info, D}}) -> +do_format({info_report, _GL, {Pid, std_info, D}}) -> format(info, Pid, "~w", [D]); -format({info_report, _GL, {Pid, progress, D}}) -> +do_format({info_report, _GL, {Pid, progress, D}}) -> case lists:sort(D) of [{application, App}, {started_at, Node}] -> MsgFmt = "Application ~w started on node ~w", @@ -150,7 +160,7 @@ format({info_report, _GL, {Pid, progress, D}}) -> format(info, Pid, "PROGRESS REPORT " ++ print_silly_list(D)) end; -format(Event) -> +do_format(Event) -> format(warning, self(), "Unexpected error_logger event ~w", [Event]). diff --git a/src/couch_log/src/couch_log_monitor.erl b/src/couch_log/src/couch_log_monitor.erl index 236d34012..ab0ae115f 100644 --- a/src/couch_log/src/couch_log_monitor.erl +++ b/src/couch_log/src/couch_log_monitor.erl @@ -38,6 +38,7 @@ start_link() -> init(_) -> + error_logger:start(), ok = gen_event:add_sup_handler(error_logger, ?HANDLER_MOD, []), {ok, nil}. diff --git a/src/couch_log/src/couch_log_sup.erl b/src/couch_log/src/couch_log_sup.erl index 083f5fc33..6219a36e9 100644 --- a/src/couch_log/src/couch_log_sup.erl +++ b/src/couch_log/src/couch_log_sup.erl @@ -26,7 +26,7 @@ start_link() -> init([]) -> ok = couch_log_config:init(), - {ok, {{one_for_one, 1, 1}, children()}}. + {ok, {{one_for_one, 10, 10}, children()}}. children() -> diff --git a/src/couch_log/test/couch_log_formatter_test.erl b/src/couch_log/test/couch_log_formatter_test.erl index a8f69b221..795efcf29 100644 --- a/src/couch_log/test/couch_log_formatter_test.erl +++ b/src/couch_log/test/couch_log_formatter_test.erl @@ -37,6 +37,29 @@ format_reason_test() -> ?assertEqual(Formatted, lists:flatten(Entry#log_entry.msg)). 
+crashing_formatting_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "** Generic server and some stuff", + [a_gen_server, {foo, bar}, server_state] % not enough args! + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event) + ), + do_matches(do_format(Event), [ + "Encountered error {error,{badmatch" + ]). + + gen_server_error_test() -> Pid = self(), Event = { @@ -59,7 +82,35 @@ gen_server_error_test() -> "gen_server a_gen_server terminated", "with reason: some_reason", "last msg: {foo,bar}", - "state: server_state" + "state: server_state", + "extra: \\[\\]" + ]). + + +gen_server_error_with_extra_args_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "** Generic server and some stuff", + [a_gen_server, {foo, bar}, server_state, some_reason, sad, args] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event) + ), + do_matches(do_format(Event), [ + "gen_server a_gen_server terminated", + "with reason: some_reason", + "last msg: {foo,bar}", + "state: server_state", + "extra: \\[sad,args\\]" ]). @@ -85,7 +136,35 @@ gen_fsm_error_test() -> "gen_fsm a_gen_fsm in state state_name", "with reason: barf", "last msg: {ohai,there}", - "state: curr_state" + "state: curr_state", + "extra: \\[\\]" + ]). + + +gen_fsm_error_with_extra_args_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "** State machine did a thing", + [a_gen_fsm, {ohai,there}, state_name, curr_state, barf, sad, args] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event) + ), + do_matches(do_format(Event), [ + "gen_fsm a_gen_fsm in state state_name", + "with reason: barf", + "last msg: {ohai,there}", + "state: curr_state", + "extra: \\[sad,args\\]" ]). diff --git a/src/couch_log/test/couch_log_test_util.erl b/src/couch_log/test/couch_log_test_util.erl index 05d64d8a9..c7fd34f2d 100644 --- a/src/couch_log/test/couch_log_test_util.erl +++ b/src/couch_log/test/couch_log_test_util.erl @@ -123,9 +123,12 @@ last_log() -> remove_error_loggers() -> - lists:foreach(fun(Handler) -> - error_logger:delete_report_handler(Handler) - end, gen_event:which_handlers(error_logger)). + ErrorLoggerPid = whereis(error_logger), + if ErrorLoggerPid == undefined -> ok; true -> + lists:foreach(fun(Handler) -> + error_logger:delete_report_handler(Handler) + end, gen_event:which_handlers(ErrorLoggerPid)) + end. 
config_files() -> diff --git a/src/couch_mrview/src/couch_mrview_compactor.erl b/src/couch_mrview/src/couch_mrview_compactor.erl index 3ef11805f..9a069cec0 100644 --- a/src/couch_mrview/src/couch_mrview_compactor.erl +++ b/src/couch_mrview/src/couch_mrview_compactor.erl @@ -86,7 +86,9 @@ compact(State) -> {type, view_compaction}, {database, DbName}, {design_document, IdxName}, - {progress, 0} + {progress, 0}, + {changes_done, 0}, + {total_changes, TotalChanges} ]), BufferSize0 = config:get( diff --git a/src/couch_replicator/src/couch_replicator_connection.erl b/src/couch_replicator/src/couch_replicator_connection.erl index 9c6472360..f3e4a864d 100644 --- a/src/couch_replicator/src/couch_replicator_connection.erl +++ b/src/couch_replicator/src/couch_replicator_connection.erl @@ -156,7 +156,11 @@ handle_cast({connection_close_interval, V}, State) -> handle_info({'DOWN', Ref, process, _Pid, _Reason}, State) -> couch_stats:increment_counter([couch_replicator, connection, owner_crashes]), - ets:match_delete(?MODULE, #connection{mref=Ref, _='_'}), + Conns = ets:match_object(?MODULE, #connection{mref = Ref, _='_'}), + lists:foreach(fun(Conn) -> + couch_stats:increment_counter([couch_replicator, connection, closes]), + delete_worker(Conn) + end, Conns), {noreply, State}; % worker crashed diff --git a/src/couch_replicator/src/couch_replicator_doc_processor.erl b/src/couch_replicator/src/couch_replicator_doc_processor.erl index d3c001f26..1b43598da 100644 --- a/src/couch_replicator/src/couch_replicator_doc_processor.erl +++ b/src/couch_replicator/src/couch_replicator_doc_processor.erl @@ -264,7 +264,9 @@ code_change(_OldVsn, State, _Extra) -> % same document. -spec updated_doc(db_doc_id(), #rep{}, filter_type()) -> ok. updated_doc(Id, Rep, Filter) -> - case normalize_rep(current_rep(Id)) == normalize_rep(Rep) of + NormCurRep = couch_replicator_utils:normalize_rep(current_rep(Id)), + NormNewRep = couch_replicator_utils:normalize_rep(Rep), + case NormCurRep == NormNewRep of false -> removed_doc(Id), Row = #rdoc{ @@ -304,25 +306,6 @@ current_rep({DbName, DocId}) when is_binary(DbName), is_binary(DocId) -> end. -% Normalize a #rep{} record such that it doesn't contain time dependent fields -% pids (like httpc pools), and options / props are sorted. This function would -% used during comparisons. --spec normalize_rep(#rep{} | nil) -> #rep{} | nil. -normalize_rep(nil) -> - nil; - -normalize_rep(#rep{} = Rep)-> - #rep{ - source = couch_replicator_api_wrap:normalize_db(Rep#rep.source), - target = couch_replicator_api_wrap:normalize_db(Rep#rep.target), - options = Rep#rep.options, % already sorted in make_options/1 - type = Rep#rep.type, - view = Rep#rep.view, - doc_id = Rep#rep.doc_id, - db_name = Rep#rep.db_name - }. - - -spec worker_returned(reference(), db_doc_id(), rep_start_result()) -> ok. worker_returned(Ref, Id, {ok, RepId}) -> case ets:lookup(?MODULE, Id) of @@ -819,34 +802,6 @@ t_cluster_membership_foldl() -> end). 
-normalize_rep_test_() ->
-    {
-        setup,
-        fun() -> meck:expect(config, get,
-            fun(_, _, Default) -> Default end)
-        end,
-        fun(_) -> meck:unload() end,
-        ?_test(begin
-            EJson1 = {[
-                {<<"source">>, <<"http://host.com/source_db">>},
-                {<<"target">>, <<"local">>},
-                {<<"doc_ids">>, [<<"a">>, <<"c">>, <<"b">>]},
-                {<<"other_field">>, <<"some_value">>}
-            ]},
-            Rep1 = couch_replicator_docs:parse_rep_doc_without_id(EJson1),
-            EJson2 = {[
-                {<<"other_field">>, <<"unrelated">>},
-                {<<"target">>, <<"local">>},
-                {<<"source">>, <<"http://host.com/source_db">>},
-                {<<"doc_ids">>, [<<"c">>, <<"a">>, <<"b">>]},
-                {<<"other_field2">>, <<"unrelated2">>}
-            ]},
-            Rep2 = couch_replicator_docs:parse_rep_doc_without_id(EJson2),
-            ?assertEqual(normalize_rep(Rep1), normalize_rep(Rep2))
-        end)
-    }.
-
-
 get_worker_ref_test_() ->
     {
         setup,
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index 62d21fe12..013475683 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -366,6 +366,14 @@ save_rep_doc(DbName, Doc) ->
     {ok, Db} = couch_db:open_int(DbName, [?CTX, sys_db]),
     try
         couch_db:update_doc(Db, Doc, [])
+    catch
+        % User can accidentally write a VDU which prevents _replicator from
+        % updating replication documents. Avoid crashing the replicator and
+        % thus preventing all other replication jobs on the node from running.
+        throw:{forbidden, Reason} ->
+            Msg = "~p VDU function preventing doc update to ~s ~s ~p",
+            couch_log:error(Msg, [?MODULE, DbName, Doc#doc.id, Reason]),
+            {ok, forbidden}
     after
         couch_db:close(Db)
     end.
@@ -694,7 +702,9 @@ error_reason(Reason) ->
 -ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
+
+-include_lib("couch/include/couch_eunit.hrl").
+
 check_options_pass_values_test() ->
     ?assertEqual(check_options([]), []),
@@ -766,4 +776,50 @@ check_strip_credentials_test() ->
         }
     ]].
+
+setup() ->
+    DbName = ?tempdb(),
+    {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
+    ok = couch_db:close(Db),
+    create_vdu(DbName),
+    DbName.
+
+
+teardown(DbName) when is_binary(DbName) ->
+    couch_server:delete(DbName, [?ADMIN_CTX]),
+    ok.
+
+
+create_vdu(DbName) ->
+    couch_util:with_db(DbName, fun(Db) ->
+        VduFun = <<"function(newdoc, olddoc, userctx) {throw({'forbidden':'fail'})}">>,
+        Doc = #doc{
+            id = <<"_design/vdu">>,
+            body = {[{<<"validate_doc_update">>, VduFun}]}
+        },
+        {ok, _} = couch_db:update_docs(Db, [Doc]),
+        couch_db:ensure_full_commit(Db)
+    end).
+
+
+update_replicator_doc_with_bad_vdu_test_() ->
+    {
+        setup,
+        fun test_util:start_couch/0,
+        fun test_util:stop_couch/1,
+        {
+            foreach, fun setup/0, fun teardown/1,
+            [
+                fun t_vdu_does_not_crash_on_save/1
+            ]
+        }
+    }.
+
+
+t_vdu_does_not_crash_on_save(DbName) ->
+    ?_test(begin
+        Doc = #doc{id = <<"some_id">>, body = {[{<<"foo">>, 42}]}},
+        ?assertEqual({ok, forbidden}, save_rep_doc(DbName, Doc))
+    end).
+
 -endif.
diff --git a/src/couch_replicator/src/couch_replicator_scheduler.erl b/src/couch_replicator/src/couch_replicator_scheduler.erl
index 50896c548..762ef18fe 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler.erl
@@ -102,12 +102,17 @@ start_link() ->
 -spec add_job(#rep{}) -> ok.
 add_job(#rep{} = Rep) when Rep#rep.id /= undefined ->
-    Job = #job{
-        id = Rep#rep.id,
-        rep = Rep,
-        history = [{added, os:timestamp()}]
-    },
-    gen_server:call(?MODULE, {add_job, Job}, infinity).
+    case existing_replication(Rep) of
+        false ->
+            Job = #job{
+                id = Rep#rep.id,
+                rep = Rep,
+                history = [{added, os:timestamp()}]
+            },
+            gen_server:call(?MODULE, {add_job, Job}, infinity);
+        true ->
+            ok
+    end.


 -spec remove_job(job_id()) -> ok.
@@ -925,6 +930,17 @@ stats_fold(#job{pid = P, history = [{started, _} | _]}, Acc) when is_pid(P) ->
     Acc#stats_acc{running_n = Acc#stats_acc.running_n + 1}.


+-spec existing_replication(#rep{}) -> boolean().
+existing_replication(#rep{} = NewRep) ->
+    case job_by_id(NewRep#rep.id) of
+        {ok, #job{rep = CurRep}} ->
+            NormCurRep = couch_replicator_utils:normalize_rep(CurRep),
+            NormNewRep = couch_replicator_utils:normalize_rep(NewRep),
+            NormCurRep == NormNewRep;
+        {error, not_found} ->
+            false
+    end.
+

 -ifdef(TEST).
@@ -1017,6 +1033,7 @@ scheduler_test_() ->
             t_start_oldest_first(),
             t_dont_stop_if_nothing_pending(),
             t_max_churn_limits_number_of_rotated_jobs(),
+            t_existing_jobs(),
             t_if_pending_less_than_running_start_all_pending(),
             t_running_less_than_pending_swap_all_running(),
             t_oneshot_dont_get_rotated(),
@@ -1308,6 +1325,30 @@ t_if_permanent_job_crashes_it_stays_in_ets() ->
     end).


+t_existing_jobs() ->
+    ?_test(begin
+        Rep = #rep{
+            id = job1,
+            db_name = <<"db">>,
+            source = <<"s">>,
+            target = <<"t">>,
+            options = [{continuous, true}]
+        },
+        setup_jobs([#job{id = Rep#rep.id, rep = Rep}]),
+        NewRep = #rep{
+            id = Rep#rep.id,
+            db_name = <<"db">>,
+            source = <<"s">>,
+            target = <<"t">>,
+            options = [{continuous, true}]
+        },
+        ?assert(existing_replication(NewRep)),
+        ?assertNot(existing_replication(NewRep#rep{source = <<"s1">>})),
+        ?assertNot(existing_replication(NewRep#rep{target = <<"t1">>})),
+        ?assertNot(existing_replication(NewRep#rep{options = []}))
+    end).
+
+
 t_job_summary_running() ->
     ?_test(begin
         Job = #job{
diff --git a/src/couch_replicator/src/couch_replicator_utils.erl b/src/couch_replicator/src/couch_replicator_utils.erl
index 218fcf501..b0d706953 100644
--- a/src/couch_replicator/src/couch_replicator_utils.erl
+++ b/src/couch_replicator/src/couch_replicator_utils.erl
@@ -28,7 +28,8 @@
     pp_rep_id/1,
     iso8601/1,
     filter_state/3,
-    remove_basic_auth_from_headers/1
+    remove_basic_auth_from_headers/1,
+    normalize_rep/1
 ]).

 -export([
@@ -208,6 +209,25 @@ decode_basic_creds(Base64) ->
     end.


+% Normalize a #rep{} record so that it doesn't contain time-dependent fields
+% or pids (like httpc pools), and so that options / props are sorted. This
+% function is used during comparisons.
+-spec normalize_rep(#rep{} | nil) -> #rep{} | nil.
+normalize_rep(nil) ->
+    nil;
+
+normalize_rep(#rep{} = Rep) ->
+    #rep{
+        source = couch_replicator_api_wrap:normalize_db(Rep#rep.source),
+        target = couch_replicator_api_wrap:normalize_db(Rep#rep.target),
+        options = Rep#rep.options, % already sorted in make_options/1
+        type = Rep#rep.type,
+        view = Rep#rep.view,
+        doc_id = Rep#rep.doc_id,
+        db_name = Rep#rep.db_name
+    }.
+
+
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
@@ -259,4 +279,31 @@ b64creds(User, Pass) ->
     base64:encode_to_string(User ++ ":" ++ Pass).
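The save_rep_doc/2 catch clause added to couch_replicator_docs.erl earlier in this patch converts a VDU rejection into a logged {ok, forbidden} instead of a crash. A condensed sketch of the pattern; the wrapper name is hypothetical and the logging is simplified relative to the patch:

    %% Turn a validate_doc_update rejection into a benign return value so
    %% that one bad VDU cannot take down the replicator manager.
    save_doc_tolerant(Db, Doc) ->
        try
            couch_db:update_doc(Db, Doc, [])
        catch
            throw:{forbidden, Reason} ->
                couch_log:error("VDU rejected doc update: ~p", [Reason]),
                {ok, forbidden}
        end.
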
+normalize_rep_test_() -> + { + setup, + fun() -> meck:expect(config, get, + fun(_, _, Default) -> Default end) + end, + fun(_) -> meck:unload() end, + ?_test(begin + EJson1 = {[ + {<<"source">>, <<"http://host.com/source_db">>}, + {<<"target">>, <<"local">>}, + {<<"doc_ids">>, [<<"a">>, <<"c">>, <<"b">>]}, + {<<"other_field">>, <<"some_value">>} + ]}, + Rep1 = couch_replicator_docs:parse_rep_doc_without_id(EJson1), + EJson2 = {[ + {<<"other_field">>, <<"unrelated">>}, + {<<"target">>, <<"local">>}, + {<<"source">>, <<"http://host.com/source_db">>}, + {<<"doc_ids">>, [<<"c">>, <<"a">>, <<"b">>]}, + {<<"other_field2">>, <<"unrelated2">>} + ]}, + Rep2 = couch_replicator_docs:parse_rep_doc_without_id(EJson2), + ?assertEqual(normalize_rep(Rep1), normalize_rep(Rep2)) + end) + }. + -endif. diff --git a/src/fabric/src/fabric_doc_attachments.erl b/src/fabric/src/fabric_doc_attachments.erl index 7c6ba6610..723b9e804 100644 --- a/src/fabric/src/fabric_doc_attachments.erl +++ b/src/fabric/src/fabric_doc_attachments.erl @@ -12,6 +12,8 @@ -module(fabric_doc_attachments). +-compile(tuple_calls). + -include_lib("fabric/include/fabric.hrl"). -include_lib("couch/include/couch_db.hrl"). diff --git a/src/fabric/src/fabric_doc_atts.erl b/src/fabric/src/fabric_doc_atts.erl index 7ef5dd893..a3aae80ec 100644 --- a/src/fabric/src/fabric_doc_atts.erl +++ b/src/fabric/src/fabric_doc_atts.erl @@ -12,6 +12,8 @@ -module(fabric_doc_atts). +-compile(tuple_calls). + -include_lib("fabric/include/fabric.hrl"). -include_lib("couch/include/couch_db.hrl"). diff --git a/src/mango/Makefile b/src/mango/Makefile index 1b2a50452..59f4a29c2 100644 --- a/src/mango/Makefile +++ b/src/mango/Makefile @@ -46,7 +46,7 @@ test: # target: pip-install - Installs requires Python packages pip-install: pip install nose requests - pip install hypothesis + pip install hypothesis==3.79.0 .PHONY: venv diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl index 51ec68c45..b3a7f4080 100644 --- a/src/mango/src/mango_cursor_view.erl +++ b/src/mango/src/mango_cursor_view.erl @@ -229,6 +229,9 @@ view_cb({row, Row}, #mrargs{extra = Options} = Acc) -> doc = couch_util:get_value(doc, Row) }, case ViewRow#view_row.doc of + null -> + put(mango_docs_examined, get(mango_docs_examined) + 1), + maybe_send_mango_ping(); undefined -> ViewRow2 = ViewRow#view_row{ value = couch_util:get_value(value, Row) @@ -405,31 +408,44 @@ doc_member(Cursor, RowProps) -> Opts = Cursor#cursor.opts, ExecutionStats = Cursor#cursor.execution_stats, Selector = Cursor#cursor.selector, - Incr = case couch_util:get_value(value, RowProps) of - N when is_integer(N) -> N; - _ -> 1 + {Matched, Incr} = case couch_util:get_value(value, RowProps) of + N when is_integer(N) -> {true, N}; + _ -> {false, 1} end, case couch_util:get_value(doc, RowProps) of {DocProps} -> ExecutionStats1 = mango_execution_stats:incr_docs_examined(ExecutionStats, Incr), - {ok, {DocProps}, {execution_stats, ExecutionStats1}}; + case Matched of + true -> + {ok, {DocProps}, {execution_stats, ExecutionStats1}}; + false -> + match_doc(Selector, {DocProps}, ExecutionStats1) + end; undefined -> ExecutionStats1 = mango_execution_stats:incr_quorum_docs_examined(ExecutionStats), Id = couch_util:get_value(id, RowProps), case mango_util:defer(fabric, open_doc, [Db, Id, Opts]) of {ok, #doc{}=DocProps} -> Doc = couch_doc:to_json_obj(DocProps, []), - case mango_selector:match(Selector, Doc) of - true -> - {ok, Doc, {execution_stats, ExecutionStats1}}; - false -> - {no_match, Doc, {execution_stats, 
ExecutionStats1}} - end; + match_doc(Selector, Doc, ExecutionStats1); Else -> Else - end + end; + null -> + ExecutionStats1 = mango_execution_stats:incr_docs_examined(ExecutionStats), + {no_match, null, {execution_stats, ExecutionStats1}} + end. + + +match_doc(Selector, Doc, ExecutionStats) -> + case mango_selector:match(Selector, Doc) of + true -> + {ok, Doc, {execution_stats, ExecutionStats}}; + false -> + {no_match, Doc, {execution_stats, ExecutionStats}} end. + is_design_doc(RowProps) -> case couch_util:get_value(id, RowProps) of <<"_design/", _/binary>> -> true; @@ -446,3 +462,55 @@ update_bookmark_keys(#cursor{limit = Limit} = Cursor, Props) when Limit > 0 -> }; update_bookmark_keys(Cursor, _Props) -> Cursor. + + +%%%%%%%% module tests below %%%%%%%% + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). + +runs_match_on_doc_with_no_value_test() -> + Cursor = #cursor { + db = <<"db">>, + opts = [], + execution_stats = #execution_stats{}, + selector = mango_selector:normalize({[{<<"user_id">>, <<"1234">>}]}) + }, + RowProps = [ + {id,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>}, + {key,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>}, + {doc,{ + [ + {<<"_id">>,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>}, + {<<"_rev">>,<<"1-a954fe2308f14307756067b0e18c2968">>}, + {<<"user_id">>,11} + ] + }} + ], + {Match, _, _} = doc_member(Cursor, RowProps), + ?assertEqual(Match, no_match). + +does_not_run_match_on_doc_with_value_test() -> + Cursor = #cursor { + db = <<"db">>, + opts = [], + execution_stats = #execution_stats{}, + selector = mango_selector:normalize({[{<<"user_id">>, <<"1234">>}]}) + }, + RowProps = [ + {id,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>}, + {key,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>}, + {value,1}, + {doc,{ + [ + {<<"_id">>,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>}, + {<<"_rev">>,<<"1-a954fe2308f14307756067b0e18c2968">>}, + {<<"user_id">>,11} + ] + }} + ], + {Match, _, _} = doc_member(Cursor, RowProps), + ?assertEqual(Match, ok). + + +-endif. 
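The doc_member/2 rework above turns on one distinction: a row whose value is an integer is treated as already matched by the index, with the integer used as the docs-examined increment, so the selector is not re-applied; a {DocProps} row that was not pre-matched still goes through mango_selector:match/2 via match_doc/3, and a null doc is counted and rejected outright. A standalone sketch of that decision; the function name is illustrative only:

    %% Restates the {Matched, Incr} pairing from doc_member/2: an integer
    %% row value means "index already proved the match, count N docs
    %% examined"; anything else means "re-check the selector, count 1".
    match_decision(RowProps) ->
        case couch_util:get_value(value, RowProps) of
            N when is_integer(N) -> {true, N};
            _ -> {false, 1}
        end.
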
diff --git a/src/mango/src/mango_native_proc.erl b/src/mango/src/mango_native_proc.erl index 6150e1d19..4c536f871 100644 --- a/src/mango/src/mango_native_proc.erl +++ b/src/mango/src/mango_native_proc.erl @@ -89,11 +89,11 @@ handle_call({prompt, [<<"add_fun">>, IndexInfo]}, _From, St) -> handle_call({prompt, [<<"map_doc">>, Doc]}, _From, St) -> {reply, map_doc(St, mango_json:to_binary(Doc)), St}; -handle_call({prompt, [<<"reduce">>, _, _]}, _From, St) -> - {reply, null, St}; +handle_call({prompt, [<<"reduce">>, RedSrcs, _]}, _From, St) -> + {reply, [true, [null || _ <- RedSrcs]], St}; -handle_call({prompt, [<<"rereduce">>, _, _]}, _From, St) -> - {reply, null, St}; +handle_call({prompt, [<<"rereduce">>, RedSrcs, _]}, _From, St) -> + {reply, [true, [null || _ <- RedSrcs]], St}; handle_call({prompt, [<<"index_doc">>, Doc]}, _From, St) -> Vals = case index_doc(St, mango_json:to_binary(Doc)) of diff --git a/src/mango/test/01-index-crud-test.py b/src/mango/test/01-index-crud-test.py index cf5b91865..f57db39af 100644 --- a/src/mango/test/01-index-crud-test.py +++ b/src/mango/test/01-index-crud-test.py @@ -13,8 +13,24 @@ import random import mango +import copy import unittest +DOCS = [ + { + "_id": "1", + "name": "Jimi", + "age": 10, + "cars": 1 + }, + { + "_id": "2", + "name": "kate", + "age": 8, + "cars": 0 + } +] + class IndexCrudTests(mango.DbPerClass): def setUp(self): self.db.recreate() @@ -271,6 +287,25 @@ class IndexCrudTests(mango.DbPerClass): except Exception as e: self.assertEqual(e.response.status_code, 500) + def test_out_of_sync(self): + self.db.save_docs(copy.deepcopy(DOCS)) + self.db.create_index(["age"], name="age") + + selector = { + "age": { + "$gt": 0 + }, + } + docs = self.db.find(selector, + use_index="_design/a017b603a47036005de93034ff689bbbb6a873c4") + self.assertEqual(len(docs), 2) + + self.db.delete_doc("1") + + docs1 = self.db.find(selector, update="False", + use_index="_design/a017b603a47036005de93034ff689bbbb6a873c4") + self.assertEqual(len(docs1), 1) + @unittest.skipUnless(mango.has_text_service(), "requires text service") class IndexCrudTextTests(mango.DbPerClass): diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py index bc12bbc68..59486c861 100644 --- a/src/mango/test/mango.py +++ b/src/mango/test/mango.py @@ -113,6 +113,12 @@ class Database(object): r.raise_for_status() return r.json() + def delete_doc(self, docid): + r = self.sess.get(self.path(docid)) + r.raise_for_status() + original_rev = r.json()['_rev'] + self.sess.delete(self.path(docid), params={"rev": original_rev}) + def ddoc_info(self, ddocid): r = self.sess.get(self.path([ddocid, "_info"])) r.raise_for_status() diff --git a/src/rexi/src/rexi_server_mon.erl b/src/rexi/src/rexi_server_mon.erl index 86fecaff6..cfe1144ce 100644 --- a/src/rexi/src/rexi_server_mon.erl +++ b/src/rexi/src/rexi_server_mon.erl @@ -68,6 +68,8 @@ cluster_stable(Server) -> init(ChildMod) -> {ok, _Mem3Cluster} = mem3_cluster:start_link(?MODULE, self(), ?CLUSTER_STABILITY_PERIOD_SEC, ?CLUSTER_STABILITY_PERIOD_SEC), + start_servers(ChildMod), + couch_log:notice("~s : started servers", [ChildMod]), {ok, ChildMod}. diff --git a/src/rexi/src/rexi_utils.erl b/src/rexi/src/rexi_utils.erl index 11dbb252a..960318418 100644 --- a/src/rexi/src/rexi_utils.erl +++ b/src/rexi/src/rexi_utils.erl @@ -16,8 +16,8 @@ %% @doc Return a rexi_server id for the given node. 
server_id(Node) -> - case config:get("rexi", "server_per_node", "false") of - "true" -> + case config:get_boolean("rexi", "server_per_node", true) of + true -> list_to_atom("rexi_server_" ++ atom_to_list(Node)); _ -> rexi_server diff --git a/test/javascript/run b/test/javascript/run index a3b3ab704..ec12431b0 100755 --- a/test/javascript/run +++ b/test/javascript/run @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of @@ -146,7 +146,7 @@ def main(): sys.stderr.write("=======================================================" + os.linesep) sys.stderr.write("JavaScript tests complete." + os.linesep) - sys.stderr.write(" Failed: {}. Skipped or passed: {}.".format( + sys.stderr.write(" Failed: {0}. Skipped or passed: {1}.".format( failed, passed) + os.linesep) exit(failed > 0) diff --git a/test/javascript/tests/rewrite_js.js b/test/javascript/tests/rewrite_js.js index 9893127af..22de6c940 100644 --- a/test/javascript/tests/rewrite_js.js +++ b/test/javascript/tests/rewrite_js.js @@ -100,9 +100,12 @@ couchTests.rewrite = function(debug) { return {path: '_list/simpleForm/complexView2', query: {key: JSON.stringify({"c": 1})}}; } - if (req.path.slice(4).join('/') === 'simpleForm/complexView4') { - return {path: '_list/simpleForm/complexView2', - query: {key: JSON.stringify({"c": 1})}}; + if (req.path.slice(4).join('/') === 'simpleForm/sendBody1') { + return {path: '_list/simpleForm/complexView2', + method: 'POST', + query: {limit: '1'}, + headers:{'Content-type':'application/json'}, + body: JSON.stringify( {keys: [{"c": 1}]} )}; } if (req.path.slice(4).join('/') === '/') { return {path: '_view/basicView'}; @@ -283,6 +286,11 @@ couchTests.rewrite = function(debug) { T(xhr.status == 200, "with query params"); T(/Value: doc 5/.test(xhr.responseText)); + // COUCHDB-1612 - send body rewriting get to post + xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/sendBody1"); + T(xhr.status == 200, "get->post rewrite failed:\n"+xhr.responseText); + T(/Value: doc 5 LineNo: 1/.test(xhr.responseText), "get->post rewrite responded wrong:\n"+xhr.responseText); + // COUCHDB-2031 - path normalization versus qs params xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/db/_design/test?meta=true"); T(xhr.status == 200, "path normalization works with qs params"); @@ -340,4 +348,4 @@ couchTests.rewrite = function(debug) { // cleanup db.deleteDb(); } -} +}
\ No newline at end of file
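
For reference, with rexi_utils:server_id/1 above now reading server_per_node as a boolean defaulting to true (and rexi_server_mon starting the servers at init), each node addresses its own registered rexi server rather than a single shared one. A hedged sketch of the naming this produces; the node name below is invented for illustration:

    %% Mirrors rexi_utils:server_id/1 under the new default: per-node
    %% server atoms of the form rexi_server_<node>.
    server_id_example() ->
        Node = 'node1@127.0.0.1',
        %% yields 'rexi_server_node1@127.0.0.1'
        list_to_atom("rexi_server_" ++ atom_to_list(Node)).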