From 60107992135bc65f69ab8528825a13d36d77b4c9 Mon Sep 17 00:00:00 2001
From: garren smith
Date: Fri, 21 Sep 2018 13:53:40 +0200
Subject: Mango match doc on co-ordinating node (#1609)

* Mango match doc on co-ordinating node

This fixes an issue seen when doing a rolling upgrade of a CouchDB
cluster: with commit a6bc72e added, the nodes that were not upgraded yet
would send through all the docs in the index, and those would be passed
through to the user because the co-ordinator would assume they had
already been matched at the node level. This adds a check to see whether
a doc has been matched at the node level or not, and then performs a
match on the co-ordinator if required.
---
 src/mango/src/mango_cursor_view.erl | 82 ++++++++++++++++++++++++++++++++-----
 1 file changed, 72 insertions(+), 10 deletions(-)

diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 51ec68c45..174381e4a 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -405,31 +405,41 @@ doc_member(Cursor, RowProps) ->
     Opts = Cursor#cursor.opts,
     ExecutionStats = Cursor#cursor.execution_stats,
     Selector = Cursor#cursor.selector,
-    Incr = case couch_util:get_value(value, RowProps) of
-        N when is_integer(N) -> N;
-        _ -> 1
+    {Matched, Incr} = case couch_util:get_value(value, RowProps) of
+        N when is_integer(N) -> {true, N};
+        _ -> {false, 1}
     end,
     case couch_util:get_value(doc, RowProps) of
         {DocProps} ->
             ExecutionStats1 = mango_execution_stats:incr_docs_examined(ExecutionStats, Incr),
-            {ok, {DocProps}, {execution_stats, ExecutionStats1}};
+            case Matched of
+                true ->
+                    {ok, {DocProps}, {execution_stats, ExecutionStats1}};
+                false ->
+                    match_doc(Selector, {DocProps}, ExecutionStats1)
+            end;
         undefined ->
             ExecutionStats1 = mango_execution_stats:incr_quorum_docs_examined(ExecutionStats),
             Id = couch_util:get_value(id, RowProps),
             case mango_util:defer(fabric, open_doc, [Db, Id, Opts]) of
                 {ok, #doc{}=DocProps} ->
                     Doc = couch_doc:to_json_obj(DocProps, []),
-                    case mango_selector:match(Selector, Doc) of
-                        true ->
-                            {ok, Doc, {execution_stats, ExecutionStats1}};
-                        false ->
-                            {no_match, Doc, {execution_stats, ExecutionStats1}}
-                    end;
+                    match_doc(Selector, Doc, ExecutionStats1);
                 Else ->
                     Else
             end
     end.
+
+match_doc(Selector, Doc, ExecutionStats) ->
+    case mango_selector:match(Selector, Doc) of
+        true ->
+            {ok, Doc, {execution_stats, ExecutionStats}};
+        false ->
+            {no_match, Doc, {execution_stats, ExecutionStats}}
+    end.
+
+
 is_design_doc(RowProps) ->
     case couch_util:get_value(id, RowProps) of
         <<"_design/", _/binary>> -> true;
@@ -446,3 +456,55 @@ update_bookmark_keys(#cursor{limit = Limit} = Cursor, Props) when Limit > 0 ->
     };
 update_bookmark_keys(Cursor, _Props) ->
     Cursor.
+
+
+%%%%%%%% module tests below %%%%%%%%
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+runs_match_on_doc_with_no_value_test() ->
+    Cursor = #cursor {
+        db = <<"db">>,
+        opts = [],
+        execution_stats = #execution_stats{},
+        selector = mango_selector:normalize({[{<<"user_id">>, <<"1234">>}]})
+    },
+    RowProps = [
+        {id,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
+        {key,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
+        {doc,{
+            [
+                {<<"_id">>,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
+                {<<"_rev">>,<<"1-a954fe2308f14307756067b0e18c2968">>},
+                {<<"user_id">>,11}
+            ]
+        }}
+    ],
+    {Match, _, _} = doc_member(Cursor, RowProps),
+    ?assertEqual(Match, no_match).
+ +does_not_run_match_on_doc_with_value_test() -> + Cursor = #cursor { + db = <<"db">>, + opts = [], + execution_stats = #execution_stats{}, + selector = mango_selector:normalize({[{<<"user_id">>, <<"1234">>}]}) + }, + RowProps = [ + {id,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>}, + {key,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>}, + {value,1}, + {doc,{ + [ + {<<"_id">>,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>}, + {<<"_rev">>,<<"1-a954fe2308f14307756067b0e18c2968">>}, + {<<"user_id">>,11} + ] + }} + ], + {Match, _, _} = doc_member(Cursor, RowProps), + ?assertEqual(Match, ok). + + +-endif. -- cgit v1.2.1 From fbb5588e2d023eb1bcf97cf9e44d483de2b2a62d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrien=20Verg=C3=A9?= Date: Mon, 24 Sep 2018 16:19:09 +0200 Subject: Explicit Python version in scripts Recent Linux distributions start defaulting to Python 3, and require ambiguous scripts to be more explicit. For example building for Fedora 30 (not released yet) fails with: ERROR: ambiguous python shebang in /opt/couchdb/bin/couchup: #!/usr/bin/env python. Change it to python3 (or python2) explicitly. So this commit changes the four Python scripts to use `python2`. Note: They seem to be Python-3-compatible, but I couldn't be sure. If you know they are, please tell me, I'll change it to `python3`. --- build-aux/logfile-uploader.py | 2 +- dev/run | 2 +- rel/overlay/bin/couchup | 2 +- test/javascript/run | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/build-aux/logfile-uploader.py b/build-aux/logfile-uploader.py index c84a96a97..a1ff7e4a7 100755 --- a/build-aux/logfile-uploader.py +++ b/build-aux/logfile-uploader.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of diff --git a/dev/run b/dev/run index a5d8fde8c..d105140b7 100755 --- a/dev/run +++ b/dev/run @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of diff --git a/rel/overlay/bin/couchup b/rel/overlay/bin/couchup index 2d0105107..6532170aa 100755 --- a/rel/overlay/bin/couchup +++ b/rel/overlay/bin/couchup @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at diff --git a/test/javascript/run b/test/javascript/run index a3b3ab704..11f9faee2 100755 --- a/test/javascript/run +++ b/test/javascript/run @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of -- cgit v1.2.1 From 691605b9336acdc2daf76ae27eaabe23a70e8e63 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Tue, 2 Oct 2018 01:05:07 -0400 Subject: Improve connection cleanup in replication connection pool Previously when an owner process crashed before it had a chance to release the worker to the pool, the worker entry was simply deleted. In some cases that was ok because ibrowse's inactivity timeout would kick in and connection would stop itself. 
In other cases, as observed in practice with _changes feed connections
over the TLS protocol, the inactivity timeout would never fire, so these
deleted connections would slowly accumulate, leaking memory and filling
the process table. A TLS connection would keep an associated session
open as well, making things even worse.

To prevent the connection leak, explicitly unlink and kill the worker.
---
 src/couch_replicator/src/couch_replicator_connection.erl | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/couch_replicator/src/couch_replicator_connection.erl b/src/couch_replicator/src/couch_replicator_connection.erl
index 9c6472360..f3e4a864d 100644
--- a/src/couch_replicator/src/couch_replicator_connection.erl
+++ b/src/couch_replicator/src/couch_replicator_connection.erl
@@ -156,7 +156,11 @@ handle_cast({connection_close_interval, V}, State) ->

 handle_info({'DOWN', Ref, process, _Pid, _Reason}, State) ->
     couch_stats:increment_counter([couch_replicator, connection, owner_crashes]),
-    ets:match_delete(?MODULE, #connection{mref=Ref, _='_'}),
+    Conns = ets:match_object(?MODULE, #connection{mref = Ref, _='_'}),
+    lists:foreach(fun(Conn) ->
+        couch_stats:increment_counter([couch_replicator, connection, closes]),
+        delete_worker(Conn)
+    end, Conns),
     {noreply, State};

 % worker crashed
-- cgit v1.2.1


From 080da7cbad751a85394f700610c0a45da79651ae Mon Sep 17 00:00:00 2001
From: Nick Vatamaniuc
Date: Tue, 2 Oct 2018 00:33:21 -0400
Subject: Avoid restarting /_replicate jobs if parameters haven't changed

This used to be the case before the scheduling replicator:

https://github.com/apache/couchdb-couch-replicator/blob/master/src/couch_replicator.erl#L166

This is also how replications backed by a document in a _replicator db
behave:

https://github.com/apache/couchdb/blob/master/src/couch_replicator/src/couch_replicator_doc_processor.erl#L283
---
 .../src/couch_replicator_doc_processor.erl | 51 ++-------------------
 .../src/couch_replicator_scheduler.erl     | 53 +++++++++++++++++++---
 .../src/couch_replicator_utils.erl         | 49 +++++++++++++++++++-
 3 files changed, 98 insertions(+), 55 deletions(-)

diff --git a/src/couch_replicator/src/couch_replicator_doc_processor.erl b/src/couch_replicator/src/couch_replicator_doc_processor.erl
index d3c001f26..1b43598da 100644
--- a/src/couch_replicator/src/couch_replicator_doc_processor.erl
+++ b/src/couch_replicator/src/couch_replicator_doc_processor.erl
@@ -264,7 +264,9 @@ code_change(_OldVsn, State, _Extra) ->
 % same document.
 -spec updated_doc(db_doc_id(), #rep{}, filter_type()) -> ok.
 updated_doc(Id, Rep, Filter) ->
-    case normalize_rep(current_rep(Id)) == normalize_rep(Rep) of
+    NormCurRep = couch_replicator_utils:normalize_rep(current_rep(Id)),
+    NormNewRep = couch_replicator_utils:normalize_rep(Rep),
+    case NormCurRep == NormNewRep of
         false ->
             removed_doc(Id),
             Row = #rdoc{
@@ -304,25 +306,6 @@ current_rep({DbName, DocId}) when is_binary(DbName), is_binary(DocId) ->
     end.

-% Normalize a #rep{} record such that it doesn't contain time dependent fields
-% pids (like httpc pools), and options / props are sorted. This function would
-% used during comparisons.
--spec normalize_rep(#rep{} | nil) -> #rep{} | nil.
-normalize_rep(nil) -> - nil; - -normalize_rep(#rep{} = Rep)-> - #rep{ - source = couch_replicator_api_wrap:normalize_db(Rep#rep.source), - target = couch_replicator_api_wrap:normalize_db(Rep#rep.target), - options = Rep#rep.options, % already sorted in make_options/1 - type = Rep#rep.type, - view = Rep#rep.view, - doc_id = Rep#rep.doc_id, - db_name = Rep#rep.db_name - }. - - -spec worker_returned(reference(), db_doc_id(), rep_start_result()) -> ok. worker_returned(Ref, Id, {ok, RepId}) -> case ets:lookup(?MODULE, Id) of @@ -819,34 +802,6 @@ t_cluster_membership_foldl() -> end). -normalize_rep_test_() -> - { - setup, - fun() -> meck:expect(config, get, - fun(_, _, Default) -> Default end) - end, - fun(_) -> meck:unload() end, - ?_test(begin - EJson1 = {[ - {<<"source">>, <<"http://host.com/source_db">>}, - {<<"target">>, <<"local">>}, - {<<"doc_ids">>, [<<"a">>, <<"c">>, <<"b">>]}, - {<<"other_field">>, <<"some_value">>} - ]}, - Rep1 = couch_replicator_docs:parse_rep_doc_without_id(EJson1), - EJson2 = {[ - {<<"other_field">>, <<"unrelated">>}, - {<<"target">>, <<"local">>}, - {<<"source">>, <<"http://host.com/source_db">>}, - {<<"doc_ids">>, [<<"c">>, <<"a">>, <<"b">>]}, - {<<"other_field2">>, <<"unrelated2">>} - ]}, - Rep2 = couch_replicator_docs:parse_rep_doc_without_id(EJson2), - ?assertEqual(normalize_rep(Rep1), normalize_rep(Rep2)) - end) - }. - - get_worker_ref_test_() -> { setup, diff --git a/src/couch_replicator/src/couch_replicator_scheduler.erl b/src/couch_replicator/src/couch_replicator_scheduler.erl index 50896c548..762ef18fe 100644 --- a/src/couch_replicator/src/couch_replicator_scheduler.erl +++ b/src/couch_replicator/src/couch_replicator_scheduler.erl @@ -102,12 +102,17 @@ start_link() -> -spec add_job(#rep{}) -> ok. add_job(#rep{} = Rep) when Rep#rep.id /= undefined -> - Job = #job{ - id = Rep#rep.id, - rep = Rep, - history = [{added, os:timestamp()}] - }, - gen_server:call(?MODULE, {add_job, Job}, infinity). + case existing_replication(Rep) of + false -> + Job = #job{ + id = Rep#rep.id, + rep = Rep, + history = [{added, os:timestamp()}] + }, + gen_server:call(?MODULE, {add_job, Job}, infinity); + true -> + ok + end. -spec remove_job(job_id()) -> ok. @@ -925,6 +930,17 @@ stats_fold(#job{pid = P, history = [{started, _} | _]}, Acc) when is_pid(P) -> Acc#stats_acc{running_n = Acc#stats_acc.running_n + 1}. +-spec existing_replication(#rep{}) -> boolean(). +existing_replication(#rep{} = NewRep) -> + case job_by_id(NewRep#rep.id) of + {ok, #job{rep = CurRep}} -> + NormCurRep = couch_replicator_utils:normalize_rep(CurRep), + NormNewRep = couch_replicator_utils:normalize_rep(NewRep), + NormCurRep == NormNewRep; + {error, not_found} -> + false + end. + -ifdef(TEST). @@ -1017,6 +1033,7 @@ scheduler_test_() -> t_start_oldest_first(), t_dont_stop_if_nothing_pending(), t_max_churn_limits_number_of_rotated_jobs(), + t_existing_jobs(), t_if_pending_less_than_running_start_all_pending(), t_running_less_than_pending_swap_all_running(), t_oneshot_dont_get_rotated(), @@ -1308,6 +1325,30 @@ t_if_permanent_job_crashes_it_stays_in_ets() -> end). 
+t_existing_jobs() -> + ?_test(begin + Rep = #rep{ + id = job1, + db_name = <<"db">>, + source = <<"s">>, + target = <<"t">>, + options = [{continuous, true}] + }, + setup_jobs([#job{id = Rep#rep.id, rep = Rep}]), + NewRep = #rep{ + id = Rep#rep.id, + db_name = <<"db">>, + source = <<"s">>, + target = <<"t">>, + options = [{continuous, true}] + }, + ?assert(existing_replication(NewRep)), + ?assertNot(existing_replication(NewRep#rep{source = <<"s1">>})), + ?assertNot(existing_replication(NewRep#rep{target = <<"t1">>})), + ?assertNot(existing_replication(NewRep#rep{options = []})) + end). + + t_job_summary_running() -> ?_test(begin Job = #job{ diff --git a/src/couch_replicator/src/couch_replicator_utils.erl b/src/couch_replicator/src/couch_replicator_utils.erl index 218fcf501..b0d706953 100644 --- a/src/couch_replicator/src/couch_replicator_utils.erl +++ b/src/couch_replicator/src/couch_replicator_utils.erl @@ -28,7 +28,8 @@ pp_rep_id/1, iso8601/1, filter_state/3, - remove_basic_auth_from_headers/1 + remove_basic_auth_from_headers/1, + normalize_rep/1 ]). -export([ @@ -208,6 +209,25 @@ decode_basic_creds(Base64) -> end. +% Normalize a #rep{} record such that it doesn't contain time dependent fields +% pids (like httpc pools), and options / props are sorted. This function would +% used during comparisons. +-spec normalize_rep(#rep{} | nil) -> #rep{} | nil. +normalize_rep(nil) -> + nil; + +normalize_rep(#rep{} = Rep)-> + #rep{ + source = couch_replicator_api_wrap:normalize_db(Rep#rep.source), + target = couch_replicator_api_wrap:normalize_db(Rep#rep.target), + options = Rep#rep.options, % already sorted in make_options/1 + type = Rep#rep.type, + view = Rep#rep.view, + doc_id = Rep#rep.doc_id, + db_name = Rep#rep.db_name + }. + + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). @@ -259,4 +279,31 @@ b64creds(User, Pass) -> base64:encode_to_string(User ++ ":" ++ Pass). +normalize_rep_test_() -> + { + setup, + fun() -> meck:expect(config, get, + fun(_, _, Default) -> Default end) + end, + fun(_) -> meck:unload() end, + ?_test(begin + EJson1 = {[ + {<<"source">>, <<"http://host.com/source_db">>}, + {<<"target">>, <<"local">>}, + {<<"doc_ids">>, [<<"a">>, <<"c">>, <<"b">>]}, + {<<"other_field">>, <<"some_value">>} + ]}, + Rep1 = couch_replicator_docs:parse_rep_doc_without_id(EJson1), + EJson2 = {[ + {<<"other_field">>, <<"unrelated">>}, + {<<"target">>, <<"local">>}, + {<<"source">>, <<"http://host.com/source_db">>}, + {<<"doc_ids">>, [<<"c">>, <<"a">>, <<"b">>]}, + {<<"other_field2">>, <<"unrelated2">>} + ]}, + Rep2 = couch_replicator_docs:parse_rep_doc_without_id(EJson2), + ?assertEqual(normalize_rep(Rep1), normalize_rep(Rep2)) + end) + }. + -endif. -- cgit v1.2.1 From b3549149f1a871974cff3c9a260c217eb57ec849 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Wed, 26 Sep 2018 10:00:27 -0400 Subject: Switch rexi server_per_node to true by default This has been solid for years and when not enabled can be a performance bottleneck. 
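For operators who need the previous behaviour, it should still be
possible to opt out with a local.ini override along these lines (a
sketch mirroring the [rexi] section shown in the default.ini hunk
below):

    [rexi]
    server_per_node = false
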
Fixes #1625
---
 rel/overlay/etc/default.ini | 2 +-
 src/rexi/src/rexi_utils.erl | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index dc2e51cc0..ba2a498eb 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -187,7 +187,7 @@ port = 6984

 ; [rexi]
 ; buffer_count = 2000
-; server_per_node = false
+; server_per_node = true

 ; [global_changes]
 ; max_event_delay = 25
diff --git a/src/rexi/src/rexi_utils.erl b/src/rexi/src/rexi_utils.erl
index 11dbb252a..960318418 100644
--- a/src/rexi/src/rexi_utils.erl
+++ b/src/rexi/src/rexi_utils.erl
@@ -16,8 +16,8 @@

 %% @doc Return a rexi_server id for the given node.
 server_id(Node) ->
-    case config:get("rexi", "server_per_node", "false") of
-        "true" ->
+    case config:get_boolean("rexi", "server_per_node", true) of
+        true ->
             list_to_atom("rexi_server_" ++ atom_to_list(Node));
         _ ->
             rexi_server
-- cgit v1.2.1


From abb2a86545d311884fe3256a30c4f7d75e0b26ef Mon Sep 17 00:00:00 2001
From: Nick Vatamaniuc
Date: Wed, 26 Sep 2018 13:08:45 -0400
Subject: Make sure to start per-node rexi servers right away

This ensures they will be ready to process requests as soon as the
application starts up. This should make the service available sooner and
should help tests which set up and tear down the services repeatedly,
where it would avoid an annoying retry-until-ready loop.

Per-node servers/buffers are started in the init method of the monitors.
There is no chance of deadlock there because per-node supervisors are
started before the monitors.

Issue #1625
---
 src/rexi/src/rexi_server_mon.erl | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/rexi/src/rexi_server_mon.erl b/src/rexi/src/rexi_server_mon.erl
index 86fecaff6..cfe1144ce 100644
--- a/src/rexi/src/rexi_server_mon.erl
+++ b/src/rexi/src/rexi_server_mon.erl
@@ -68,6 +68,8 @@ cluster_stable(Server) ->
 init(ChildMod) ->
     {ok, _Mem3Cluster} = mem3_cluster:start_link(?MODULE, self(),
         ?CLUSTER_STABILITY_PERIOD_SEC, ?CLUSTER_STABILITY_PERIOD_SEC),
+    start_servers(ChildMod),
+    couch_log:notice("~s : started servers", [ChildMod]),
     {ok, ChildMod}.
-- cgit v1.2.1 From 4267e1ff805e6f5719f770c780874b3d8ba78d9a Mon Sep 17 00:00:00 2001 From: ermouth Date: Fri, 21 Sep 2018 20:52:30 +0300 Subject: js rewrite send body Fixes #1612 --- src/chttpd/src/chttpd_rewrite.erl | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/chttpd/src/chttpd_rewrite.erl b/src/chttpd/src/chttpd_rewrite.erl index 039390eed..24a48248b 100644 --- a/src/chttpd/src/chttpd_rewrite.erl +++ b/src/chttpd/src/chttpd_rewrite.erl @@ -64,7 +64,14 @@ do_rewrite(#httpd{mochi_req=MochiReq}=Req, {Props}=Rewrite) when is_list(Props) Path, MochiReq:get(version), Headers), - NewMochiReq:cleanup(), + Body = case couch_util:get_value(<<"body">>, Props) of + undefined -> erlang:get(mochiweb_request_body); + B -> B + end, + case Body of + undefined -> NewMochiReq:cleanup(); + _ -> erlang:put(mochiweb_request_body, Body) + end, couch_log:debug("rewrite to ~p", [Path]), chttpd:handle_request_int(NewMochiReq); Code -> -- cgit v1.2.1 From 7b48b63a7044f30b7f133f4b8001c1906a6f3136 Mon Sep 17 00:00:00 2001 From: ermouth Date: Fri, 28 Sep 2018 03:10:29 +0300 Subject: add test for 1612 --- test/javascript/tests/rewrite_js.js | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/test/javascript/tests/rewrite_js.js b/test/javascript/tests/rewrite_js.js index 9893127af..22de6c940 100644 --- a/test/javascript/tests/rewrite_js.js +++ b/test/javascript/tests/rewrite_js.js @@ -100,9 +100,12 @@ couchTests.rewrite = function(debug) { return {path: '_list/simpleForm/complexView2', query: {key: JSON.stringify({"c": 1})}}; } - if (req.path.slice(4).join('/') === 'simpleForm/complexView4') { - return {path: '_list/simpleForm/complexView2', - query: {key: JSON.stringify({"c": 1})}}; + if (req.path.slice(4).join('/') === 'simpleForm/sendBody1') { + return {path: '_list/simpleForm/complexView2', + method: 'POST', + query: {limit: '1'}, + headers:{'Content-type':'application/json'}, + body: JSON.stringify( {keys: [{"c": 1}]} )}; } if (req.path.slice(4).join('/') === '/') { return {path: '_view/basicView'}; @@ -283,6 +286,11 @@ couchTests.rewrite = function(debug) { T(xhr.status == 200, "with query params"); T(/Value: doc 5/.test(xhr.responseText)); + // COUCHDB-1612 - send body rewriting get to post + xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/simpleForm/sendBody1"); + T(xhr.status == 200, "get->post rewrite failed:\n"+xhr.responseText); + T(/Value: doc 5 LineNo: 1/.test(xhr.responseText), "get->post rewrite responded wrong:\n"+xhr.responseText); + // COUCHDB-2031 - path normalization versus qs params xhr = CouchDB.request("GET", "/"+dbName+"/_design/test/_rewrite/db/_design/test?meta=true"); T(xhr.status == 200, "path normalization works with qs params"); @@ -340,4 +348,4 @@ couchTests.rewrite = function(debug) { // cleanup db.deleteDb(); } -} +} \ No newline at end of file -- cgit v1.2.1 From 5ee80858da68566a6007b5eb4682963534aa5185 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Fri, 28 Sep 2018 14:15:19 -0400 Subject: Do not crash replicator on VDU function failure Previously a user could insert a VDU function into one of the _replicator databases such that it prevents the replicator application from updating documents in that db. Replicator application would then crash and prevent replications from running on the whole cluster. To avoid crashing the replicator when saving documents, log the error and return `{ok, forbidden}`. 
The return might seem odd but we are asserting that forbidden is an OK value in this context and explicitly handling it. This shape of the return also conforms to the expected `{ok, _Rev}` result, noticing that `_Rev` is never actually used. --- src/couch_replicator/src/couch_replicator_docs.erl | 58 +++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl index 62d21fe12..013475683 100644 --- a/src/couch_replicator/src/couch_replicator_docs.erl +++ b/src/couch_replicator/src/couch_replicator_docs.erl @@ -366,6 +366,14 @@ save_rep_doc(DbName, Doc) -> {ok, Db} = couch_db:open_int(DbName, [?CTX, sys_db]), try couch_db:update_doc(Db, Doc, []) + catch + % User can accidently write a VDU which prevents _replicator from + % updating replication documents. Avoid crashing replicator and thus + % preventing all other replication jobs on the node from running. + throw:{forbidden, Reason} -> + Msg = "~p VDU function preventing doc update to ~s ~s ~p", + couch_log:error(Msg, [?MODULE, DbName, Doc#doc.id, Reason]), + {ok, forbidden} after couch_db:close(Db) end. @@ -694,7 +702,9 @@ error_reason(Reason) -> -ifdef(TEST). --include_lib("eunit/include/eunit.hrl"). + +-include_lib("couch/include/couch_eunit.hrl"). + check_options_pass_values_test() -> ?assertEqual(check_options([]), []), @@ -766,4 +776,50 @@ check_strip_credentials_test() -> } ]]. + +setup() -> + DbName = ?tempdb(), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db), + create_vdu(DbName), + DbName. + + +teardown(DbName) when is_binary(DbName) -> + couch_server:delete(DbName, [?ADMIN_CTX]), + ok. + + +create_vdu(DbName) -> + couch_util:with_db(DbName, fun(Db) -> + VduFun = <<"function(newdoc, olddoc, userctx) {throw({'forbidden':'fail'})}">>, + Doc = #doc{ + id = <<"_design/vdu">>, + body = {[{<<"validate_doc_update">>, VduFun}]} + }, + {ok, _} = couch_db:update_docs(Db, [Doc]), + couch_db:ensure_full_commit(Db) + end). + + +update_replicator_doc_with_bad_vdu_test_() -> + { + setup, + fun test_util:start_couch/0, + fun test_util:stop_couch/1, + { + foreach, fun setup/0, fun teardown/1, + [ + fun t_vdu_does_not_crash_on_save/1 + ] + } + }. + + +t_vdu_does_not_crash_on_save(DbName) -> + ?_test(begin + Doc = #doc{id = <<"some_id">>, body = {[{<<"foo">>, 42}]}}, + ?assertEqual({ok, forbidden}, save_rep_doc(DbName, Doc)) + end). + -endif. -- cgit v1.2.1 From 4d3dfc6c0aad1f8786d89616b961a51afec2cd6e Mon Sep 17 00:00:00 2001 From: "Paul J. Davis" Date: Thu, 21 Jun 2018 17:16:33 -0500 Subject: Enable parameterized module calls This is a temporary bandaid to allow us to continue using parameterized modules with Erlang 21. We'll have to go back and modify every one of these files to avoid that as well as figuring out how to upgrade mochiweb to something that doesn't use parameterized modules by the time they are fully removed from Erlang. 
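For reference, a parameterized-module (tuple) call is one where a tuple,
rather than a module atom, appears in the call's module position;
mochiweb request objects rely on this style throughout these files. A
minimal sketch of the dispatch that `-compile(tuple_calls).` re-enables
(MochiReq stands in for any mochiweb_request tuple; the expansion shown
is an illustration, not code from this patch):

    %% A tuple call through a mochiweb request object:
    %%     Path = MochiReq:get(path)
    %% with tuple_calls enabled dispatches as an ordinary call with the
    %% tuple appended as the last argument:
    %%     Path = mochiweb_request:get(path, MochiReq)
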
Fixes #1396 --- src/chttpd/src/chttpd.erl | 3 +++ src/chttpd/src/chttpd_db.erl | 3 +++ src/chttpd/src/chttpd_external.erl | 2 ++ src/chttpd/src/chttpd_prefer_header.erl | 1 + src/chttpd/src/chttpd_rewrite.erl | 3 +++ src/chttpd/test/chttpd_prefer_header_test.erl | 3 +++ src/couch/src/couch_httpd.erl | 3 +++ src/couch/src/couch_httpd_auth.erl | 3 +++ src/couch/src/couch_httpd_db.erl | 3 +++ src/couch/src/couch_httpd_external.erl | 2 ++ src/couch/src/couch_httpd_proxy.erl | 2 ++ src/couch/src/couch_httpd_rewrite.erl | 3 +++ src/couch/src/couch_httpd_vhost.erl | 2 ++ src/couch/test/couchdb_http_proxy_tests.erl | 2 ++ src/couch/test/test_web.erl | 2 ++ src/couch_index/src/couch_index.erl | 2 ++ src/fabric/src/fabric_doc_attachments.erl | 2 ++ src/fabric/src/fabric_doc_atts.erl | 2 ++ 18 files changed, 43 insertions(+) diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl index a5628396b..b606ad414 100644 --- a/src/chttpd/src/chttpd.erl +++ b/src/chttpd/src/chttpd.erl @@ -11,6 +11,9 @@ % the License. -module(chttpd). + +-compile(tuple_calls). + -include_lib("couch/include/couch_db.hrl"). -include_lib("chttpd/include/chttpd.hrl"). diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl index 49d7b5849..9cde6d907 100644 --- a/src/chttpd/src/chttpd_db.erl +++ b/src/chttpd/src/chttpd_db.erl @@ -11,6 +11,9 @@ % the License. -module(chttpd_db). + +-compile(tuple_calls). + -include_lib("couch/include/couch_db.hrl"). -include_lib("couch_mrview/include/couch_mrview.hrl"). diff --git a/src/chttpd/src/chttpd_external.erl b/src/chttpd/src/chttpd_external.erl index 64664b98e..fa35c6ba2 100644 --- a/src/chttpd/src/chttpd_external.erl +++ b/src/chttpd/src/chttpd_external.erl @@ -12,6 +12,8 @@ -module(chttpd_external). +-compile(tuple_calls). + -export([handle_external_req/2, handle_external_req/3]). -export([send_external_response/2]). -export([json_req_obj_fields/0, json_req_obj/2, json_req_obj/3, json_req_obj/4]). diff --git a/src/chttpd/src/chttpd_prefer_header.erl b/src/chttpd/src/chttpd_prefer_header.erl index f550e80e5..1ad1443ea 100644 --- a/src/chttpd/src/chttpd_prefer_header.erl +++ b/src/chttpd/src/chttpd_prefer_header.erl @@ -12,6 +12,7 @@ -module(chttpd_prefer_header). +-compile(tuple_calls). -export([ maybe_return_minimal/2 diff --git a/src/chttpd/src/chttpd_rewrite.erl b/src/chttpd/src/chttpd_rewrite.erl index 24a48248b..019651374 100644 --- a/src/chttpd/src/chttpd_rewrite.erl +++ b/src/chttpd/src/chttpd_rewrite.erl @@ -16,6 +16,9 @@ %% @doc Module for URL rewriting by pattern matching. -module(chttpd_rewrite). + +-compile(tuple_calls). + -export([handle_rewrite_req/3]). -include_lib("couch/include/couch_db.hrl"). diff --git a/src/chttpd/test/chttpd_prefer_header_test.erl b/src/chttpd/test/chttpd_prefer_header_test.erl index a8a5b3d26..0f43ba437 100644 --- a/src/chttpd/test/chttpd_prefer_header_test.erl +++ b/src/chttpd/test/chttpd_prefer_header_test.erl @@ -11,6 +11,9 @@ % the License. -module(chttpd_prefer_header_test). + +-compile(tuple_calls). + -include_lib("couch/include/couch_db.hrl"). -include_lib("eunit/include/eunit.hrl"). diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl index a8cfca6d2..ec397c298 100644 --- a/src/couch/src/couch_httpd.erl +++ b/src/couch/src/couch_httpd.erl @@ -11,6 +11,9 @@ % the License. -module(couch_httpd). + +-compile(tuple_calls). + -include_lib("couch/include/couch_db.hrl"). -export([start_link/0, start_link/1, stop/0, handle_request/5]). 
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl index 6ac7b75af..660727195 100644 --- a/src/couch/src/couch_httpd_auth.erl +++ b/src/couch/src/couch_httpd_auth.erl @@ -11,6 +11,9 @@ % the License. -module(couch_httpd_auth). + +-compile(tuple_calls). + -include_lib("couch/include/couch_db.hrl"). -export([party_mode_handler/1]). diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl index 81209d9ff..ced146e39 100644 --- a/src/couch/src/couch_httpd_db.erl +++ b/src/couch/src/couch_httpd_db.erl @@ -11,6 +11,9 @@ % the License. -module(couch_httpd_db). + +-compile(tuple_calls). + -include_lib("couch/include/couch_db.hrl"). -export([handle_request/1, handle_compact_req/2, handle_design_req/2, diff --git a/src/couch/src/couch_httpd_external.erl b/src/couch/src/couch_httpd_external.erl index 1f2f1e884..684f90091 100644 --- a/src/couch/src/couch_httpd_external.erl +++ b/src/couch/src/couch_httpd_external.erl @@ -12,6 +12,8 @@ -module(couch_httpd_external). +-compile(tuple_calls). + -export([handle_external_req/2, handle_external_req/3]). -export([send_external_response/2, json_req_obj/2, json_req_obj/3]). -export([default_or_content_type/2, parse_external_response/1]). diff --git a/src/couch/src/couch_httpd_proxy.erl b/src/couch/src/couch_httpd_proxy.erl index 7e9aed721..d2c7acc3a 100644 --- a/src/couch/src/couch_httpd_proxy.erl +++ b/src/couch/src/couch_httpd_proxy.erl @@ -11,6 +11,8 @@ % the License. -module(couch_httpd_proxy). +-compile(tuple_calls). + -export([handle_proxy_req/2]). -include_lib("couch/include/couch_db.hrl"). diff --git a/src/couch/src/couch_httpd_rewrite.erl b/src/couch/src/couch_httpd_rewrite.erl index e2a24218b..2845c0b16 100644 --- a/src/couch/src/couch_httpd_rewrite.erl +++ b/src/couch/src/couch_httpd_rewrite.erl @@ -16,6 +16,9 @@ %% @doc Module for URL rewriting by pattern matching. -module(couch_httpd_rewrite). + +-compile(tuple_calls). + -export([handle_rewrite_req/3]). -include_lib("couch/include/couch_db.hrl"). diff --git a/src/couch/src/couch_httpd_vhost.erl b/src/couch/src/couch_httpd_vhost.erl index f23f41da2..d8f952190 100644 --- a/src/couch/src/couch_httpd_vhost.erl +++ b/src/couch/src/couch_httpd_vhost.erl @@ -15,6 +15,8 @@ -vsn(1). -behaviour(config_listener). +-compile(tuple_calls). + -export([start_link/0, reload/0, get_state/0, dispatch_host/1]). -export([urlsplit_netloc/2, redirect_to_vhost/2]). -export([host/1, split_host_port/1]). diff --git a/src/couch/test/couchdb_http_proxy_tests.erl b/src/couch/test/couchdb_http_proxy_tests.erl index d54ff15c4..f60ba3b08 100644 --- a/src/couch/test/couchdb_http_proxy_tests.erl +++ b/src/couch/test/couchdb_http_proxy_tests.erl @@ -12,6 +12,8 @@ -module(couchdb_http_proxy_tests). +-compile(tuple_calls). + -include_lib("couch/include/couch_eunit.hrl"). -record(req, {method=get, path="", headers=[], body="", opts=[]}). diff --git a/src/couch/test/test_web.erl b/src/couch/test/test_web.erl index d568b7e45..b1b3e65c9 100644 --- a/src/couch/test/test_web.erl +++ b/src/couch/test/test_web.erl @@ -13,6 +13,8 @@ -module(test_web). -behaviour(gen_server). +-compile(tuple_calls). + -include_lib("couch/include/couch_eunit.hrl"). -export([start_link/0, stop/0, loop/1, get_port/0, set_assert/1, check_last/0]). diff --git a/src/couch_index/src/couch_index.erl b/src/couch_index/src/couch_index.erl index 8fba0a23f..cae95779e 100644 --- a/src/couch_index/src/couch_index.erl +++ b/src/couch_index/src/couch_index.erl @@ -13,6 +13,8 @@ -module(couch_index). 
-behaviour(gen_server). +-compile(tuple_calls). + -vsn(3). %% API diff --git a/src/fabric/src/fabric_doc_attachments.erl b/src/fabric/src/fabric_doc_attachments.erl index 7c6ba6610..723b9e804 100644 --- a/src/fabric/src/fabric_doc_attachments.erl +++ b/src/fabric/src/fabric_doc_attachments.erl @@ -12,6 +12,8 @@ -module(fabric_doc_attachments). +-compile(tuple_calls). + -include_lib("fabric/include/fabric.hrl"). -include_lib("couch/include/couch_db.hrl"). diff --git a/src/fabric/src/fabric_doc_atts.erl b/src/fabric/src/fabric_doc_atts.erl index 7ef5dd893..a3aae80ec 100644 --- a/src/fabric/src/fabric_doc_atts.erl +++ b/src/fabric/src/fabric_doc_atts.erl @@ -12,6 +12,8 @@ -module(fabric_doc_atts). +-compile(tuple_calls). + -include_lib("fabric/include/fabric.hrl"). -include_lib("couch/include/couch_db.hrl"). -- cgit v1.2.1 From 2e8cde50bd3973fdb40fcd7c0ecd9d299b80dc85 Mon Sep 17 00:00:00 2001 From: "Paul J. Davis" Date: Thu, 21 Jun 2018 12:44:32 -0500 Subject: Fix couch_log eunit tests Fixes #1396 --- src/couch/src/couch_httpd.erl | 7 ++++--- src/couch_log/src/couch_log_monitor.erl | 1 + src/couch_log/test/couch_log_test_util.erl | 9 ++++++--- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl index ec397c298..e66a78e70 100644 --- a/src/couch/src/couch_httpd.erl +++ b/src/couch/src/couch_httpd.erl @@ -40,6 +40,8 @@ -define(HANDLER_NAME_IN_MODULE_POS, 6). +-define(MAX_DRAIN_BYTES, 1048576). +-define(MAX_DRAIN_TIME_MSEC, 1000). start_link() -> start_link(http). @@ -1181,10 +1183,9 @@ respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) -> % just increases the chances of 413 being detected correctly by the client % (rather than getting a brutal TCP reset). erlang:put(mochiweb_request_force_close, true), - Socket = MochiReq:get(socket), - mochiweb_socket:recv(Socket, 0, 0), Result = MochiReq:Type({413, Headers, Args}), - mochiweb_socket:recv(Socket, 0, 0), + Socket = MochiReq:get(socket), + mochiweb_socket:recv(Socket, ?MAX_DRAIN_BYTES, ?MAX_DRAIN_TIME_MSEC), Result; respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) -> MochiReq:Type({Code, Headers, Args}). diff --git a/src/couch_log/src/couch_log_monitor.erl b/src/couch_log/src/couch_log_monitor.erl index 236d34012..ab0ae115f 100644 --- a/src/couch_log/src/couch_log_monitor.erl +++ b/src/couch_log/src/couch_log_monitor.erl @@ -38,6 +38,7 @@ start_link() -> init(_) -> + error_logger:start(), ok = gen_event:add_sup_handler(error_logger, ?HANDLER_MOD, []), {ok, nil}. diff --git a/src/couch_log/test/couch_log_test_util.erl b/src/couch_log/test/couch_log_test_util.erl index 05d64d8a9..c7fd34f2d 100644 --- a/src/couch_log/test/couch_log_test_util.erl +++ b/src/couch_log/test/couch_log_test_util.erl @@ -123,9 +123,12 @@ last_log() -> remove_error_loggers() -> - lists:foreach(fun(Handler) -> - error_logger:delete_report_handler(Handler) - end, gen_event:which_handlers(error_logger)). + ErrorLoggerPid = whereis(error_logger), + if ErrorLoggerPid == undefined -> ok; true -> + lists:foreach(fun(Handler) -> + error_logger:delete_report_handler(Handler) + end, gen_event:which_handlers(ErrorLoggerPid)) + end. config_files() -> -- cgit v1.2.1 From 436e5f0d29ab399d6e14a4c531bfa35336591d41 Mon Sep 17 00:00:00 2001 From: "Paul J. Davis" Date: Thu, 21 Jun 2018 16:23:32 -0500 Subject: Fix use of process_info(Pid, monitored_by) This can now return references that are from NIFs monitoring the process. 
This is important for the new file IO NIFs that monitor the controlling process. For now we'll just take the easy way out by filtering the references from our returned monitor lists. Fixes #1396 --- src/couch/src/couch_bt_engine.erl | 2 +- src/couch/src/couch_file.erl | 15 +++++++++------ src/couch/test/couchdb_views_tests.erl | 3 ++- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl index 6d858ed49..f856bde8f 100644 --- a/src/couch/src/couch_bt_engine.erl +++ b/src/couch/src/couch_bt_engine.erl @@ -196,7 +196,7 @@ decref(St) -> monitored_by(St) -> case erlang:process_info(St#st.fd, monitored_by) of {monitored_by, Pids} -> - Pids; + lists:filter(fun is_pid/1, Pids); _ -> [] end. diff --git a/src/couch/src/couch_file.erl b/src/couch/src/couch_file.erl index 6aa8d0b89..0c6070dad 100644 --- a/src/couch/src/couch_file.erl +++ b/src/couch/src/couch_file.erl @@ -695,19 +695,22 @@ split_iolist([Sublist| Rest], SplitAt, BeginAcc) when is_list(Sublist) -> split_iolist([Byte | Rest], SplitAt, BeginAcc) when is_integer(Byte) -> split_iolist(Rest, SplitAt - 1, [Byte | BeginAcc]). +monitored_by_pids() -> + {monitored_by, PidsAndRefs} = process_info(self(), monitored_by), + lists:filter(fun is_pid/1, PidsAndRefs). % System dbs aren't monitored by couch_stats_process_tracker is_idle(#file{is_sys=true}) -> - case process_info(self(), monitored_by) of - {monitored_by, []} -> true; + case monitored_by_pids() of + [] -> true; _ -> false end; is_idle(#file{is_sys=false}) -> Tracker = whereis(couch_stats_process_tracker), - case process_info(self(), monitored_by) of - {monitored_by, []} -> true; - {monitored_by, [Tracker]} -> true; - {monitored_by, [_]} -> exit(tracker_monitoring_failed); + case monitored_by_pids() of + [] -> true; + [Tracker] -> true; + [_] -> exit(tracker_monitoring_failed); _ -> false end. diff --git a/src/couch/test/couchdb_views_tests.erl b/src/couch/test/couchdb_views_tests.erl index 1b1a8e56b..60bb5c975 100644 --- a/src/couch/test/couchdb_views_tests.erl +++ b/src/couch/test/couchdb_views_tests.erl @@ -523,7 +523,8 @@ view_cleanup(DbName) -> count_users(DbName) -> {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), DbPid = couch_db:get_pid(Db), - {monitored_by, Monitors} = process_info(DbPid, monitored_by), + {monitored_by, Monitors0} = process_info(DbPid, monitored_by), + Monitors = lists:filter(fun is_pid/1, Monitors0), CouchFiles = [P || P <- Monitors, couch_file:process_info(P) =/= undefined], ok = couch_db:close(Db), length(lists:usort(Monitors) -- [self() | CouchFiles]). -- cgit v1.2.1 From be6de6f32d0be7147dce8ebe39dd54c07d7be31f Mon Sep 17 00:00:00 2001 From: "Paul J. 
Davis" Date: Thu, 21 Jun 2018 16:28:56 -0500 Subject: Update rebar.config.script and travis CI Fixes #1396 --- .travis.yml | 1 + rebar.config.script | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index acb0b5102..01b2862a3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,6 +3,7 @@ sudo: false os: linux otp_release: + - 21.1 - 20.3 - 19.3 - 18.3 diff --git a/rebar.config.script b/rebar.config.script index 22dde7cde..3bb1a584c 100644 --- a/rebar.config.script +++ b/rebar.config.script @@ -65,7 +65,7 @@ DepDescs = [ {hyper, "hyper", {tag, "CouchDB-2.2.0-4"}}, {ibrowse, "ibrowse", {tag, "CouchDB-4.0.1"}}, {jiffy, "jiffy", {tag, "CouchDB-0.14.11-2"}}, -{mochiweb, "mochiweb", {tag, "v2.17.0"}}, +{mochiweb, "mochiweb", {tag, "CouchDB-v2.18.0-1"}}, {meck, "meck", {tag, "0.8.8"}} ], @@ -91,7 +91,7 @@ ErlOpts = case os:getenv("ERL_OPTS") of end, AddConfig = [ - {require_otp_vsn, "17|18|19|20"}, + {require_otp_vsn, "17|18|19|20|21"}, {deps_dir, "src"}, {deps, lists:map(MakeDep, DepDescs)}, {sub_dirs, SubDirs}, -- cgit v1.2.1 From aa63804c3b3fc4b637d7095f5ff993ff2d01818e Mon Sep 17 00:00:00 2001 From: ILYA Khlopotov Date: Mon, 8 Oct 2018 04:11:52 -0700 Subject: Validate database prefix against DBNAME_REGEX for system dbs Previously we only checked that the suffix of the database is matching one of the predefined system databases. We really should check the prefix against DBNAME_REGEXP to prevent creation of illegally named databases. This fixes #1644 --- src/couch/src/couch_db.erl | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index 55664e964..4d76ceedc 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -1738,8 +1738,15 @@ validate_dbname_int(DbName, Normalized) when is_binary(DbName) -> is_systemdb(DbName) when is_list(DbName) -> is_systemdb(?l2b(DbName)); is_systemdb(DbName) when is_binary(DbName) -> - lists:member(dbname_suffix(DbName), ?SYSTEM_DATABASES). - + Normalized = normalize_dbname(DbName), + Suffix = filename:basename(Normalized), + case {filename:dirname(Normalized), lists:member(Suffix, ?SYSTEM_DATABASES)} of + {<<".">>, Result} -> Result; + {Prefix, false} -> false; + {Prefix, true} -> + ReOpts = [{capture,none}, dollar_endonly], + re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match + end. 
set_design_doc_keys(Options1) -> Dir = case lists:keyfind(dir, 1, Options1) of @@ -1831,7 +1838,9 @@ validate_dbname_fail_test_() -> Cases = generate_cases("_long/co$mplex-/path+/_something") ++ generate_cases("_something") ++ generate_cases_with_shards("long/co$mplex-/path+/_something#") - ++ generate_cases_with_shards("long/co$mplex-/path+/some.thing"), + ++ generate_cases_with_shards("long/co$mplex-/path+/some.thing") + ++ generate_cases("!abcdefg/werwej/_users") + ++ generate_cases_with_shards("!abcdefg/werwej/_users"), { foreach, fun setup/0, fun teardown/1, [should_fail_validate_dbname(A) || {_, A} <- Cases] -- cgit v1.2.1 From d9843824ee16ef237b182abd30b5dbfa5d251bcd Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Thu, 11 Oct 2018 17:13:46 +0100 Subject: Pass user_ctx in _bulk_get This fixes _bulk_get for _users db and probably others I don't know --- src/chttpd/src/chttpd_db.erl | 3 ++- src/chttpd/test/chttpd_db_bulk_get_test.erl | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl index 9cde6d907..d0a5a5b74 100644 --- a/src/chttpd/src/chttpd_db.erl +++ b/src/chttpd/src/chttpd_db.erl @@ -482,8 +482,9 @@ db_req(#httpd{method='POST', path_parts=[_, <<"_bulk_get">>]}=Req, Db) -> throw({bad_request, <<"Missing JSON list of 'docs'.">>}); Docs -> #doc_query_args{ - options = Options + options = Options0 } = bulk_get_parse_doc_query(Req), + Options = [{user_ctx, Req#httpd.user_ctx} | Options0], {ok, Resp} = start_json_response(Req, 200), send_chunk(Resp, <<"{\"results\": [">>), diff --git a/src/chttpd/test/chttpd_db_bulk_get_test.erl b/src/chttpd/test/chttpd_db_bulk_get_test.erl index f8921311b..908d1f022 100644 --- a/src/chttpd/test/chttpd_db_bulk_get_test.erl +++ b/src/chttpd/test/chttpd_db_bulk_get_test.erl @@ -214,7 +214,8 @@ should_include_attachments_when_atts_since_specified(_) -> ?_assert(meck:called(fabric, open_revs, [nil, DocId, [{1, <<"revorev">>}], - [{atts_since, [{1, <<"abc">>}]}, attachments]])). + [{atts_since, [{1, <<"abc">>}]}, attachments, + {user_ctx, undefined}]])). %% helpers -- cgit v1.2.1 From ec7ec28164dd62130aa475c7edae7241a48af164 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Thu, 11 Oct 2018 16:00:59 -0300 Subject: Restrict access to _active_tasks to server admin --- src/chttpd/src/chttpd_misc.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl index c72ef7cd4..7b417b442 100644 --- a/src/chttpd/src/chttpd_misc.erl +++ b/src/chttpd/src/chttpd_misc.erl @@ -178,6 +178,7 @@ handle_dbs_info_req(Req) -> send_method_not_allowed(Req, "POST"). 
handle_task_status_req(#httpd{method='GET'}=Req) -> + ok = chttpd:verify_is_server_admin(Req), {Replies, _BadNodes} = gen_server:multi_call(couch_task_status, all), Response = lists:flatmap(fun({Node, Tasks}) -> [{[{node,Node} | Task]} || Task <- Tasks] -- cgit v1.2.1 From 87fde5988e84913909ed1ad5258f151f7ef4c165 Mon Sep 17 00:00:00 2001 From: jiangph Date: Thu, 11 Oct 2018 14:25:15 +0800 Subject: Add document_purges counter for stats COUCHDB-3326 --- src/chttpd/src/chttpd_db.erl | 4 ++++ src/couch/priv/stats_descriptions.cfg | 12 ++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl index d0a5a5b74..bd01b93bd 100644 --- a/src/chttpd/src/chttpd_db.erl +++ b/src/chttpd/src/chttpd_db.erl @@ -523,6 +523,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) -> false -> throw({bad_request, "Exceeded maximum number of revisions."}); true -> ok end, + couch_stats:increment_counter([couchdb, document_purges, total], length(IdsRevs2)), {ok, Results} = fabric:purge_docs(Db, IdsRevs2, Options), {Code, Json} = purge_results_to_json(IdsRevs2, Results), send_json(Req, Code, {[{<<"purge_seq">>, null}, {<<"purged">>, {Json}}]}); @@ -1027,14 +1028,17 @@ purge_results_to_json([], []) -> {201, []}; purge_results_to_json([{DocId, _Revs} | RIn], [{ok, PRevs} | ROut]) -> {Code, Results} = purge_results_to_json(RIn, ROut), + couch_stats:increment_counter([couchdb, document_purges, success]), {Code, [{DocId, couch_doc:revs_to_strs(PRevs)} | Results]}; purge_results_to_json([{DocId, _Revs} | RIn], [{accepted, PRevs} | ROut]) -> {Code, Results} = purge_results_to_json(RIn, ROut), + couch_stats:increment_counter([couchdb, document_purges, success]), NewResults = [{DocId, couch_doc:revs_to_strs(PRevs)} | Results], {erlang:max(Code, 202), NewResults}; purge_results_to_json([{DocId, _Revs} | RIn], [Error | ROut]) -> {Code, Results} = purge_results_to_json(RIn, ROut), {NewCode, ErrorStr, Reason} = chttpd:error_info(Error), + couch_stats:increment_counter([couchdb, document_purges, failure]), NewResults = [{DocId, {[{error, ErrorStr}, {reason, Reason}]}} | Results], {erlang:max(NewCode, Code), NewResults}. diff --git a/src/couch/priv/stats_descriptions.cfg b/src/couch/priv/stats_descriptions.cfg index bceb0cea8..e5ac9d722 100644 --- a/src/couch/priv/stats_descriptions.cfg +++ b/src/couch/priv/stats_descriptions.cfg @@ -50,9 +50,17 @@ {type, counter}, {desc, <<"number of document write operations">>} ]}. -{[couchdb, document_purges], [ +{[couchdb, document_purges, total], [ {type, counter}, - {desc, <<"number of document purge operations">>} + {desc, <<"number of total document purge operations">>} +]}. +{[couchdb, document_purges, success], [ + {type, counter}, + {desc, <<"number of successful document purge operations">>} +]}. +{[couchdb, document_purges, failure], [ + {type, counter}, + {desc, <<"number of failed document purge operations">>} ]}. {[couchdb, local_document_writes], [ {type, counter}, -- cgit v1.2.1 From 1b28c58654c049977ce08e1a87ea854f5e5fdd55 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Mon, 15 Oct 2018 17:13:05 +0100 Subject: Test correct condition for exceed_limit error Previously we were testing if Pos + TotalBytes exceeded the pread limit. This is the wrong logic entirely. We are trying to prevent an attempted call to file:pread/3 where the third parameter, the number of bytes to read, is a very large number (due to a corruption elsewhere, say). 
With the old check we would instead throw exceed_limit as soon as a file
got above a certain size, regardless of how many bytes a given read
requested.

I switched this to an if statement to make it clear that the "read past
EOF" and "try to read too many bytes" checks are quite distinct from
each other.
---
 src/couch/src/couch_file.erl | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/src/couch/src/couch_file.erl b/src/couch/src/couch_file.erl
index 0c6070dad..d6e4066db 100644
--- a/src/couch/src/couch_file.erl
+++ b/src/couch/src/couch_file.erl
@@ -614,18 +614,18 @@ read_raw_iolist_int(Fd, {Pos, _Size}, Len) -> % 0110 UPGRADE CODE
 read_raw_iolist_int(#file{fd = Fd, pread_limit = Limit} = F, Pos, Len) ->
     BlockOffset = Pos rem ?SIZE_BLOCK,
     TotalBytes = calculate_total_read_len(BlockOffset, Len),
-    case Pos + TotalBytes of
-        Size when Size > F#file.eof ->
-            couch_stats:increment_counter([pread, exceed_eof]),
-            {_Fd, Filepath} = get(couch_file_fd),
-            throw({read_beyond_eof, Filepath});
-        Size when Size > Limit ->
-            couch_stats:increment_counter([pread, exceed_limit]),
-            {_Fd, Filepath} = get(couch_file_fd),
-            throw({exceed_pread_limit, Filepath, Limit});
-        Size ->
-            {ok, <<RawBin:TotalBytes/binary>>} = file:pread(Fd, Pos, TotalBytes),
-            {remove_block_prefixes(BlockOffset, RawBin), Size}
+    if
+        (Pos + TotalBytes) > F#file.eof ->
+            couch_stats:increment_counter([pread, exceed_eof]),
+            {_Fd, Filepath} = get(couch_file_fd),
+            throw({read_beyond_eof, Filepath});
+        TotalBytes > Limit ->
+            couch_stats:increment_counter([pread, exceed_limit]),
+            {_Fd, Filepath} = get(couch_file_fd),
+            throw({exceed_pread_limit, Filepath, Limit});
+        true ->
+            {ok, <<RawBin:TotalBytes/binary>>} = file:pread(Fd, Pos, TotalBytes),
+            {remove_block_prefixes(BlockOffset, RawBin), Pos + TotalBytes}
     end.

 -spec extract_md5(iolist()) -> {binary(), iolist()}.
-- cgit v1.2.1


From 78a388d7ee83f71655368941fdd3471b07b41103 Mon Sep 17 00:00:00 2001
From: jiangph
Date: Mon, 15 Oct 2018 19:38:24 +0800
Subject: Upgrade disk version to 7 for databases

 - for databases generated before this code base, the disk version
   needs to be upgraded to 7 or higher so that it can match the
   db_header with purge_tree and purge_seq_tree

COUCHDB-3326
---
 src/couch/src/couch_bt_engine_header.erl         |  4 ++--
 src/couch/test/couch_bt_engine_upgrade_tests.erl | 19 ++++++++++++++++++-
 2 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl
index 467bb2ff8..c6034179a 100644
--- a/src/couch/src/couch_bt_engine_header.erl
+++ b/src/couch/src/couch_bt_engine_header.erl
@@ -234,8 +234,8 @@ upgrade_disk_version(#db_header{}=Header) ->
         2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
         3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
         4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11)
-        5 -> Header; % pre 1.2
-        6 -> Header; % pre clustered purge
+        5 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre 1.2
+        6 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre clustered purge
         ?LATEST_DISK_VERSION -> Header;
         _ ->
             Reason = "Incorrect disk header version",
diff --git a/src/couch/test/couch_bt_engine_upgrade_tests.erl b/src/couch/test/couch_bt_engine_upgrade_tests.erl
index 1d2a86d71..8025528fb 100644
--- a/src/couch/test/couch_bt_engine_upgrade_tests.erl
+++ b/src/couch/test/couch_bt_engine_upgrade_tests.erl
@@ -66,12 +66,13 @@ t_upgrade_without_purge_req() ->
         % db with zero purge entries
         DbName = <<"db_without_purge_req">>,
+        ?assertEqual(6, get_disk_version_from_header(DbName)),
         {ok, UpgradedPurged} =
couch_util:with_db(DbName, fun(Db) -> ?assertEqual(0, couch_db:get_purge_seq(Db)), couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, []) end), ?assertEqual([], UpgradedPurged), - + ?assertEqual(7, get_disk_version_from_header(DbName)), {ok, Rev} = save_doc( DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]} ), @@ -104,10 +105,12 @@ t_upgrade_with_1_purge_req() -> % with a single purge entry DbName = <<"db_with_1_purge_req">>, + ?assertEqual(6, get_disk_version_from_header(DbName)), {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> ?assertEqual(1, couch_db:get_purge_seq(Db)), couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, []) end), + ?assertEqual(7, get_disk_version_from_header(DbName)), ?assertEqual([{1, <<"doc1">>}], UpgradedPurged), {ok, Rev} = save_doc( @@ -142,10 +145,12 @@ t_upgrade_with_N_purge_req() -> % with two docs that have been purged DbName = <<"db_with_2_purge_req">>, + ?assertEqual(6, get_disk_version_from_header(DbName)), {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> ?assertEqual(2, couch_db:get_purge_seq(Db)), couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, []) end), + ?assertEqual(7, get_disk_version_from_header(DbName)), ?assertEqual([{2, <<"doc2">>}], UpgradedPurged), {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}), @@ -179,10 +184,12 @@ t_upgrade_with_1_purge_req_for_2_docs() -> % with one purge req for Doc1 and another purge req for Doc 2 and Doc3 DbName = <<"db_with_1_purge_req_for_2_docs">>, + ?assertEqual(6, get_disk_version_from_header(DbName)), {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) -> ?assertEqual(3, couch_db:get_purge_seq(Db)), couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, []) end), + ?assertEqual(7, get_disk_version_from_header(DbName)), ?assertEqual([{3,<<"doc2">>},{2,<<"doc3">>}], UpgradedPurged), {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc6">>}, {<<"v">>, 1}]}), @@ -218,3 +225,13 @@ save_doc(DbName, Json) -> fold_fun({PSeq, _UUID, Id, _Revs}, Acc) -> {ok, [{PSeq, Id} | Acc]}. + + +get_disk_version_from_header(DbFileName) -> + DbDir = config:get("couchdb", "database_dir"), + DbFilePath = filename:join([DbDir, ?l2b(?b2l(DbFileName) ++ ".couch")]), + {ok, Fd} = couch_file:open(DbFilePath, []), + {ok, Header} = couch_file:read_header(Fd), + DiskVerison = couch_bt_engine_header:disk_version(Header), + couch_file:close(Fd), + DiskVerison. -- cgit v1.2.1 From bb285676b0506f06ee8f4e86f0d4edbdcd615c51 Mon Sep 17 00:00:00 2001 From: jiangph Date: Wed, 17 Oct 2018 17:49:54 +0800 Subject: Fix test failure on upgrade_v5_test COUCHDB-3326 --- src/couch/src/couch_bt_engine_header.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl index c6034179a..9c8e7adb3 100644 --- a/src/couch/src/couch_bt_engine_header.erl +++ b/src/couch/src/couch_bt_engine_header.erl @@ -368,12 +368,12 @@ upgrade_v3_test() -> -endif. -upgrade_v5_test() -> +upgrade_v5_to_v7_test() -> Vsn5Header = mk_header(5), NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)), ?assert(is_record(NewHeader, db_header)), - ?assertEqual(5, disk_version(NewHeader)), + ?assertEqual(7, disk_version(NewHeader)), % Security ptr isn't changed for v5 headers ?assertEqual(bang, security_ptr(NewHeader)). 
-- cgit v1.2.1


From 92c7530fc8d5d881cdb452b5412f91b9ead4f1dd Mon Sep 17 00:00:00 2001
From: Nick Vatamaniuc
Date: Wed, 17 Oct 2018 16:24:28 -0400
Subject: Do not crash couch_log application when gen_* servers send extra args

gen_server, gen_fsm and gen_statem might send extra args when
terminating. This is a recent behavior, and not handling these extra
args could lead to the couch_log application crashing and taking down
the whole VM with it.

There are two improvements to fix the issue:

 1) Handle the extra args. Format them and log them, as they might have
    useful information included.

 2) Wrap the whole `format` function in a `try ... catch` statement.
    This will avoid any other cases where the logger itself is crashing
    when attempting to format error events.
---
 src/couch_log/src/couch_log_formatter.erl       | 52 +++++++++-------
 src/couch_log/test/couch_log_formatter_test.erl | 83 ++++++++++++++++++++++++-
 2 files changed, 112 insertions(+), 23 deletions(-)

diff --git a/src/couch_log/src/couch_log_formatter.erl b/src/couch_log/src/couch_log_formatter.erl
index 5be3619f2..4d81f184f 100644
--- a/src/couch_log/src/couch_log_formatter.erl
+++ b/src/couch_log/src/couch_log_formatter.erl
@@ -56,23 +56,33 @@ format(Level, Pid, Msg) ->
     }.


-format({error, _GL, {Pid, "** Generic server " ++ _, Args}}) ->
+format(Event) ->
+    try
+        do_format(Event)
+    catch
+        Tag:Err ->
+            Msg = "Encountered error ~w when formatting ~w",
+            format(error, self(), Msg, [{Tag, Err}, Event])
+    end.
+
+
+do_format({error, _GL, {Pid, "** Generic server " ++ _, Args}}) ->
     %% gen_server terminate
-    [Name, LastMsg, State, Reason] = Args,
+    [Name, LastMsg, State, Reason | Extra] = Args,
     MsgFmt = "gen_server ~w terminated with reason: ~s~n" ++
-        " last msg: ~p~n state: ~p",
-    MsgArgs = [Name, format_reason(Reason), LastMsg, State],
+        " last msg: ~p~n state: ~p~n extra: ~p",
+    MsgArgs = [Name, format_reason(Reason), LastMsg, State, Extra],
     format(error, Pid, MsgFmt, MsgArgs);

-format({error, _GL, {Pid, "** State machine " ++ _, Args}}) ->
+do_format({error, _GL, {Pid, "** State machine " ++ _, Args}}) ->
    %% gen_fsm terminate
-    [Name, LastMsg, StateName, State, Reason] = Args,
+    [Name, LastMsg, StateName, State, Reason | Extra] = Args,
     MsgFmt = "gen_fsm ~w in state ~w terminated with reason: ~s~n" ++
-        " last msg: ~p~n state: ~p",
-    MsgArgs = [Name, StateName, format_reason(Reason), LastMsg, State],
+        " last msg: ~p~n state: ~p~n extra: ~p",
+    MsgArgs = [Name, StateName, format_reason(Reason), LastMsg, State, Extra],
     format(error, Pid, MsgFmt, MsgArgs);

-format({error, _GL, {Pid, "** gen_event handler" ++ _, Args}}) ->
+do_format({error, _GL, {Pid, "** gen_event handler" ++ _, Args}}) ->
     %% gen_event handler terminate
     [ID, Name, LastMsg, State, Reason] = Args,
     MsgFmt = "gen_event ~w installed in ~w terminated with reason: ~s~n" ++
         " last msg: ~p~n state: ~p",
     MsgArgs = [ID, Name, format_reason(Reason), LastMsg, State],
     format(error, Pid, MsgFmt, MsgArgs);

-format({error, _GL, {emulator, "~s~n", [Msg]}}) when is_list(Msg) ->
+do_format({error, _GL, {emulator, "~s~n", [Msg]}}) when is_list(Msg) ->
     % These messages are for whenever any process exits due
     % to a throw or error. We intercept here to remove the
     % extra newlines.
NewMsg = lists:sublist(Msg, length(Msg) - 1), format(error, emulator, NewMsg); -format({error, _GL, {Pid, Fmt, Args}}) -> +do_format({error, _GL, {Pid, Fmt, Args}}) -> format(error, Pid, Fmt, Args); -format({error_report, _GL, {Pid, std_error, D}}) -> +do_format({error_report, _GL, {Pid, std_error, D}}) -> format(error, Pid, print_silly_list(D)); -format({error_report, _GL, {Pid, supervisor_report, D}}) -> +do_format({error_report, _GL, {Pid, supervisor_report, D}}) -> case lists:sort(D) of [{errorContext, Ctx}, {offender, Off}, {reason, Reason}, {supervisor, Name}] -> @@ -111,20 +121,20 @@ format({error_report, _GL, {Pid, supervisor_report, D}}) -> format(error, Pid, "SUPERVISOR REPORT " ++ print_silly_list(D)) end; -format({error_report, _GL, {Pid, crash_report, [Report, Neighbors]}}) -> +do_format({error_report, _GL, {Pid, crash_report, [Report, Neighbors]}}) -> Msg = "CRASH REPORT " ++ format_crash_report(Report, Neighbors), format(error, Pid, Msg); -format({warning_msg, _GL, {Pid, Fmt, Args}}) -> +do_format({warning_msg, _GL, {Pid, Fmt, Args}}) -> format(warning, Pid, Fmt, Args); -format({warning_report, _GL, {Pid, std_warning, Report}}) -> +do_format({warning_report, _GL, {Pid, std_warning, Report}}) -> format(warning, Pid, print_silly_list(Report)); -format({info_msg, _GL, {Pid, Fmt, Args}}) -> +do_format({info_msg, _GL, {Pid, Fmt, Args}}) -> format(info, Pid, Fmt, Args); -format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) -> +do_format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) -> case lists:sort(D) of [{application, App}, {exited, Reason}, {type, _Type}] -> MsgFmt = "Application ~w exited with reason: ~s", @@ -133,10 +143,10 @@ format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) -> format(info, Pid, print_silly_list(D)) end; -format({info_report, _GL, {Pid, std_info, D}}) -> +do_format({info_report, _GL, {Pid, std_info, D}}) -> format(info, Pid, "~w", [D]); -format({info_report, _GL, {Pid, progress, D}}) -> +do_format({info_report, _GL, {Pid, progress, D}}) -> case lists:sort(D) of [{application, App}, {started_at, Node}] -> MsgFmt = "Application ~w started on node ~w", @@ -150,7 +160,7 @@ format({info_report, _GL, {Pid, progress, D}}) -> format(info, Pid, "PROGRESS REPORT " ++ print_silly_list(D)) end; -format(Event) -> +do_format(Event) -> format(warning, self(), "Unexpected error_logger event ~w", [Event]). diff --git a/src/couch_log/test/couch_log_formatter_test.erl b/src/couch_log/test/couch_log_formatter_test.erl index a8f69b221..795efcf29 100644 --- a/src/couch_log/test/couch_log_formatter_test.erl +++ b/src/couch_log/test/couch_log_formatter_test.erl @@ -37,6 +37,29 @@ format_reason_test() -> ?assertEqual(Formatted, lists:flatten(Entry#log_entry.msg)). +crashing_formatting_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "** Generic server and some stuff", + [a_gen_server, {foo, bar}, server_state] % not enough args! + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event) + ), + do_matches(do_format(Event), [ + "Encountered error {error,{badmatch" + ]). + + gen_server_error_test() -> Pid = self(), Event = { @@ -59,7 +82,35 @@ gen_server_error_test() -> "gen_server a_gen_server terminated", "with reason: some_reason", "last msg: {foo,bar}", - "state: server_state" + "state: server_state", + "extra: \\[\\]" + ]). 
+ + +gen_server_error_with_extra_args_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "** Generic server and some stuff", + [a_gen_server, {foo, bar}, server_state, some_reason, sad, args] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event) + ), + do_matches(do_format(Event), [ + "gen_server a_gen_server terminated", + "with reason: some_reason", + "last msg: {foo,bar}", + "state: server_state", + "extra: \\[sad,args\\]" ]). @@ -85,7 +136,35 @@ gen_fsm_error_test() -> "gen_fsm a_gen_fsm in state state_name", "with reason: barf", "last msg: {ohai,there}", - "state: curr_state" + "state: curr_state", + "extra: \\[\\]" + ]). + + +gen_fsm_error_with_extra_args_test() -> + Pid = self(), + Event = { + error, + erlang:group_leader(), + { + Pid, + "** State machine did a thing", + [a_gen_fsm, {ohai,there}, state_name, curr_state, barf, sad, args] + } + }, + ?assertMatch( + #log_entry{ + level = error, + pid = Pid + }, + do_format(Event) + ), + do_matches(do_format(Event), [ + "gen_fsm a_gen_fsm in state state_name", + "with reason: barf", + "last msg: {ohai,there}", + "state: curr_state", + "extra: \\[sad,args\\]" ]). -- cgit v1.2.1 From c980b8016dcf2c205571c3fb9d25f44e2f631074 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Wed, 17 Oct 2018 17:43:56 -0400 Subject: Improve restart resilience of couch_log application Previously it was too easy to crash the whole node when any of couch_log's children restarted. To improve resiliency, let the couch_log application restart a few more times before taking down the whole node with it. --- src/couch_log/src/couch_log_sup.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/couch_log/src/couch_log_sup.erl b/src/couch_log/src/couch_log_sup.erl index 083f5fc33..6219a36e9 100644 --- a/src/couch_log/src/couch_log_sup.erl +++ b/src/couch_log/src/couch_log_sup.erl @@ -26,7 +26,7 @@ start_link() -> init([]) -> ok = couch_log_config:init(), - {ok, {{one_for_one, 1, 1}, children()}}. + {ok, {{one_for_one, 10, 10}, children()}}. children() -> -- cgit v1.2.1 From 4a76ccbd51786218f8bf20b888bbb23e40a019db Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Thu, 18 Oct 2018 11:43:42 -0400 Subject: Avoid crashing if a mango query is reduced Previously, returning null from the mango native proc led to a case_clause error in couch_query_servers. Instead, return a proper shape but with null results for each reduction.
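To illustrate the shape change: couch_query_servers expects a reduce response of the form [true, Results], with one result per reduce function, so a bare null has no matching case clause. A minimal sketch of the fixed reply shape (the reduce sources below are assumed example values, not part of the patch):

    %% Sketch: one null result per reduce source instead of a bare `null`.
    RedSrcs = [<<"_count">>, <<"_sum">>],
    [true, [null, null]] = [true, [null || _ <- RedSrcs]].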
--- src/mango/src/mango_native_proc.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/mango/src/mango_native_proc.erl b/src/mango/src/mango_native_proc.erl index 6150e1d19..4c536f871 100644 --- a/src/mango/src/mango_native_proc.erl +++ b/src/mango/src/mango_native_proc.erl @@ -89,11 +89,11 @@ handle_call({prompt, [<<"add_fun">>, IndexInfo]}, _From, St) -> handle_call({prompt, [<<"map_doc">>, Doc]}, _From, St) -> {reply, map_doc(St, mango_json:to_binary(Doc)), St}; -handle_call({prompt, [<<"reduce">>, _, _]}, _From, St) -> - {reply, null, St}; +handle_call({prompt, [<<"reduce">>, RedSrcs, _]}, _From, St) -> + {reply, [true, [null || _ <- RedSrcs]], St}; -handle_call({prompt, [<<"rereduce">>, _, _]}, _From, St) -> - {reply, null, St}; +handle_call({prompt, [<<"rereduce">>, RedSrcs, _]}, _From, St) -> + {reply, [true, [null || _ <- RedSrcs]], St}; handle_call({prompt, [<<"index_doc">>, Doc]}, _From, St) -> Vals = case index_doc(St, mango_json:to_binary(Doc)) of -- cgit v1.2.1 From 2301cf35b8f08659fcdfcebd260d628b067abefd Mon Sep 17 00:00:00 2001 From: ILYA Khlopotov Date: Mon, 15 Oct 2018 14:19:41 -0700 Subject: Fix ets_lru configuration in chttpd application The code was incorrect in the sense that it used an is_integer guard, while `config:get` cannot return an integer. --- src/chttpd/src/chttpd_sup.erl | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl index fe84b67eb..369248ea6 100644 --- a/src/chttpd/src/chttpd_sup.erl +++ b/src/chttpd/src/chttpd_sup.erl @@ -80,21 +80,18 @@ maybe_replace(Key, Value, Settings) -> end. lru_opts() -> - case config:get("chttpd_auth_cache", "max_objects") of - MxObjs when is_integer(MxObjs), MxObjs > 0 -> - [{max_objects, MxObjs}]; - _ -> - [] - end ++ - case config:get("chttpd_auth_cache", "max_size", "104857600") of - MxSize when is_integer(MxSize), MxSize > 0 -> - [{max_size, MxSize}]; - _ -> - [] - end ++ - case config:get("chttpd_auth_cache", "max_lifetime", "600000") of - MxLT when is_integer(MxLT), MxLT > 0 -> - [{max_lifetime, MxLT}]; - _ -> - [] - end. + lists:foldl(fun append_if_set/2, [], [ + {max_objects, config:get_integer("chttpd_auth_cache", "max_objects", 0)}, + {max_size, config:get_integer("chttpd_auth_cache", "max_size", 104857600)}, + {max_lifetime, config:get_integer("chttpd_auth_cache", "max_lifetime", 600000)} + ]). + +append_if_set({Key, Value}, Opts) when Value > 0 -> + [{Key, Value} | Opts]; +append_if_set({Key, 0}, Opts) -> + Opts; +append_if_set({Key, Value}, Opts) -> + couch_log:error( + "The value for `~s` should be a string convertible " + "to an integer which is >= 0 (got `~p`)", [Key, Value]), + Opts.
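To see why the replaced guards could never match: config:get/2 returns the raw ini string (or undefined), so is_integer/1 was always false and configured values were silently ignored, while config:get_integer/3 performs the conversion. A minimal sketch, assuming an ini file that sets max_objects = 1000 in the [chttpd_auth_cache] section:

    %% Sketch (assumed config values): config:get/2 yields a string,
    %% config:get_integer/3 yields the parsed integer or the default.
    "1000" = config:get("chttpd_auth_cache", "max_objects"),
    1000 = config:get_integer("chttpd_auth_cache", "max_objects", 0),
    %% An unset key falls back to the supplied default:
    600000 = config:get_integer("chttpd_auth_cache", "unset_key", 600000).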
-- cgit v1.2.1 From 620b1e192ed0abd833a73b8ec2f55a6185ed09dc Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Thu, 18 Oct 2018 13:33:44 -0400 Subject: Update snappy dependency to CouchDB-1.0.2 This fixes a memory bug: https://github.com/apache/couchdb-snappy/commit/2038ad13b1d6926468f25adea110028e3c0b4b0c --- rebar.config.script | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rebar.config.script b/rebar.config.script index 3bb1a584c..4d1649347 100644 --- a/rebar.config.script +++ b/rebar.config.script @@ -52,7 +52,7 @@ DepDescs = [ {b64url, "b64url", {tag, "1.0.1"}}, {ets_lru, "ets-lru", {tag, "1.0.0"}}, {khash, "khash", {tag, "1.0.1"}}, -{snappy, "snappy", {tag, "CouchDB-1.0.1"}}, +{snappy, "snappy", {tag, "CouchDB-1.0.2"}}, {ioq, "ioq", {tag, "1.0.1"}}, %% Non-Erlang deps -- cgit v1.2.1 From 0408ccc44514a6d1f20cedacb4635e614cf94fb0 Mon Sep 17 00:00:00 2001 From: Iblis Lin Date: Sat, 20 Oct 2018 18:25:08 +0800 Subject: couchjs: show default runtime SIZE limit in help message --- src/couch/priv/couch_js/help.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/couch/priv/couch_js/help.h b/src/couch/priv/couch_js/help.h index c6d76b257..678651fd3 100644 --- a/src/couch/priv/couch_js/help.h +++ b/src/couch/priv/couch_js/help.h @@ -52,6 +52,7 @@ static const char USAGE_TEMPLATE[] = " should not be enabled for production systems)\n" " -S SIZE specify that the runtime should allow at\n" " most SIZE bytes of memory to be allocated\n" + " default is 64 MiB\n" " -u FILE path to a .uri file containing the address\n" " (or addresses) of one or more servers\n" " --eval Enable runtime code evaluation (dangerous!)\n" -- cgit v1.2.1 From 4dca28c2c8607d296bc6bd4d17735abad0a50ffa Mon Sep 17 00:00:00 2001 From: Joan Touzet Date: Mon, 22 Oct 2018 13:06:07 -0400 Subject: Remove explicit python version for dev/run (for now) --- dev/run | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev/run b/dev/run index d105140b7..a5d8fde8c 100755 --- a/dev/run +++ b/dev/run @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of -- cgit v1.2.1 From 01a7165d42901ab4f99ea3d81c50cb9d485aa4d4 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Tue, 23 Oct 2018 03:09:08 -0400 Subject: Pin hypothesis to avoid master versions breaking builds --- src/mango/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mango/Makefile b/src/mango/Makefile index 1b2a50452..59f4a29c2 100644 --- a/src/mango/Makefile +++ b/src/mango/Makefile @@ -46,7 +46,7 @@ test: # target: pip-install - Installs requires Python packages pip-install: pip install nose requests - pip install hypothesis + pip install hypothesis==3.79.0 .PHONY: venv -- cgit v1.2.1 From d02d6140afa0b2eca42b9d20bf2d15f6058e9be9 Mon Sep 17 00:00:00 2001 From: Garren Smith Date: Fri, 19 Oct 2018 11:13:11 +0200 Subject: expose is_system_db_name Expose is_system_db_name as a way to verify if a binary string is the name of a system database.
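A sketch of the intended usage (the database names here are illustrative; <<"_users">> is one of the entries in ?SYSTEM_DATABASES):

    %% Sketch: true for system database names, false otherwise.
    true = couch_db:is_system_db_name(<<"_users">>),
    false = couch_db:is_system_db_name(<<"mydb">>).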
--- src/couch/src/couch_db.erl | 13 +++++++------ src/couch/src/couch_doc.erl | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index 4d76ceedc..57e395179 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -56,6 +56,7 @@ is_db/1, is_system_db/1, is_clustered/1, + is_system_db_name/1, set_revs_limit/2, set_purge_infos_limit/2, @@ -1729,15 +1730,15 @@ validate_dbname_int(DbName, Normalized) when is_binary(DbName) -> match -> ok; nomatch -> - case is_systemdb(Normalized) of + case is_system_db_name(Normalized) of true -> ok; false -> {error, {illegal_database_name, DbName}} end end. -is_systemdb(DbName) when is_list(DbName) -> - is_systemdb(?l2b(DbName)); -is_systemdb(DbName) when is_binary(DbName) -> +is_system_db_name(DbName) when is_list(DbName) -> + is_system_db_name(?l2b(DbName)); +is_system_db_name(DbName) when is_binary(DbName) -> Normalized = normalize_dbname(DbName), Suffix = filename:basename(Normalized), case {filename:dirname(Normalized), lists:member(Suffix, ?SYSTEM_DATABASES)} of @@ -1860,7 +1861,7 @@ dbname_suffix_test_() -> [{test_name({Expected, Db}), ?_assertEqual(Expected, dbname_suffix(Db))} || {Expected, Db} <- WithExpected]. -is_systemdb_test_() -> +is_system_db_name_test_() -> Cases = lists:append([ generate_cases_with_shards("long/co$mplex-/path+/" ++ ?b2l(Db)) || Db <- ?SYSTEM_DATABASES] ), WithExpected = [{?l2b(filename:basename(filename:rootname(Arg))), Db} || {Arg, Db} <- Cases], [{test_name({Expected, Db}) ++ " in ?SYSTEM_DATABASES", - ?_assert(is_systemdb(Db))} || {Expected, Db} <- WithExpected]. + ?_assert(is_system_db_name(Db))} || {Expected, Db} <- WithExpected]. should_pass_validate_dbname(DbName) -> {test_name(DbName), ?_assertEqual(ok, validate_dbname(DbName))}. diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl index f960ec5c2..e5ad9e9f1 100644 --- a/src/couch/src/couch_doc.erl +++ b/src/couch/src/couch_doc.erl @@ -200,7 +200,7 @@ parse_revs(_) -> validate_docid(DocId, DbName) -> case DbName =:= ?l2b(config:get("mem3", "shards_db", "_dbs")) andalso - lists:member(DocId, ?SYSTEM_DATABASES) of + couch_db:is_system_db_name(DocId) of true -> ok; false -> -- cgit v1.2.1 From c3069d17f1e90c7b020d9bbe1f15f5af5408fb8b Mon Sep 17 00:00:00 2001 From: Joan Touzet Date: Tue, 23 Oct 2018 11:37:41 -0400 Subject: Fix JS/Python test harness for Python 2.6 (#1674) --- test/javascript/run | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/javascript/run b/test/javascript/run index 11f9faee2..ec12431b0 100755 --- a/test/javascript/run +++ b/test/javascript/run @@ -146,7 +146,7 @@ def main(): sys.stderr.write("=======================================================" + os.linesep) sys.stderr.write("JavaScript tests complete." + os.linesep) - sys.stderr.write(" Failed: {}. Skipped or passed: {}.".format( + sys.stderr.write(" Failed: {0}. Skipped or passed: {1}.".format( failed, passed) + os.linesep) exit(failed > 0) -- cgit v1.2.1 From 3ac1aca98737b7f986b7efb9ed10f474821fa01f Mon Sep 17 00:00:00 2001 From: jiangph Date: Wed, 24 Oct 2018 08:52:54 +0800 Subject: Allow returning accepted for mixed nodes in a cluster - For mixed nodes in a cluster, i.e. nodes running different releases, it is possible that an "accepted" result is returned instead of ok when a purge request is sent. This commit addresses a badmatch error where the "accepted" result was not handled.
COUCHDB-3326 --- src/chttpd/src/chttpd_db.erl | 7 +++++-- src/chttpd/test/chttpd_purge_tests.erl | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl index bd01b93bd..f95c3e794 100644 --- a/src/chttpd/src/chttpd_db.erl +++ b/src/chttpd/src/chttpd_db.erl @@ -524,8 +524,11 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) -> true -> ok end, couch_stats:increment_counter([couchdb, document_purges, total], length(IdsRevs2)), - {ok, Results} = fabric:purge_docs(Db, IdsRevs2, Options), - {Code, Json} = purge_results_to_json(IdsRevs2, Results), + Results2 = case fabric:purge_docs(Db, IdsRevs2, Options) of + {ok, Results} -> Results; + {accepted, Results} -> Results + end, + {Code, Json} = purge_results_to_json(IdsRevs2, Results2), send_json(Req, Code, {[{<<"purge_seq">>, null}, {<<"purged">>, {Json}}]}); db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) -> diff --git a/src/chttpd/test/chttpd_purge_tests.erl b/src/chttpd/test/chttpd_purge_tests.erl index 686552590..af1bd0b1c 100644 --- a/src/chttpd/test/chttpd_purge_tests.erl +++ b/src/chttpd/test/chttpd_purge_tests.erl @@ -70,6 +70,7 @@ purge_test_() -> [ fun test_empty_purge_request/1, fun test_ok_purge_request/1, + fun test_accepted_purge_request/1, fun test_partial_purge_request/1, fun test_mixed_purge_request/1, fun test_overmany_ids_or_revs_purge_request/1, @@ -135,6 +136,38 @@ test_ok_purge_request(Url) -> end). +test_accepted_purge_request(Url) -> + ?_test(begin + {ok, _, _, Body} = create_doc(Url, "doc1"), + {Json} = ?JSON_DECODE(Body), + Rev1 = couch_util:get_value(<<"rev">>, Json, undefined), + IdsRevsEJson = {[ + {<<"doc1">>, [Rev1]} + ]}, + IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), + meck:new(fabric, [passthrough]), + meck:expect(fabric, purge_docs, + fun(_, _, _) -> {accepted,[{accepted,[{1, + <<57,27,64,134,152,18,73,243,40,1,141,214,135,104,79,188>>}]}]} + end + ), + {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], IdsRevs), + ResultJson = ?JSON_DECODE(ResultBody), + meck:unload(fabric), + ?assert(Status =:= 202), + ?assertEqual( + {[ + {<<"purge_seq">>, null}, + {<<"purged">>, {[ + {<<"doc1">>, [Rev1]} + ]}} + ]}, + ResultJson + ) + end). + + test_partial_purge_request(Url) -> ?_test(begin {ok, _, _, Body} = create_doc(Url, "doc1"), -- cgit v1.2.1 From 91145016fe63513ab9e80100f6a4683f7aa911d7 Mon Sep 17 00:00:00 2001 From: jiangph Date: Thu, 25 Oct 2018 17:17:54 +0800 Subject: Fix get_minimum_purge_seq/1 - Pass DbName instead of the db record when validating the purge client, display an error when the client doesn't exist, and use mem3:dbname/1 to get the DbName for get_design_docs/1 COUCHDB-3326 --- src/couch/src/couch_db.erl | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index 57e395179..f65900223 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -425,20 +425,22 @@ get_minimum_purge_seq(#db{} = Db) -> case DocId of <> -> ClientSeq = couch_util:get_value(<<"purge_seq">>, Props), + DbName = couch_db:name(Db), + % If there's a broken doc we have to keep every + % purge info until the doc is fixed or removed.
+ Fmt = "Invalid purge doc '~s' on ~p with purge_seq '~w'", case ClientSeq of CS when is_integer(CS), CS >= PurgeSeq - PurgeInfosLimit -> {ok, SeqAcc}; CS when is_integer(CS) -> - case purge_client_exists(Db, DocId, Props) of - true -> {ok, erlang:min(CS, SeqAcc)}; - false -> {ok, SeqAcc} + case purge_client_exists(DbName, DocId, Props) of + true -> + {ok, erlang:min(CS, SeqAcc)}; + false -> + couch_log:error(Fmt, [DocId, DbName, ClientSeq]), + {ok, SeqAcc} end; _ -> - % If there's a broken doc we have to keep every - % purge info until the doc is fixed or removed. - Fmt = "Invalid purge doc '~s' on database ~p - with purge_seq '~w'", - DbName = couch_db:name(Db), couch_log:error(Fmt, [DocId, DbName, ClientSeq]), {ok, erlang:min(OldestPurgeSeq, SeqAcc)} end; @@ -491,7 +493,7 @@ purge_client_exists(DbName, DocId, Props) -> % it exists. Fmt2 = "Failed to check purge checkpoint using document '~p' in database ~p", - couch_log:error(Fmt2, [DbName, DocId]), + couch_log:error(Fmt2, [DocId, DbName]), true end. @@ -605,8 +607,8 @@ get_db_info(Db) -> ], {ok, InfoList}. -get_design_docs(#db{name = <<"shards/", _:18/binary, DbFullName/binary>>}) -> - DbName = ?l2b(filename:rootname(filename:basename(?b2l(DbFullName)))), +get_design_docs(#db{name = <<"shards/", _/binary>> = ShardDbName}) -> + DbName = mem3:dbname(ShardDbName), {_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end), receive {'DOWN', Ref, _, _, Response} -> Response -- cgit v1.2.1 From ee20ad7b374c9d7681fe799276f13c8977f1bb74 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Thu, 25 Oct 2018 11:20:22 -0300 Subject: Handle an exception on an invalid rev in a local doc update --- src/couch/src/couch_db_updater.erl | 108 +++++++++++++++++++++++++++++-------- 1 file changed, 86 insertions(+), 22 deletions(-) diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl index 52a4d2f1b..87301d2d8 100644 --- a/src/couch/src/couch_db_updater.erl +++ b/src/couch/src/couch_db_updater.erl @@ -627,28 +627,31 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts, FullCommit) -> update_local_doc_revs(Docs) -> - lists:map(fun({Client, NewDoc}) -> - #doc{ - deleted = Delete, - revs = {0, PrevRevs} - } = NewDoc, - case PrevRevs of - [RevStr | _] -> - PrevRev = binary_to_integer(RevStr); - [] -> - PrevRev = 0 - end, - NewRev = case Delete of - false -> - PrevRev + 1; - true -> - 0 - end, - send_result(Client, NewDoc, {ok, {0, integer_to_binary(NewRev)}}), - NewDoc#doc{ - revs = {0, [NewRev]} - } - end, Docs). + lists:foldl(fun({Client, Doc}, Acc) -> + case increment_local_doc_revs(Doc) of + {ok, #doc{revs = {0, [NewRev]}} = NewDoc} -> + send_result(Client, Doc, {ok, {0, integer_to_binary(NewRev)}}), + [NewDoc | Acc]; + {error, Error} -> + send_result(Client, Doc, {error, Error}), + Acc + end + end, [], Docs). + + +increment_local_doc_revs(#doc{deleted = true} = Doc) -> + {ok, Doc#doc{revs = {0, [0]}}}; +increment_local_doc_revs(#doc{revs = {0, []}} = Doc) -> + {ok, Doc#doc{revs = {0, [1]}}}; +increment_local_doc_revs(#doc{revs = {0, [RevStr | _]}} = Doc) -> + try + PrevRev = binary_to_integer(RevStr), + {ok, Doc#doc{revs = {0, [PrevRev + 1]}}} + catch error:badarg -> + {error, <<"Invalid rev format">>} + end; +increment_local_doc_revs(#doc{}) -> + {error, <<"Invalid rev format">>}. purge_docs(Db, []) -> @@ -808,3 +811,64 @@ hibernate_if_no_idle_limit() -> Timeout when is_integer(Timeout) -> Timeout end. + + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). 
+ + +update_local_doc_revs_test_() -> + {inparallel, [ + {"Test local doc with valid rev", fun t_good_local_doc/0}, + {"Test local doc with invalid rev", fun t_bad_local_doc/0}, + {"Test deleted local doc", fun t_dead_local_doc/0} + ]}. + + +t_good_local_doc() -> + Doc = #doc{ + id = <<"_local/alice">>, + revs = {0, [<<"1">>]}, + meta = [{ref, make_ref()}] + }, + [NewDoc] = update_local_doc_revs([{self(), Doc}]), + ?assertEqual({0, [2]}, NewDoc#doc.revs), + {ok, Result} = receive_result(Doc), + ?assertEqual({ok,{0,<<"2">>}}, Result). + + +t_bad_local_doc() -> + lists:foreach(fun(BadRevs) -> + Doc = #doc{ + id = <<"_local/alice">>, + revs = BadRevs, + meta = [{ref, make_ref()}] + }, + NewDocs = update_local_doc_revs([{self(), Doc}]), + ?assertEqual([], NewDocs), + {ok, Result} = receive_result(Doc), + ?assertEqual({error,<<"Invalid rev format">>}, Result) + end, [{0, [<<"a">>]}, {1, [<<"1">>]}]). + + + +t_dead_local_doc() -> + Doc = #doc{ + id = <<"_local/alice">>, + revs = {0, [<<"122">>]}, + deleted = true, + meta = [{ref, make_ref()}] + }, + [NewDoc] = update_local_doc_revs([{self(), Doc}]), + ?assertEqual({0, [0]}, NewDoc#doc.revs), + {ok, Result} = receive_result(Doc), + ?assertEqual({ok,{0,<<"0">>}}, Result). + + +receive_result(#doc{meta = Meta}) -> + Ref = couch_util:get_value(ref, Meta), + receive + {result, _, {Ref, Result}} -> {ok, Result} + end. + +-endif. -- cgit v1.2.1 From a7ccaeb9a8e6520337e1c2223243e2e5e4627b08 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Thu, 25 Oct 2018 12:02:49 -0300 Subject: Remove extra spaces from a test module --- src/couch/test/couchdb_update_conflicts_tests.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/couch/test/couchdb_update_conflicts_tests.erl b/src/couch/test/couchdb_update_conflicts_tests.erl index 09c2834a8..6b4a8f643 100644 --- a/src/couch/test/couchdb_update_conflicts_tests.erl +++ b/src/couch/test/couchdb_update_conflicts_tests.erl @@ -157,10 +157,10 @@ ensure_in_single_revision_leaf(DbName) -> [{ok, Doc2}] = Leaves, ?assertEqual(Doc, Doc2). - + bulk_delete_create(DbName, InitRev) -> {ok, Db} = couch_db:open_int(DbName, []), - + DeletedDoc = couch_doc:from_json_obj({[ {<<"_id">>, ?DOC_ID}, {<<"_rev">>, InitRev}, @@ -176,7 +176,7 @@ bulk_delete_create(DbName, InitRev) -> ?assertEqual(2, length([ok || {ok, _} <- Results])), [{ok, Rev1}, {ok, Rev2}] = Results, - + {ok, Db2} = couch_db:open_int(DbName, []), {ok, [{ok, Doc1}]} = couch_db:open_doc_revs( Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]), -- cgit v1.2.1 From ca60a5e1fe8c1bce0cff0aaadb8581e7f7b765cd Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Thu, 25 Oct 2018 11:25:44 -0300 Subject: Write local docs in a correct tree on a bulk operation with new_edits false --- src/couch/src/couch_db.erl | 126 +++++++++++----------- src/couch/test/couchdb_update_conflicts_tests.erl | 58 +++++++++- 2 files changed, 115 insertions(+), 69 deletions(-) diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index f65900223..9d6a5dc45 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -1114,69 +1114,35 @@ doc_tag(#doc{meta=Meta}) -> end. 
update_docs(Db, Docs0, Options, replicated_changes) -> - increment_stat(Db, [couchdb, database_writes]), Docs = tag_docs(Docs0), - DocBuckets = before_docs_update(Db, group_alike_docs(Docs)), - - case (Db#db.validate_doc_funs /= []) orelse - lists:any( - fun(#doc{id= <>}) -> true; - (#doc{atts=Atts}) -> - Atts /= [] - end, Docs) of - true -> - Ids = [Id || [#doc{id=Id}|_] <- DocBuckets], - ExistingDocs = get_full_doc_infos(Db, Ids), - {DocBuckets2, DocErrors} = - prep_and_validate_replicated_updates(Db, DocBuckets, ExistingDocs, [], []), - DocBuckets3 = [Bucket || [_|_]=Bucket <- DocBuckets2]; % remove empty buckets - false -> - DocErrors = [], - DocBuckets3 = DocBuckets + PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) -> + prep_and_validate_replicated_updates(Db0, DocBuckets0, + ExistingDocInfos, [], []) end, - DocBuckets4 = [[doc_flush_atts(Db, check_dup_atts(Doc)) - || Doc <- Bucket] || Bucket <- DocBuckets3], - {ok, []} = write_and_commit(Db, DocBuckets4, [], [merge_conflicts | Options]), + + {ok, DocBuckets, NonRepDocs, DocErrors} + = before_docs_update(Db, Docs, PrepValidateFun), + + DocBuckets2 = [[doc_flush_atts(Db, check_dup_atts(Doc)) + || Doc <- Bucket] || Bucket <- DocBuckets], + {ok, _} = write_and_commit(Db, DocBuckets2, + NonRepDocs, [merge_conflicts | Options]), {ok, DocErrors}; update_docs(Db, Docs0, Options, interactive_edit) -> - increment_stat(Db, [couchdb, database_writes]), - AllOrNothing = lists:member(all_or_nothing, Options), Docs = tag_docs(Docs0), - % Separate _local docs from normal docs - IsLocal = fun - (#doc{id= <>}) -> true; - (_) -> false + AllOrNothing = lists:member(all_or_nothing, Options), + PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) -> + prep_and_validate_updates(Db0, DocBuckets0, ExistingDocInfos, + AllOrNothing, [], []) end, - {NonRepDocs, Docs2} = lists:partition(IsLocal, Docs), - DocBuckets = before_docs_update(Db, group_alike_docs(Docs2)), - - case (Db#db.validate_doc_funs /= []) orelse - lists:any( - fun(#doc{id= <>}) -> - true; - (#doc{atts=Atts}) -> - Atts /= [] - end, Docs2) of - true -> - % lookup the doc by id and get the most recent - Ids = [Id || [#doc{id=Id}|_] <- DocBuckets], - ExistingDocInfos = get_full_doc_infos(Db, Ids), - - {DocBucketsPrepped, PreCommitFailures} = prep_and_validate_updates(Db, - DocBuckets, ExistingDocInfos, AllOrNothing, [], []), - - % strip out any empty buckets - DocBuckets2 = [Bucket || [_|_] = Bucket <- DocBucketsPrepped]; - false -> - PreCommitFailures = [], - DocBuckets2 = DocBuckets - end, + {ok, DocBuckets, NonRepDocs, DocErrors} + = before_docs_update(Db, Docs, PrepValidateFun), - if (AllOrNothing) and (PreCommitFailures /= []) -> + if (AllOrNothing) and (DocErrors /= []) -> RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]), {aborted, lists:map(fun({Ref, Error}) -> #doc{id=Id,revs={Start,RevIds}} = dict:fetch(Ref, RefErrorDict), @@ -1184,21 +1150,22 @@ update_docs(Db, Docs0, Options, interactive_edit) -> {Pos, [RevId | _]} -> {{Id, {Pos, RevId}}, Error}; {0, []} -> {{Id, {0, <<>>}}, Error} end - end, PreCommitFailures)}; + end, DocErrors)}; true -> Options2 = if AllOrNothing -> [merge_conflicts]; true -> [] end ++ Options, - DocBuckets3 = [[ + DocBuckets2 = [[ doc_flush_atts(Db, set_new_att_revpos( check_dup_atts(Doc))) - || Doc <- B] || B <- DocBuckets2], - {DocBuckets4, IdRevs} = new_revs(DocBuckets3, [], []), + || Doc <- B] || B <- DocBuckets], + {DocBuckets3, IdRevs} = new_revs(DocBuckets2, [], []), - {ok, CommitResults} = write_and_commit(Db, DocBuckets4, 
NonRepDocs, Options2), + {ok, CommitResults} = write_and_commit(Db, DocBuckets3, + NonRepDocs, Options2), ResultsDict = lists:foldl(fun({Key, Resp}, ResultsAcc) -> dict:store(Key, Resp, ResultsAcc) - end, dict:from_list(IdRevs), CommitResults ++ PreCommitFailures), + end, dict:from_list(IdRevs), CommitResults ++ DocErrors), {ok, lists:map(fun(Doc) -> dict:fetch(doc_tag(Doc), ResultsDict) end, Docs)} @@ -1316,13 +1283,42 @@ prepare_doc_summaries(Db, BucketList) -> Bucket) || Bucket <- BucketList]. -before_docs_update(#db{} = Db, BucketList) -> - [lists:map( - fun(Doc) -> - DocWithBody = couch_doc:with_ejson_body(Doc), - couch_db_plugin:before_doc_update(Db, DocWithBody) - end, - Bucket) || Bucket <- BucketList]. +before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun) -> + increment_stat(Db, [couchdb, database_writes]), + + % Separate _local docs from normal docs + IsLocal = fun + (#doc{id= <>}) -> true; + (_) -> false + end, + {NonRepDocs, Docs2} = lists:partition(IsLocal, Docs), + + BucketList = group_alike_docs(Docs2), + + DocBuckets = lists:map(fun(Bucket) -> + lists:map(fun(Doc) -> + DocWithBody = couch_doc:with_ejson_body(Doc), + couch_db_plugin:before_doc_update(Db, DocWithBody) + end, Bucket) + end, BucketList), + + ValidatePred = fun + (#doc{id = <>}) -> true; + (#doc{atts = Atts}) -> Atts /= [] + end, + + case (VDFuns /= []) orelse lists:any(ValidatePred, Docs2) of + true -> + % lookup the doc by id and get the most recent + Ids = [Id || [#doc{id = Id} | _] <- DocBuckets], + ExistingDocs = get_full_doc_infos(Db, Ids), + {DocBuckets2, DocErrors} = PVFun(Db, DocBuckets, ExistingDocs), + % remove empty buckets + DocBuckets3 = [Bucket || Bucket <- DocBuckets2, Bucket /= []], + {ok, DocBuckets3, NonRepDocs, DocErrors}; + false -> + {ok, DocBuckets, NonRepDocs, []} + end. set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts0}=Doc) -> diff --git a/src/couch/test/couchdb_update_conflicts_tests.erl b/src/couch/test/couchdb_update_conflicts_tests.erl index 6b4a8f643..e92c73856 100644 --- a/src/couch/test/couchdb_update_conflicts_tests.erl +++ b/src/couch/test/couchdb_update_conflicts_tests.erl @@ -17,6 +17,7 @@ -define(i2l(I), integer_to_list(I)). -define(DOC_ID, <<"foobar">>). +-define(LOCAL_DOC_ID, <<"_local/foobar">>). -define(NUM_CLIENTS, [100, 500, 1000, 2000, 5000, 10000]). -define(TIMEOUT, 20000). @@ -52,7 +53,7 @@ view_indexes_cleanup_test_() -> fun start/0, fun test_util:stop_couch/1, [ concurrent_updates(), - couchdb_188() + bulk_docs_updates() ] } }. @@ -68,13 +69,17 @@ concurrent_updates()-> } }. -couchdb_188()-> +bulk_docs_updates()-> { - "COUCHDB-188", + "Bulk docs updates", { foreach, fun setup/0, fun teardown/1, - [fun should_bulk_create_delete_doc/1] + [ + fun should_bulk_create_delete_doc/1, + fun should_bulk_create_local_doc/1, + fun should_ignore_invalid_local_doc/1 + ] } }. @@ -91,6 +96,12 @@ should_concurrently_update_doc(NumClients, {DbName, InitRev})-> should_bulk_create_delete_doc({DbName, InitRev})-> ?_test(bulk_delete_create(DbName, InitRev)). +should_bulk_create_local_doc({DbName, _})-> + ?_test(bulk_create_local_doc(DbName)). + +should_ignore_invalid_local_doc({DbName, _})-> + ?_test(ignore_invalid_local_doc(DbName)). + concurrent_doc_update(NumClients, DbName, InitRev) -> Clients = lists:map( @@ -214,6 +225,45 @@ bulk_delete_create(DbName, InitRev) -> ?assertEqual(3, element(1, Rev2)). 
+bulk_create_local_doc(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + + LocalDoc = couch_doc:from_json_obj({[ + {<<"_id">>, ?LOCAL_DOC_ID}, + {<<"_rev">>, <<"0-1">>} + ]}), + + {ok, Results} = couch_db:update_docs(Db, [LocalDoc], + [], replicated_changes), + ok = couch_db:close(Db), + ?assertEqual([], Results), + + {ok, Db2} = couch_db:open_int(DbName, []), + {ok, LocalDoc1} = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []), + ok = couch_db:close(Db2), + ?assertEqual(?LOCAL_DOC_ID, LocalDoc1#doc.id), + ?assertEqual({0, [<<"2">>]}, LocalDoc1#doc.revs). + + +ignore_invalid_local_doc(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + + LocalDoc = couch_doc:from_json_obj({[ + {<<"_id">>, ?LOCAL_DOC_ID}, + {<<"_rev">>, <<"0-abcdef">>} + ]}), + + {ok, Results} = couch_db:update_docs(Db, [LocalDoc], + [], replicated_changes), + ok = couch_db:close(Db), + ?assertEqual([], Results), + + {ok, Db2} = couch_db:open_int(DbName, []), + Result2 = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []), + ok = couch_db:close(Db2), + ?assertEqual({not_found, missing}, Result2). + + spawn_client(DbName, Doc) -> spawn(fun() -> {ok, Db} = couch_db:open_int(DbName, []), -- cgit v1.2.1 From 6ee3d958c8b2162182bb8c3722ac7d3c725c345a Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Thu, 25 Oct 2018 11:27:21 -0300 Subject: Extend generation of missing id to a bulk operation with new_edits false --- src/chttpd/src/chttpd_db.erl | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl index f95c3e794..d46b5bbf2 100644 --- a/src/chttpd/src/chttpd_db.erl +++ b/src/chttpd/src/chttpd_db.erl @@ -420,19 +420,16 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req, _ -> Options = [{user_ctx,Ctx}, {w,W}] end, + Docs = lists:map(fun(JsonObj) -> + Doc = couch_doc:from_json_obj_validate(JsonObj), + validate_attachment_names(Doc), + case Doc#doc.id of + <<>> -> Doc#doc{id = couch_uuids:new()}; + _ -> Doc + end + end, DocsArray), case couch_util:get_value(<<"new_edits">>, JsonProps, true) of true -> - Docs = lists:map( - fun(JsonObj) -> - Doc = couch_doc:from_json_obj_validate(JsonObj), - validate_attachment_names(Doc), - Id = case Doc#doc.id of - <<>> -> couch_uuids:new(); - Id0 -> Id0 - end, - Doc#doc{id=Id} - end, - DocsArray), Options2 = case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of true -> [all_or_nothing|Options]; @@ -455,8 +452,6 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req, send_json(Req, 417, ErrorsJson) end; false -> - Docs = [couch_doc:from_json_obj_validate(JsonObj) || JsonObj <- DocsArray], - [validate_attachment_names(D) || D <- Docs], case fabric:update_docs(Db, Docs, [replicated_changes|Options]) of {ok, Errors} -> ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors), -- cgit v1.2.1 From 8ad49a22b198b84b062268340acc3fd84fb9bcc7 Mon Sep 17 00:00:00 2001 From: jiangph Date: Fri, 2 Nov 2018 17:12:10 +0800 Subject: Handle out-of-sync docs in mango when a doc is deleted - When a document is deleted while the mrview index for it has not yet been updated, the value returned from mrview is {doc,null}. Mango did not account for this case, which caused a case_clause error. This fix handles documents that are out of sync with their index so that the _find endpoint does not return a 500.
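To make the failure mode concrete, here is a sketch of the row a shard can emit for a deleted-but-still-indexed document (the ids are assumed example values); the fixed doc_member/2 now maps such rows to no_match instead of crashing:

    %% Sketch: a view row whose doc was deleted after it was indexed.
    RowProps = [
        {id, <<"1">>},
        {key, <<"1">>},
        {doc, null}   % deleted doc; index not yet updated
    ],
    null = couch_util:get_value(doc, RowProps).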
--- src/mango/src/mango_cursor_view.erl | 8 +++++++- src/mango/test/01-index-crud-test.py | 35 +++++++++++++++++++++++++++++++++++ src/mango/test/mango.py | 6 ++++++ 3 files changed, 48 insertions(+), 1 deletion(-) diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl index 174381e4a..b3a7f4080 100644 --- a/src/mango/src/mango_cursor_view.erl +++ b/src/mango/src/mango_cursor_view.erl @@ -229,6 +229,9 @@ view_cb({row, Row}, #mrargs{extra = Options} = Acc) -> doc = couch_util:get_value(doc, Row) }, case ViewRow#view_row.doc of + null -> + put(mango_docs_examined, get(mango_docs_examined) + 1), + maybe_send_mango_ping(); undefined -> ViewRow2 = ViewRow#view_row{ value = couch_util:get_value(value, Row) @@ -427,7 +430,10 @@ doc_member(Cursor, RowProps) -> match_doc(Selector, Doc, ExecutionStats1); Else -> Else - end + end; + null -> + ExecutionStats1 = mango_execution_stats:incr_docs_examined(ExecutionStats), + {no_match, null, {execution_stats, ExecutionStats1}} end. diff --git a/src/mango/test/01-index-crud-test.py b/src/mango/test/01-index-crud-test.py index cf5b91865..f57db39af 100644 --- a/src/mango/test/01-index-crud-test.py +++ b/src/mango/test/01-index-crud-test.py @@ -13,8 +13,24 @@ import random import mango +import copy import unittest +DOCS = [ + { + "_id": "1", + "name": "Jimi", + "age": 10, + "cars": 1 + }, + { + "_id": "2", + "name": "kate", + "age": 8, + "cars": 0 + } +] + class IndexCrudTests(mango.DbPerClass): def setUp(self): self.db.recreate() @@ -271,6 +287,25 @@ class IndexCrudTests(mango.DbPerClass): except Exception as e: self.assertEqual(e.response.status_code, 500) + def test_out_of_sync(self): + self.db.save_docs(copy.deepcopy(DOCS)) + self.db.create_index(["age"], name="age") + + selector = { + "age": { + "$gt": 0 + }, + } + docs = self.db.find(selector, + use_index="_design/a017b603a47036005de93034ff689bbbb6a873c4") + self.assertEqual(len(docs), 2) + + self.db.delete_doc("1") + + docs1 = self.db.find(selector, update="False", + use_index="_design/a017b603a47036005de93034ff689bbbb6a873c4") + self.assertEqual(len(docs1), 1) + @unittest.skipUnless(mango.has_text_service(), "requires text service") class IndexCrudTextTests(mango.DbPerClass): diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py index bc12bbc68..59486c861 100644 --- a/src/mango/test/mango.py +++ b/src/mango/test/mango.py @@ -113,6 +113,12 @@ class Database(object): r.raise_for_status() return r.json() + def delete_doc(self, docid): + r = self.sess.get(self.path(docid)) + r.raise_for_status() + original_rev = r.json()['_rev'] + self.sess.delete(self.path(docid), params={"rev": original_rev}) + def ddoc_info(self, ddocid): r = self.sess.get(self.path([ddocid, "_info"])) r.raise_for_status() -- cgit v1.2.1 From f9bd22712e995c7efe8ff5032e817fbb6de9bdb7 Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Wed, 7 Nov 2018 19:19:54 +0100 Subject: port experimental change that came up in the elixir test suite branch (#1614) --- src/chttpd/src/chttpd_misc.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl index 7b417b442..39ffa2368 100644 --- a/src/chttpd/src/chttpd_misc.erl +++ b/src/chttpd/src/chttpd_misc.erl @@ -242,7 +242,9 @@ cancel_replication(PostBody, Ctx) -> {error, badrpc}; Else -> % Unclear what to do here -- pick the first error? 
- hd(Else) + % Except try ignoring any {error, not_found} responses + % because we'll always get two of those + hd(Else -- [{error, not_found}]) end end. -- cgit v1.2.1 From b0f65e8b3b036ea5acde07847fb28a8754527980 Mon Sep 17 00:00:00 2001 From: Marc Abbyad Date: Wed, 7 Nov 2018 10:50:56 -0800 Subject: Fix link in contributing doc (#1608) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e345ea487..cd3a4437c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -279,7 +279,7 @@ without needing any other steps like setting git upstreams! :sparkles: ## Thanks -Special thanks to [Hoodie][https://github.com/hoodiehq/hoodie] for the great +Special thanks to [Hoodie](https://github.com/hoodiehq/hoodie) for the great CONTRIBUTING.md template. [1]: http://mail-archives.apache.org/mod_mbox/couchdb-user/ -- cgit v1.2.1 From 33c39a9f1d9bfffdefb263953de704e1a5a1b1eb Mon Sep 17 00:00:00 2001 From: Joan Touzet Date: Wed, 7 Nov 2018 16:23:01 -0500 Subject: Copy _security object in couchup; python3 fix (#1721) --- rel/overlay/bin/couchup | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/rel/overlay/bin/couchup b/rel/overlay/bin/couchup index 6532170aa..75b7d7e94 100755 --- a/rel/overlay/bin/couchup +++ b/rel/overlay/bin/couchup @@ -172,6 +172,25 @@ def _put_filter(args, db=None): print(exc.response.text) exit(1) +def _do_security(args, db=None): + """Copies the _security object from source to target DB.""" + try: + req = requests.get( + 'http://127.0.0.1:{}/{}/_security'.format( + args['local_port'], db), + auth=args['creds']) + req.raise_for_status() + security_doc = _tojson(req) + req = requests.put( + 'http://127.0.0.1:{}/{}/_security'.format( + args['clustered_port'], db), + data=json.dumps(security_doc), + auth=args['creds']) + req.raise_for_status() + except requests.exceptions.HTTPError as exc: + print(exc.response.text) + exit(1) + def _replicate(args): args = _args(args) if args['all_dbs']: @@ -229,6 +248,11 @@ def _replicate(args): if req.get('no_changes'): if not args['quiet']: print("No changes, replication is caught up.") + + if not args['quiet']: + print('Copying _security object for ' + db + '...') + _do_security(args, db) + if not args['quiet']: print("Replication complete.") @@ -474,7 +498,11 @@ def main(argv): parser_delete.set_defaults(func=_delete) args = parser.parse_args(argv[1:]) - args.func(args) + try: + args.func(args) + except AttributeError: + parser.print_help() + sys.exit(0) if __name__ == '__main__': main(sys.argv) -- cgit v1.2.1 From 096994ba20566cb31f613bfe020ef9059e8a0399 Mon Sep 17 00:00:00 2001 From: "Paul J. Davis" Date: Wed, 7 Nov 2018 16:45:09 -0600 Subject: Fix initial view compaction task status (#1672) This is a minor consistency issue. Currently when a view compaction starts it doesn't include the `total_changes` and `changes_done` fields until the first task status update. This is easy to miss, as every other task type includes those fields from the initial task definition. The fix is trivial: include those fields in the initial task definition.
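From the outside, the difference is visible in _active_tasks. A minimal sketch of a consumer check, assuming couch_task_status:all/0 returns one property list per running task:

    %% Sketch (assumed consumer code): with the fix, the counters are
    %% present from the first poll after the compaction task is registered.
    [Task | _] = couch_task_status:all(),
    view_compaction = couch_util:get_value(type, Task),
    0 = couch_util:get_value(changes_done, Task),
    true = is_integer(couch_util:get_value(total_changes, Task)).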
--- src/couch_mrview/src/couch_mrview_compactor.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/couch_mrview/src/couch_mrview_compactor.erl b/src/couch_mrview/src/couch_mrview_compactor.erl index 3ef11805f..9a069cec0 100644 --- a/src/couch_mrview/src/couch_mrview_compactor.erl +++ b/src/couch_mrview/src/couch_mrview_compactor.erl @@ -86,7 +86,9 @@ compact(State) -> {type, view_compaction}, {database, DbName}, {design_document, IdxName}, - {progress, 0} + {progress, 0}, + {changes_done, 0}, + {total_changes, TotalChanges} ]), BufferSize0 = config:get( -- cgit v1.2.1 From e99a7b73db416c6c0251045ed611f4dfa64bf1d0 Mon Sep 17 00:00:00 2001 From: lag-linaro Date: Thu, 8 Nov 2018 00:10:06 +0000 Subject: Use mainline rebar (v2) (#1680) --- configure | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure b/configure index 370c964ae..b9a021d53 100755 --- a/configure +++ b/configure @@ -209,7 +209,7 @@ EOF install_local_rebar() { if [ ! -x "${rootdir}/bin/rebar" ]; then if [ ! -d "${rootdir}/src/rebar" ]; then - git clone --depth 1 --branch 2.6.0-couchdb https://github.com/apache/couchdb-rebar.git ${rootdir}/src/rebar + git clone --depth 1 https://github.com/apache/couchdb-rebar.git ${rootdir}/src/rebar fi make -C ${rootdir}/src/rebar mv ${rootdir}/src/rebar/rebar ${rootdir}/bin/rebar -- cgit v1.2.1 From c157c968b349112c5b3e69a3ded7aca7996e761d Mon Sep 17 00:00:00 2001 From: Ryan Gordon Date: Thu, 8 Nov 2018 00:37:49 +0000 Subject: #1705 Add UUID attribute to welcome message (#1708) --- src/chttpd/src/chttpd_misc.erl | 1 + src/chttpd/test/chttpd_welcome_test.erl | 20 +++++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl index 39ffa2368..1a6b3cb50 100644 --- a/src/chttpd/src/chttpd_misc.erl +++ b/src/chttpd/src/chttpd_misc.erl @@ -50,6 +50,7 @@ handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) -> {couchdb, WelcomeMessage}, {version, list_to_binary(couch_server:get_version())}, {git_sha, list_to_binary(couch_server:get_git_sha())}, + {uuid, couch_server:get_uuid()}, {features, config:features()} ] ++ case config:get("vendor") of [] -> diff --git a/src/chttpd/test/chttpd_welcome_test.erl b/src/chttpd/test/chttpd_welcome_test.erl index b737abd7a..e427f4dff 100644 --- a/src/chttpd/test/chttpd_welcome_test.erl +++ b/src/chttpd/test/chttpd_welcome_test.erl @@ -45,12 +45,30 @@ welcome_test_() -> fun setup/0, fun teardown/1, [ fun should_have_version/1, - fun should_have_features/1 + fun should_have_features/1, + fun should_have_uuid/1 ] } } }. +should_have_uuid(Url) -> + ?_test(begin + {ok, Status, _, Body} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]), + ?assertEqual(200, Status), + {Json} = ?JSON_DECODE(Body), + CouchDB = couch_util:get_value(<<"couchdb">>, Json, undefined), + Uuid = couch_util:get_value(<<"uuid">>, Json, undefined), + Features = couch_util:get_value(<<"features">>, Json, undefined), + Sha = couch_util:get_value(<<"git_sha">>, Json, undefined), + ?assertNotEqual(Sha, undefined), + ?assertEqual(<<"Welcome">>, CouchDB), + RealUuid = couch_server:get_uuid(), + + ?assertEqual(RealUuid, Uuid), + ?assert(is_list(Features)) + end). + should_have_version(Url) -> ?_test(begin -- cgit v1.2.1
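For reference, a sketch of the EJSON body the welcome handler serves for GET / after the last change above; the uuid comes from couch_server:get_uuid/0, and all field values here are illustrative only, not real output:

    %% Sketch (illustrative values):
    {[
        {couchdb, <<"Welcome">>},
        {version, <<"2.2.0">>},
        {git_sha, <<"096994b">>},
        {uuid, <<"85fb71bf700c17267fef77535820e371">>},
        {features, []}
    ]}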