diff options
39 files changed, 823 insertions, 182 deletions
diff --git a/.gitignore b/.gitignore index 5eec70f3e..3fa860c59 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ *~ .venv .DS_Store +.vscode .rebar/ .eunit/ cover/ diff --git a/INSTALL.Unix.md b/INSTALL.Unix.md index f0baf58c9..1934e9be9 100644 --- a/INSTALL.Unix.md +++ b/INSTALL.Unix.md @@ -39,7 +39,10 @@ You should have the following installed: * Erlang OTP (>=R16B03-1, =<19.x) (http://erlang.org/) * ICU (http://icu-project.org/) * OpenSSL (http://www.openssl.org/) - * Mozilla SpiderMonkey (1.8.5) (https://developer.mozilla.org/en/docs/Mozilla/Projects/SpiderMonkey/Releases/1.8.5) + * Mozilla SpiderMonkey - either 1.8.5 or 60 + * 60 is not supported on ARM 64-bit (aarch64) at this time. + * https://developer.mozilla.org/en/docs/Mozilla/Projects/SpiderMonkey/Releases/1.8.5 + * https://archive.mozilla.org/pub/firefox/releases/60.9.0esr/source/ (src/js) * GNU Make (http://www.gnu.org/software/make/) * GNU Compiler Collection (http://gcc.gnu.org/) * libcurl (http://curl.haxx.se/libcurl/) diff --git a/build-aux/Jenkinsfile.full b/build-aux/Jenkinsfile.full index 2a298f36d..b9067a192 100644 --- a/build-aux/Jenkinsfile.full +++ b/build-aux/Jenkinsfile.full @@ -152,7 +152,8 @@ pipeline { junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml, **/src/mango/nosetests.xml, **/test/javascript/junit.xml' } cleanup { - sh 'rm -rf $COUCHDB_IO_LOG_DIR' + sh 'killall -9 beam.smp || true' + sh 'rm -rf ${WORKSPACE}/* ${COUCHDB_IO_LOG_DIR} || true' } } // post } // stage FreeBSD @@ -478,7 +479,7 @@ pipeline { } environment { platform = 'buster' - sm_ver = '60' + sm_ver = '1.8.5' } stages { stage('Build from tarball & test') { @@ -31,6 +31,7 @@ SKIP_DEPS=0 COUCHDB_USER="$(whoami 2>/dev/null || echo couchdb)" SM_VSN="1.8.5" +ARCH="$(uname -m)" . 
${rootdir}/version.mk COUCHDB_VERSION=${vsn_major}.${vsn_minor}.${vsn_patch} @@ -177,6 +178,12 @@ parse_opts() { parse_opts $@ +if [ "${ARCH}" = "aarch64" ] && [ "${SM_VSN}" != "1.8.5" ] +then + echo "ERROR: SpiderMonkey 60 is known broken on ARM 64 (aarch64). Use 1.8.5 instead." + exit 1 +fi + echo "==> configuring couchdb in rel/couchdb.config" cat > rel/couchdb.config << EOF % Licensed under the Apache License, Version 2.0 (the "License"); you may not diff --git a/rebar.config.script b/rebar.config.script index 5d5a6aac3..e39a08228 100644 --- a/rebar.config.script +++ b/rebar.config.script @@ -158,7 +158,7 @@ DepDescs = [ {hyper, "hyper", {tag, "CouchDB-2.2.0-4"}}, {ibrowse, "ibrowse", {tag, "CouchDB-4.0.1-1"}}, {jiffy, "jiffy", {tag, "CouchDB-0.14.11-2"}}, -{mochiweb, "mochiweb", {tag, "v2.19.0"}}, +{mochiweb, "mochiweb", {tag, "v2.20.0"}}, {meck, "meck", {tag, "0.8.8"}} ], diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index 5fc8e0761..764620a66 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -216,6 +216,12 @@ port = 6984 ; buffer_count = 2000 ; server_per_node = true ; stream_limit = 5 +; +; Use a single message to kill a group of remote workers This is +; mostly is an upgrade clause to allow operating in a mixed cluster of +; 2.x and 3.x nodes. After upgrading switch to true to save some +; network bandwidth +;use_kill_all = false ; [global_changes] ; max_event_delay = 25 @@ -245,6 +251,8 @@ iterations = 10 ; iterations for password hashing ; secret = ; users_db_public = false ; cookie_domain = example.com +; Set the SameSite cookie property for the auth cookie. If empty, the SameSite property is not set. +; same_site = ; CSP (Content Security Policy) Support for _utils [csp] @@ -315,6 +323,10 @@ os_process_limit = 100 ;index_all_disabled = false ; Default limit value for mango _find queries. 
;default_limit = 25 +; Ratio between documents scanned and results matched that will +; generate a warning in the _find response. Setting this to 0 disables +; the warning. +;index_scan_warning_threshold = 10 [indexers] couch_mrview = true @@ -576,3 +588,14 @@ compaction = false ; CouchDB will use the value of `max_limit` instead. If neither is ; defined, the default is 2000 as stated here. ; max_limit_partitions = 2000 + +[reshard] +;max_jobs = 48 +;max_history = 20 +;max_retries = 1 +;retry_interval_sec = 10 +;delete_source = true +;update_shard_map_timeout_sec = 60 +;source_close_timeout_sec = 600 +;require_node_param = false +;require_range_param = false diff --git a/src/chttpd/src/chttpd_auth.erl b/src/chttpd/src/chttpd_auth.erl index 45e11905b..607f09a8a 100644 --- a/src/chttpd/src/chttpd_auth.erl +++ b/src/chttpd/src/chttpd_auth.erl @@ -55,10 +55,12 @@ party_mode_handler(#httpd{method='POST', path_parts=[<<"_session">>]} = Req) -> % See #1947 - users should always be able to attempt a login Req#httpd{user_ctx=#user_ctx{}}; party_mode_handler(Req) -> - case config:get("chttpd", "require_valid_user", "false") of - "true" -> + RequireValidUser = config:get_boolean("chttpd", "require_valid_user", false), + ExceptUp = config:get_boolean("chttpd", "require_valid_user_except_for_up", true), + case RequireValidUser andalso not ExceptUp of + true -> throw({unauthorized, <<"Authentication required.">>}); - "false" -> + false -> case config:get("admins") of [] -> Req#httpd{user_ctx = ?ADMIN_USER}; diff --git a/src/couch/priv/stats_descriptions.cfg b/src/couch/priv/stats_descriptions.cfg index ae203bb21..7c8fd94cb 100644 --- a/src/couch/priv/stats_descriptions.cfg +++ b/src/couch/priv/stats_descriptions.cfg @@ -302,3 +302,31 @@ {type, counter}, {desc, <<"number of mango queries that could not use an index">>} ]}. +{[mango, query_invalid_index], [ + {type, counter}, + {desc, <<"number of mango queries that generated an invalid index warning">>} +]}. 
+{[mango, too_many_docs_scanned], [ + {type, counter}, + {desc, <<"number of mango queries that generated an index scan warning">>} +]}. +{[mango, docs_examined], [ + {type, counter}, + {desc, <<"number of documents examined by mango queries coordinated by this node">>} +]}. +{[mango, quorum_docs_examined], [ + {type, counter}, + {desc, <<"number of documents examined by mango queries, using cluster quorum">>} +]}. +{[mango, results_returned], [ + {type, counter}, + {desc, <<"number of rows returned by mango queries">>} +]}. +{[mango, query_time], [ + {type, histogram}, + {desc, <<"length of time processing a mango query">>} +]}. +{[mango, evaluate_selector], [ + {type, counter}, + {desc, <<"number of mango selector evaluations">>} +]}. diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl index 515ce6132..5e4450301 100644 --- a/src/couch/src/couch_httpd_auth.erl +++ b/src/couch/src/couch_httpd_auth.erl @@ -88,11 +88,6 @@ basic_name_pw(Req) -> default_authentication_handler(Req) -> default_authentication_handler(Req, couch_auth_cache). -default_authentication_handler(#httpd{path_parts=[<<"_up">>]}=Req, AuthModule) -> - case config:get_boolean("chttpd", "require_valid_user_except_for_up", false) of - true -> Req#httpd{user_ctx=?ADMIN_USER}; - _False -> default_authentication_handler(Req, AuthModule) - end; default_authentication_handler(Req, AuthModule) -> case basic_name_pw(Req) of {User, Pass} -> @@ -273,7 +268,7 @@ cookie_auth_cookie(Req, User, Secret, TimeStamp) -> Hash = crypto:hmac(sha, Secret, SessionData), mochiweb_cookies:cookie("AuthSession", couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)), - [{path, "/"}] ++ cookie_scheme(Req) ++ max_age() ++ cookie_domain()). + [{path, "/"}] ++ cookie_scheme(Req) ++ max_age() ++ cookie_domain() ++ same_site()). ensure_cookie_auth_secret() -> case config:get("couch_httpd_auth", "secret", undefined) of @@ -457,6 +452,20 @@ cookie_domain() -> _ -> [{domain, Domain}] end. 
+ +same_site() -> + SameSite = config:get("couch_httpd_auth", "same_site", ""), + case string:to_lower(SameSite) of + "" -> []; + "none" -> [{same_site, none}]; + "lax" -> [{same_site, lax}]; + "strict" -> [{same_site, strict}]; + _ -> + couch_log:error("invalid config value couch_httpd_auth.same_site: ~p ",[SameSite]), + [] + end. + + reject_if_totp(User) -> case get_totp_config(User) of undefined -> diff --git a/src/couch/test/exunit/same_site_cookie_tests.exs b/src/couch/test/exunit/same_site_cookie_tests.exs new file mode 100644 index 000000000..bad32ada4 --- /dev/null +++ b/src/couch/test/exunit/same_site_cookie_tests.exs @@ -0,0 +1,44 @@ +defmodule SameSiteCookieTests do + use CouchTestCase + + @moduletag :authentication + + def get_cookie(user, pass) do + resp = Couch.post("/_session", body: %{:username => user, :password => pass}) + + true = resp.body["ok"] + resp.headers[:"set-cookie"] + end + + @tag config: [{"admins", "jan", "apple"}, {"couch_httpd_auth", "same_site", "None"}] + test "Set same_site None" do + cookie = get_cookie("jan", "apple") + assert cookie =~ "; SameSite=None" + end + + @tag config: [{"admins", "jan", "apple"}, {"couch_httpd_auth", "same_site", ""}] + test "same_site not set" do + cookie = get_cookie("jan", "apple") + assert cookie + refute cookie =~ "; SameSite=" + end + + @tag config: [{"admins", "jan", "apple"}, {"couch_httpd_auth", "same_site", "Strict"}] + test "Set same_site Strict" do + cookie = get_cookie("jan", "apple") + assert cookie =~ "; SameSite=Strict" + end + + @tag config: [{"admins", "jan", "apple"}, {"couch_httpd_auth", "same_site", "Lax"}] + test "Set same_site Lax" do + cookie = get_cookie("jan", "apple") + assert cookie =~ "; SameSite=Lax" + end + + @tag config: [{"admins", "jan", "apple"}, {"couch_httpd_auth", "same_site", "Invalid"}] + test "Set same_site invalid" do + cookie = get_cookie("jan", "apple") + assert cookie + refute cookie =~ "; SameSite=" + end +end diff --git a/src/dreyfus/src/dreyfus_fabric.erl 
b/src/dreyfus/src/dreyfus_fabric.erl index a953b6a38..0b25a6cc6 100644 --- a/src/dreyfus/src/dreyfus_fabric.erl +++ b/src/dreyfus/src/dreyfus_fabric.erl @@ -14,7 +14,7 @@ %% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- -module(dreyfus_fabric). --export([get_json_docs/2, handle_error_message/6]). +-export([get_json_docs/2, handle_error_message/7]). -include_lib("couch/include/couch_db.hrl"). -include_lib("mem3/include/mem3.hrl"). @@ -36,40 +36,42 @@ callback(timeout, _Acc) -> {error, timeout}. handle_error_message({rexi_DOWN, _, {_, NodeRef}, _}, _Worker, - Counters, _Replacements, _StartFun, _StartArgs) -> - case fabric_util:remove_down_workers(Counters, NodeRef) of + Counters, _Replacements, _StartFun, _StartArgs, RingOpts) -> + case fabric_util:remove_down_workers(Counters, NodeRef, RingOpts) of {ok, NewCounters} -> {ok, NewCounters}; error -> {error, {nodedown, <<"progress not possible">>}} end; handle_error_message({rexi_EXIT, {maintenance_mode, _}}, Worker, - Counters, Replacements, StartFun, StartArgs) -> - handle_replacement(Worker, Counters, Replacements, StartFun, StartArgs); + Counters, Replacements, StartFun, StartArgs, RingOpts) -> + handle_replacement(Worker, Counters, Replacements, StartFun, StartArgs, + RingOpts); handle_error_message({rexi_EXIT, Reason}, Worker, - Counters, _Replacements, _StartFun, _StartArgs) -> - handle_error(Reason, Worker, Counters); + Counters, _Replacements, _StartFun, _StartArgs, RingOpts) -> + handle_error(Reason, Worker, Counters, RingOpts); handle_error_message({error, Reason}, Worker, - Counters, _Replacements, _StartFun, _StartArgs) -> - handle_error(Reason, Worker, Counters); + Counters, _Replacements, _StartFun, _StartArgs, RingOpts) -> + handle_error(Reason, Worker, Counters, RingOpts); handle_error_message({'EXIT', Reason}, Worker, - Counters, _Replacements, _StartFun, _StartArgs) -> - handle_error({exit, Reason}, Worker, Counters); + Counters, _Replacements, _StartFun, _StartArgs, RingOpts) -> + 
handle_error({exit, Reason}, Worker, Counters, RingOpts); handle_error_message(Reason, Worker, Counters, - _Replacements, _StartFun, _StartArgs) -> + _Replacements, _StartFun, _StartArgs, RingOpts) -> couch_log:error("Unexpected error during request: ~p", [Reason]), - handle_error(Reason, Worker, Counters). + handle_error(Reason, Worker, Counters, RingOpts). -handle_error(Reason, Worker, Counters0) -> +handle_error(Reason, Worker, Counters0, RingOpts) -> Counters = fabric_dict:erase(Worker, Counters0), - case fabric_view:is_progress_possible(Counters) of + case fabric_ring:is_progress_possible(Counters, RingOpts) of true -> {ok, Counters}; false -> {error, Reason} end. -handle_replacement(Worker, OldCntrs0, OldReplacements, StartFun, StartArgs) -> +handle_replacement(Worker, OldCntrs0, OldReplacements, StartFun, StartArgs, + RingOpts) -> OldCounters = lists:filter(fun({#shard{ref=R}, _}) -> R /= Worker#shard.ref end, OldCntrs0), @@ -79,12 +81,12 @@ handle_replacement(Worker, OldCntrs0, OldReplacements, StartFun, StartArgs) -> NewCounter = start_replacement(StartFun, StartArgs, Repl), fabric_dict:store(NewCounter, nil, CounterAcc) end, OldCounters, Replacements), - true = fabric_view:is_progress_possible(NewCounters), + true = fabric_ring:is_progress_possible(NewCounters, RingOpts), NewRefs = fabric_dict:fetch_keys(NewCounters), {new_refs, NewRefs, NewCounters, NewReplacements}; false -> handle_error({nodedown, <<"progress not possible">>}, - Worker, OldCounters) + Worker, OldCounters, RingOpts) end. start_replacement(StartFun, StartArgs, Shard) -> @@ -106,3 +108,98 @@ start_replacement(StartFun, StartArgs, Shard) -> {dreyfus_rpc, StartFun, [Shard#shard.name|StartArgs1]}), Shard#shard{ref = Ref}. + + +-ifdef(TEST). + +-include_lib("eunit/include/eunit.hrl"). 
+ + +node_down_test() -> + [S1, S2, S3] = [ + mk_shard("n1", [0, 4]), + mk_shard("n1", [5, ?RING_END]), + mk_shard("n2", [0, ?RING_END]) + ], + [W1, W2, W3] = [ + S1#shard{ref = make_ref()}, + S2#shard{ref = make_ref()}, + S3#shard{ref = make_ref()} + ], + Counters1 = fabric_dict:init([W1, W2, W3], nil), + + N1 = S1#shard.node, + Msg1 = {rexi_DOWN, nil, {nil, N1}, nil}, + Res1 = handle_error_message(Msg1, nil, Counters1, nil, nil, nil, []), + ?assertEqual({ok, [{W3, nil}]}, Res1), + + {ok, Counters2} = Res1, + N2 = S3#shard.node, + Msg2 = {rexi_DOWN, nil, {nil, N2}, nil}, + Res2 = handle_error_message(Msg2, nil, Counters2, nil, nil, nil, []), + ?assertEqual({error, {nodedown, <<"progress not possible">>}}, Res2). + + +worker_error_test() -> + [S1, S2] = [ + mk_shard("n1", [0, ?RING_END]), + mk_shard("n2", [0, ?RING_END]) + ], + [W1, W2] = [S1#shard{ref = make_ref()}, S2#shard{ref = make_ref()}], + Counters1 = fabric_dict:init([W1, W2], nil), + + Res1 = handle_error(bam, W1, Counters1, []), + ?assertEqual({ok, [{W2, nil}]}, Res1), + + {ok, Counters2} = Res1, + ?assertEqual({error, boom}, handle_error(boom, W2, Counters2, [])). + + +node_down_with_partitions_test() -> + [S1, S2] = [ + mk_shard("n1", [0, 4]), + mk_shard("n2", [0, 8]) + ], + [W1, W2] = [ + S1#shard{ref = make_ref()}, + S2#shard{ref = make_ref()} + ], + Counters1 = fabric_dict:init([W1, W2], nil), + RingOpts = [{any, [S1, S2]}], + + N1 = S1#shard.node, + Msg1 = {rexi_DOWN, nil, {nil, N1}, nil}, + Res1 = handle_error_message(Msg1, nil, Counters1, nil, nil, nil, RingOpts), + ?assertEqual({ok, [{W2, nil}]}, Res1), + + {ok, Counters2} = Res1, + N2 = S2#shard.node, + Msg2 = {rexi_DOWN, nil, {nil, N2}, nil}, + Res2 = handle_error_message(Msg2, nil, Counters2, nil, nil, nil, RingOpts), + ?assertEqual({error, {nodedown, <<"progress not possible">>}}, Res2). 
+ + +worker_error_with_partitions_test() -> + [S1, S2] = [ + mk_shard("n1", [0, 4]), + mk_shard("n2", [0, 8])], + [W1, W2] = [ + S1#shard{ref = make_ref()}, + S2#shard{ref = make_ref()} + ], + Counters1 = fabric_dict:init([W1, W2], nil), + RingOpts = [{any, [S1, S2]}], + + Res1 = handle_error(bam, W1, Counters1, RingOpts), + ?assertEqual({ok, [{W2, nil}]}, Res1), + + {ok, Counters2} = Res1, + ?assertEqual({error, boom}, handle_error(boom, W2, Counters2, RingOpts)). + + +mk_shard(Name, Range) -> + Node = list_to_atom(Name), + BName = list_to_binary(Name), + #shard{name = BName, node = Node, range = Range}. + +-endif. diff --git a/src/dreyfus/src/dreyfus_fabric_group1.erl b/src/dreyfus/src/dreyfus_fabric_group1.erl index 2d530ca7e..bdae6f040 100644 --- a/src/dreyfus/src/dreyfus_fabric_group1.erl +++ b/src/dreyfus/src/dreyfus_fabric_group1.erl @@ -27,7 +27,8 @@ top_groups, counters, start_args, - replacements + replacements, + ring_opts }). go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) -> @@ -39,6 +40,7 @@ go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> DesignName = dreyfus_util:get_design_docid(DDoc), dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName), Shards = dreyfus_util:get_shards(DbName, QueryArgs), + RingOpts = dreyfus_util:get_ring_opts(QueryArgs, Shards), Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group1, [DDoc, IndexName, dreyfus_util:export(QueryArgs)]), Replacements = fabric_view:get_shard_replacements(DbName, Workers), @@ -50,7 +52,8 @@ go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> top_groups = [], counters = Counters, start_args = [DDoc, IndexName, QueryArgs], - replacements = Replacements + replacements = Replacements, + ring_opts = RingOpts }, try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, @@ -89,7 +92,7 @@ handle_message(Error, Worker, State0) -> State = upgrade_state(State0), case dreyfus_fabric:handle_error_message(Error, Worker, State#state.counters, 
State#state.replacements, - group1, State#state.start_args) of + group1, State#state.start_args, State#state.ring_opts) of {ok, Counters} -> {ok, State#state{counters=Counters}}; {new_refs, NewRefs, NewCounters, NewReplacements} -> diff --git a/src/dreyfus/src/dreyfus_fabric_group2.erl b/src/dreyfus/src/dreyfus_fabric_group2.erl index 1239f8b74..8d864dd0c 100644 --- a/src/dreyfus/src/dreyfus_fabric_group2.erl +++ b/src/dreyfus/src/dreyfus_fabric_group2.erl @@ -29,7 +29,8 @@ top_groups, counters, start_args, - replacements + replacements, + ring_opts }). go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) -> @@ -41,6 +42,7 @@ go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> DesignName = dreyfus_util:get_design_docid(DDoc), dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName), Shards = dreyfus_util:get_shards(DbName, QueryArgs), + RingOpts = dreyfus_util:get_ring_opts(QueryArgs, Shards), Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group2, [DDoc, IndexName, dreyfus_util:export(QueryArgs)]), Replacements = fabric_view:get_shard_replacements(DbName, Workers), @@ -54,7 +56,8 @@ go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> top_groups = [], counters = Counters, start_args = [DDoc, IndexName, QueryArgs], - replacements = Replacements + replacements = Replacements, + ring_opts = RingOpts }, try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, @@ -102,7 +105,7 @@ handle_message(Error, Worker, State0) -> State = upgrade_state(State0), case dreyfus_fabric:handle_error_message(Error, Worker, State#state.counters, State#state.replacements, - group2, State#state.start_args) of + group2, State#state.start_args, State#state.ring_opts) of {ok, Counters} -> {ok, State#state{counters=Counters}}; {new_refs, NewRefs, NewCounters, NewReplacements} -> diff --git a/src/dreyfus/src/dreyfus_fabric_info.erl b/src/dreyfus/src/dreyfus_fabric_info.erl index 27eec8065..e217bc0ef 100644 --- 
a/src/dreyfus/src/dreyfus_fabric_info.erl +++ b/src/dreyfus/src/dreyfus_fabric_info.erl @@ -49,7 +49,7 @@ handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, {Counters, Acc}) -> handle_message({rexi_EXIT, Reason}, Worker, {Counters, Acc}) -> NewCounters = fabric_dict:erase(Worker, Counters), - case fabric_view:is_progress_possible(NewCounters) of + case fabric_ring:is_progress_possible(NewCounters) of true -> {ok, {NewCounters, Acc}}; false -> @@ -74,7 +74,7 @@ handle_message({ok, Info}, Worker, {Counters, Acc}) -> handle_message({error, Reason}, Worker, {Counters, Acc}) -> NewCounters = fabric_dict:erase(Worker, Counters), - case fabric_view:is_progress_possible(NewCounters) of + case fabric_ring:is_progress_possible(NewCounters) of true -> {ok, {NewCounters, Acc}}; false -> @@ -82,7 +82,7 @@ handle_message({error, Reason}, Worker, {Counters, Acc}) -> end; handle_message({'EXIT', _}, Worker, {Counters, Acc}) -> NewCounters = fabric_dict:erase(Worker, Counters), - case fabric_view:is_progress_possible(NewCounters) of + case fabric_ring:is_progress_possible(NewCounters) of true -> {ok, {NewCounters, Acc}}; false -> diff --git a/src/dreyfus/src/dreyfus_fabric_search.erl b/src/dreyfus/src/dreyfus_fabric_search.erl index acf7a83ec..c0ebde1d6 100644 --- a/src/dreyfus/src/dreyfus_fabric_search.erl +++ b/src/dreyfus/src/dreyfus_fabric_search.erl @@ -27,7 +27,8 @@ top_docs, counters, start_args, - replacements + replacements, + ring_opts }). 
go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) -> @@ -40,10 +41,11 @@ go(DbName, DDoc, IndexName, #index_query_args{bookmark=nil}=QueryArgs) -> DesignName = dreyfus_util:get_design_docid(DDoc), dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName), Shards = dreyfus_util:get_shards(DbName, QueryArgs), + RingOpts = dreyfus_util:get_ring_opts(QueryArgs, Shards), Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, search, [DDoc, IndexName, dreyfus_util:export(QueryArgs)]), Counters = fabric_dict:init(Workers, nil), - go(DbName, DDoc, IndexName, QueryArgs, Counters, Counters); + go(DbName, DDoc, IndexName, QueryArgs, Counters, Counters, RingOpts); go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> Bookmark0 = try dreyfus_bookmark:unpack(DbName, QueryArgs) @@ -54,6 +56,7 @@ go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> Shards = dreyfus_util:get_shards(DbName, QueryArgs), LiveNodes = [node() | nodes()], LiveShards = [S || #shard{node=Node} = S <- Shards, lists:member(Node, LiveNodes)], + RingOpts = dreyfus_util:get_ring_opts(QueryArgs, LiveShards), Bookmark1 = dreyfus_bookmark:add_missing_shards(Bookmark0, LiveShards), Counters0 = lists:flatmap(fun({#shard{name=Name, node=N} = Shard, After}) -> QueryArgs1 = dreyfus_util:export(QueryArgs#index_query_args{ @@ -73,14 +76,16 @@ go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> end end, Bookmark1), Counters = fabric_dict:init(Counters0, nil), + WorkerShards = fabric_dict:fetch_keys(Counters), + RingOpts = dreyfus_util:get_ring_opts(QueryArgs, WorkerShards), QueryArgs2 = QueryArgs#index_query_args{ bookmark = Bookmark1 }, - go(DbName, DDoc, IndexName, QueryArgs2, Counters, Bookmark1); + go(DbName, DDoc, IndexName, QueryArgs2, Counters, Bookmark1, RingOpts); go(DbName, DDoc, IndexName, OldArgs) -> go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)). 
-go(DbName, DDoc, IndexName, QueryArgs, Counters, Bookmark) -> +go(DbName, DDoc, IndexName, QueryArgs, Counters, Bookmark, RingOpts) -> {Workers, _} = lists:unzip(Counters), #index_query_args{ limit = Limit, @@ -94,7 +99,8 @@ go(DbName, DDoc, IndexName, QueryArgs, Counters, Bookmark) -> top_docs = #top_docs{total_hits=0,hits=[]}, counters = Counters, start_args = [DDoc, IndexName, QueryArgs], - replacements = Replacements + replacements = Replacements, + ring_opts = RingOpts }, RexiMon = fabric_util:create_monitors(Workers), try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, @@ -154,7 +160,7 @@ handle_message(Error, Worker, State0) -> State = upgrade_state(State0), case dreyfus_fabric:handle_error_message(Error, Worker, State#state.counters, State#state.replacements, - search, State#state.start_args) of + search, State#state.start_args, State#state.ring_opts) of {ok, Counters} -> {ok, State#state{counters=Counters}}; {new_refs, NewRefs, NewCounters, NewReplacements} -> diff --git a/src/dreyfus/src/dreyfus_util.erl b/src/dreyfus/src/dreyfus_util.erl index 0a83e87bd..05ecdb621 100644 --- a/src/dreyfus/src/dreyfus_util.erl +++ b/src/dreyfus/src/dreyfus_util.erl @@ -19,7 +19,7 @@ -include_lib("mem3/include/mem3.hrl"). -include_lib("couch/include/couch_db.hrl"). --export([get_shards/2, sort/2, upgrade/1, export/1, time/2]). +-export([get_shards/2, get_ring_opts/2, sort/2, upgrade/1, export/1, time/2]). -export([in_black_list/1, in_black_list/3, maybe_deny_index/3]). -export([get_design_docid/1]). -export([ @@ -59,6 +59,15 @@ use_ushards(#index_query_args{stable=true}) -> use_ushards(#index_query_args{}) -> false. + +get_ring_opts(#index_query_args{partition = nil}, _Shards) -> + []; +get_ring_opts(#index_query_args{}, Shards) -> + Shards1 = lists:map(fun(#shard{} = S) -> + S#shard{ref = undefined} + end, Shards), + [{any, Shards1}]. + -spec sort(Order :: relevance | [any()], [#sortable{}]) -> [#sortable{}]. 
sort(Sort, List0) -> {List1, Stash} = stash_items(List0), @@ -418,4 +427,15 @@ stash_test() -> Unstashed = hd(unstash_items(Stashed, Stash)), ?assertEqual(Unstashed#sortable.item, bar). + +ring_opts_test() -> + Shards = [#shard{name = foo, ref = make_ref()}], + + QArgs1 = #index_query_args{partition = nil}, + ?assertEqual([], get_ring_opts(QArgs1, Shards)), + + QArgs2 = #index_query_args{partition = <<"x">>}, + ?assertMatch([{any, [#shard{name = foo, ref = undefined}]}], + get_ring_opts(QArgs2, Shards)). + -endif. diff --git a/src/fabric/src/fabric_db_partition_info.erl b/src/fabric/src/fabric_db_partition_info.erl index 2978832f0..954c52db2 100644 --- a/src/fabric/src/fabric_db_partition_info.erl +++ b/src/fabric/src/fabric_db_partition_info.erl @@ -17,15 +17,27 @@ -include_lib("fabric/include/fabric.hrl"). -include_lib("mem3/include/mem3.hrl"). + +-record(acc, { + counters, + replies, + ring_opts +}). + + go(DbName, Partition) -> - Shards = mem3:shards(DbName, <<Partition/binary, ":foo">>), + Shards = mem3:shards(DbName, couch_partition:shard_key(Partition)), Workers = fabric_util:submit_jobs(Shards, get_partition_info, [Partition]), RexiMon = fabric_util:create_monitors(Shards), Fun = fun handle_message/3, - Acc0 = {fabric_dict:init(Workers, nil), []}, + Acc0 = #acc{ + counters = fabric_dict:init(Workers, nil), + replies = [], + ring_opts = [{any, Shards}] + }, try case fabric_util:recv(Workers, #shard.ref, Fun, Acc0) of - {ok, Acc} -> {ok, Acc}; + {ok, Res} -> {ok, Res}; {timeout, {WorkersDict, _}} -> DefunctWorkers = fabric_util:remove_done_workers( WorkersDict, @@ -42,36 +54,39 @@ go(DbName, Partition) -> rexi_monitor:stop(RexiMon) end. 
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {Counters, Acc}) -> - case fabric_util:remove_down_workers(Counters, NodeRef) of +handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, #acc{} = Acc) -> + #acc{counters = Counters, ring_opts = RingOpts} = Acc, + case fabric_util:remove_down_workers(Counters, NodeRef, RingOpts) of {ok, NewCounters} -> - {ok, {NewCounters, Acc}}; + {ok, Acc#acc{counters = NewCounters}}; error -> {error, {nodedown, <<"progress not possible">>}} end; -handle_message({rexi_EXIT, Reason}, Shard, {Counters, Acc}) -> +handle_message({rexi_EXIT, Reason}, Shard, #acc{} = Acc) -> + #acc{counters = Counters, ring_opts = RingOpts} = Acc, NewCounters = fabric_dict:erase(Shard, Counters), - case fabric_ring:is_progress_possible(NewCounters) of + case fabric_ring:is_progress_possible(NewCounters, RingOpts) of true -> - {ok, {NewCounters, Acc}}; + {ok, Acc#acc{counters = NewCounters}}; false -> {error, Reason} end; -handle_message({ok, Info}, #shard{dbname=Name} = Shard, {Counters, Acc}) -> - Acc2 = [Info | Acc], +handle_message({ok, Info}, #shard{dbname=Name} = Shard, #acc{} = Acc) -> + #acc{counters = Counters, replies = Replies} = Acc, + Replies1 = [Info | Replies], Counters1 = fabric_dict:erase(Shard, Counters), case fabric_dict:size(Counters1) =:= 0 of true -> - [FirstInfo | RestInfos] = Acc2, + [FirstInfo | RestInfos] = Replies1, PartitionInfo = get_max_partition_size(FirstInfo, RestInfos), {stop, [{db_name, Name} | format_partition(PartitionInfo)]}; false -> - {ok, {Counters1, Acc2}} + {ok, Acc#acc{counters = Counters1, replies = Replies1}} end; -handle_message(_, _, Acc) -> +handle_message(_, _, #acc{} = Acc) -> {ok, Acc}. @@ -97,3 +112,44 @@ format_partition(PartitionInfo) -> {value, {sizes, Size}, PartitionInfo1} = lists:keytake(sizes, 1, PartitionInfo), [{sizes, {Size}} | PartitionInfo1]. + +-ifdef(TEST). + +-include_lib("eunit/include/eunit.hrl"). 
+ + +node_down_test() -> + [S1, S2] = [mk_shard("n1", [0, 4]), mk_shard("n2", [0, 8])], + Acc1 = #acc{ + counters = fabric_dict:init([S1, S2], nil), + ring_opts = [{any, [S1, S2]}] + }, + + N1 = S1#shard.node, + {ok, Acc2} = handle_message({rexi_DOWN, nil, {nil, N1}, nil}, nil, Acc1), + ?assertEqual([{S2, nil}], Acc2#acc.counters), + + N2 = S2#shard.node, + ?assertEqual({error, {nodedown, <<"progress not possible">>}}, + handle_message({rexi_DOWN, nil, {nil, N2}, nil}, nil, Acc2)). + + +worker_exit_test() -> + [S1, S2] = [mk_shard("n1", [0, 4]), mk_shard("n2", [0, 8])], + Acc1 = #acc{ + counters = fabric_dict:init([S1, S2], nil), + ring_opts = [{any, [S1, S2]}] + }, + + {ok, Acc2} = handle_message({rexi_EXIT, boom}, S1, Acc1), + ?assertEqual([{S2, nil}], Acc2#acc.counters), + + ?assertEqual({error, bam}, handle_message({rexi_EXIT, bam}, S2, Acc2)). + + +mk_shard(Name, Range) -> + Node = list_to_atom(Name), + BName = list_to_binary(Name), + #shard{name = BName, node = Node, range = Range}. + +-endif. diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl index aaf0623f0..8aa14e73a 100644 --- a/src/fabric/src/fabric_util.erl +++ b/src/fabric/src/fabric_util.erl @@ -14,7 +14,7 @@ -export([submit_jobs/3, submit_jobs/4, cleanup/1, recv/4, get_db/1, get_db/2, error_info/1, update_counter/3, remove_ancestors/2, create_monitors/1, kv/2, - remove_down_workers/2, doc_id_and_rev/1]). + remove_down_workers/2, remove_down_workers/3, doc_id_and_rev/1]). -export([request_timeout/0, attachments_timeout/0, all_docs_timeout/0, view_timeout/1]). -export([log_timeout/2, remove_done_workers/2]). -export([is_users_db/1, is_replicator_db/1]). @@ -33,9 +33,12 @@ -include_lib("eunit/include/eunit.hrl"). remove_down_workers(Workers, BadNode) -> + remove_down_workers(Workers, BadNode, []). 
+ +remove_down_workers(Workers, BadNode, RingOpts) -> Filter = fun(#shard{node = Node}, _) -> Node =/= BadNode end, NewWorkers = fabric_dict:filter(Filter, Workers), - case fabric_ring:is_progress_possible(NewWorkers) of + case fabric_ring:is_progress_possible(NewWorkers, RingOpts) of true -> {ok, NewWorkers}; false -> diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl index 55b44e6f7..425f864c4 100644 --- a/src/fabric/src/fabric_view.erl +++ b/src/fabric/src/fabric_view.erl @@ -12,7 +12,7 @@ -module(fabric_view). --export([is_progress_possible/1, remove_overlapping_shards/2, maybe_send_row/1, +-export([remove_overlapping_shards/2, maybe_send_row/1, transform_row/1, keydict/1, extract_view/4, get_shards/2, check_down_shards/2, handle_worker_exit/3, get_shard_replacements/2, maybe_update_others/5]). @@ -46,10 +46,6 @@ handle_worker_exit(Collector, _Worker, Reason) -> {ok, Resp} = Callback({error, fabric_util:error_info(Reason)}, Acc), {error, Resp}. -%% @doc looks for a fully covered keyrange in the list of counters --spec is_progress_possible([{#shard{}, term()}]) -> boolean(). -is_progress_possible(Counters) -> - fabric_ring:is_progress_possible(Counters). -spec remove_overlapping_shards(#shard{}, [{#shard{}, any()}]) -> [{#shard{}, any()}]. @@ -416,28 +412,6 @@ fix_skip_and_limit(#mrargs{} = Args) -> remove_finalizer(Args) -> couch_mrview_util:set_extra(Args, finalizer, null). 
-% unit test -is_progress_possible_test() -> - EndPoint = 2 bsl 31, - T1 = [[0, EndPoint-1]], - ?assertEqual(is_progress_possible(mk_cnts(T1)),true), - T2 = [[0,10],[11,20],[21,EndPoint-1]], - ?assertEqual(is_progress_possible(mk_cnts(T2)),true), - % gap - T3 = [[0,10],[12,EndPoint-1]], - ?assertEqual(is_progress_possible(mk_cnts(T3)),false), - % outside range - T4 = [[1,10],[11,20],[21,EndPoint-1]], - ?assertEqual(is_progress_possible(mk_cnts(T4)),false), - % outside range - T5 = [[0,10],[11,20],[21,EndPoint]], - ?assertEqual(is_progress_possible(mk_cnts(T5)),false), - T6 = [[0, 10], [11, 20], [0, 5], [6, 21], [21, EndPoint - 1]], - ?assertEqual(is_progress_possible(mk_cnts(T6)), true), - % not possible, overlap is not exact - T7 = [[0, 10], [13, 20], [21, EndPoint - 1], [9, 12]], - ?assertEqual(is_progress_possible(mk_cnts(T7)), false). - remove_overlapping_shards_test() -> Cb = undefined, @@ -482,10 +456,6 @@ get_shard_replacements_test() -> ?assertEqual(Expect, Res). -mk_cnts(Ranges) -> - Shards = lists:map(fun mk_shard/1, Ranges), - orddict:from_list([{Shard,nil} || Shard <- Shards]). - mk_cnts(Ranges, NoNodes) -> orddict:from_list([{Shard,nil} || Shard <- @@ -502,10 +472,6 @@ mk_shards(NoNodes,Range,Shards) -> mk_shards(NoNodes-1,Range, [mk_shard(Name, Range) | Shards]). -mk_shard([B, E]) when is_integer(B), is_integer(E) -> - #shard{range = [B, E]}. - - mk_shard(Name, Range) -> Node = list_to_atom(Name), BName = list_to_binary(Name), diff --git a/src/ken/rebar.config.script b/src/ken/rebar.config.script index 26d6f4caa..3344206e5 100644 --- a/src/ken/rebar.config.script +++ b/src/ken/rebar.config.script @@ -11,7 +11,9 @@ % the License. HaveDreyfus = element(1, file:list_dir("../dreyfus")) == ok. -HaveHastings = element(1, file:list_dir("../hastings")) == ok. + +HastingsHome = os:getenv("HASTINGS_HOME", "../hastings"). +HaveHastings = element(1, file:list_dir(HastingsHome)) == ok. 
CurrOpts = case lists:keyfind(erl_opts, 1, CONFIG) of {erl_opts, Opts} -> Opts; diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl index dc2ee74c7..29be49490 100644 --- a/src/mango/src/mango_cursor.erl +++ b/src/mango/src/mango_cursor.erl @@ -19,7 +19,8 @@ execute/3, maybe_filter_indexes_by_ddoc/2, remove_indexes_with_partial_filter_selector/1, - maybe_add_warning/3 + maybe_add_warning/4, + maybe_noop_range/2 ]). @@ -114,7 +115,7 @@ filter_indexes(Indexes0, DesignId, ViewName) -> remove_indexes_with_partial_filter_selector(Indexes) -> - FiltFun = fun(Idx) -> + FiltFun = fun(Idx) -> case mango_idx:get_partial_filter_selector(Idx) of undefined -> true; _ -> false @@ -123,6 +124,22 @@ remove_indexes_with_partial_filter_selector(Indexes) -> lists:filter(FiltFun, Indexes). +maybe_add_warning(UserFun, #cursor{index = Index, opts = Opts}, Stats, UserAcc) -> + W0 = invalid_index_warning(Index, Opts), + W1 = no_index_warning(Index), + W2 = index_scan_warning(Stats), + Warnings = lists:append([W0, W1, W2]), + case Warnings of + [] -> + UserAcc; + _ -> + WarningStr = lists:join(<<"\n">>, Warnings), + Arg = {add_key, warning, WarningStr}, + {_Go, UserAcc1} = UserFun(Arg, UserAcc), + UserAcc1 + end. + + create_cursor(Db, Indexes, Selector, Opts) -> [{CursorMod, CursorModIndexes} | _] = group_indexes_by_type(Indexes), CursorMod:create(Db, CursorModIndexes, Selector, Opts). @@ -146,46 +163,86 @@ group_indexes_by_type(Indexes) -> end, ?CURSOR_MODULES). 
-maybe_add_warning(UserFun, #cursor{index = Index, opts = Opts}, UserAcc) -> - NoIndexWarning = case Index#idx.type of - <<"special">> -> - <<"no matching index found, create an index to optimize query time">>; - _ -> - ok - end, - - UseIndexInvalidWarning = case lists:keyfind(use_index, 1, Opts) of - {use_index, []} -> - NoIndexWarning; - {use_index, [DesignId]} -> - case filter_indexes([Index], DesignId) of - [] -> - fmt("_design/~s was not used because it does not contain a valid index for this query.", - [ddoc_name(DesignId)]); - _ -> - NoIndexWarning - end; - {use_index, [DesignId, ViewName]} -> - case filter_indexes([Index], DesignId, ViewName) of - [] -> - fmt("_design/~s, ~s was not used because it is not a valid index for this query.", - [ddoc_name(DesignId), ViewName]); - _ -> - NoIndexWarning - end - end, - - maybe_add_warning_int(UseIndexInvalidWarning, UserFun, UserAcc). - - -maybe_add_warning_int(ok, _, UserAcc) -> - UserAcc; - -maybe_add_warning_int(Warning, UserFun, UserAcc) -> +% warn if the _all_docs index was used to fulfil a query +no_index_warning(#idx{type = Type}) when Type =:= <<"special">> -> couch_stats:increment_counter([mango, unindexed_queries]), - Arg = {add_key, warning, Warning}, - {_Go, UserAcc0} = UserFun(Arg, UserAcc), - UserAcc0. + [<<"No matching index found, create an index to optimize query time.">>]; + +no_index_warning(_) -> + []. + + +% warn if user specified an index which doesn't exist or isn't valid +% for the selector. +% In this scenario, Mango will ignore the index hint and auto-select an index. +invalid_index_warning(Index, Opts) -> + UseIndex = lists:keyfind(use_index, 1, Opts), + invalid_index_warning_int(Index, UseIndex). 
+ + +invalid_index_warning_int(Index, {use_index, [DesignId]}) -> + Filtered = filter_indexes([Index], DesignId), + if Filtered /= [] -> []; true -> + couch_stats:increment_counter([mango, query_invalid_index]), + Reason = fmt("_design/~s was not used because it does not contain a valid index for this query.", + [ddoc_name(DesignId)]), + [Reason] + end; + +invalid_index_warning_int(Index, {use_index, [DesignId, ViewName]}) -> + Filtered = filter_indexes([Index], DesignId, ViewName), + if Filtered /= [] -> []; true -> + couch_stats:increment_counter([mango, query_invalid_index]), + Reason = fmt("_design/~s, ~s was not used because it is not a valid index for this query.", + [ddoc_name(DesignId), ViewName]), + [Reason] + end; + +invalid_index_warning_int(_, _) -> + []. + + +% warn if a large number of documents needed to be scanned per result +% returned, implying a lot of in-memory filtering +index_scan_warning(#execution_stats { + totalDocsExamined = Docs, + totalQuorumDocsExamined = DocsQuorum, + resultsReturned = ResultCount + }) -> + % Docs and DocsQuorum are mutually exclusive so it's safe to sum them + DocsScanned = Docs + DocsQuorum, + Ratio = calculate_index_scan_ratio(DocsScanned, ResultCount), + Threshold = config:get_integer("mango", "index_scan_warning_threshold", 10), + case Threshold > 0 andalso Ratio > Threshold of + true -> + couch_stats:increment_counter([mango, too_many_docs_scanned]), + Reason = <<"The number of documents examined is high in proportion to the number of results returned. Consider adding a more specific index to improve this.">>, + [Reason]; + false -> [] + end. + +% When there is an empty array for certain operators, we don't actually +% want to execute the query so we deny it by making the range [empty]. +% To clarify, we don't want this query to execute: {"$or": []}. Results should +% be empty. We do want this query to execute: {"age": 22, "$or": []}. 
It should +% return the same results as {"age": 22} +maybe_noop_range({[{Op, []}]}, IndexRanges) -> + Noops = [<<"$all">>, <<"$and">>, <<"$or">>, <<"$in">>], + case lists:member(Op, Noops) of + true -> + [empty]; + false -> + IndexRanges + end; +maybe_noop_range(_, IndexRanges) -> + IndexRanges. + + +calculate_index_scan_ratio(DocsScanned, 0) -> + DocsScanned; + +calculate_index_scan_ratio(DocsScanned, ResultCount) -> + DocsScanned / ResultCount. fmt(Format, Args) -> diff --git a/src/mango/src/mango_cursor_special.erl b/src/mango/src/mango_cursor_special.erl index f4a760d1c..df1f6d655 100644 --- a/src/mango/src/mango_cursor_special.erl +++ b/src/mango/src/mango_cursor_special.erl @@ -41,12 +41,14 @@ create(Db, Indexes, Selector, Opts) -> Limit = couch_util:get_value(limit, Opts, mango_opts:default_limit()), Skip = couch_util:get_value(skip, Opts, 0), Fields = couch_util:get_value(fields, Opts, all_fields), - Bookmark = couch_util:get_value(bookmark, Opts), + Bookmark = couch_util:get_value(bookmark, Opts), + + IndexRanges1 = mango_cursor:maybe_noop_range(Selector, IndexRanges), {ok, #cursor{ db = Db, index = Index, - ranges = IndexRanges, + ranges = IndexRanges1, selector = Selector, opts = Opts, limit = Limit, @@ -55,7 +57,6 @@ create(Db, Indexes, Selector, Opts) -> bookmark = Bookmark }}. - explain(Cursor) -> mango_cursor_view:explain(Cursor). 
diff --git a/src/mango/src/mango_cursor_text.erl b/src/mango/src/mango_cursor_text.erl index 8938f3557..43ef84e4c 100644 --- a/src/mango/src/mango_cursor_text.erl +++ b/src/mango/src/mango_cursor_text.erl @@ -92,8 +92,9 @@ execute(Cursor, UserFun, UserAcc) -> opts = Opts, execution_stats = Stats } = Cursor, + Query = mango_selector_text:convert(Selector), QueryArgs = #index_query_args{ - q = mango_selector_text:convert(Selector), + q = Query, partition = get_partition(Opts, nil), sort = sort_query(Opts, Selector), raw_bookmark = true @@ -113,7 +114,12 @@ execute(Cursor, UserFun, UserAcc) -> execution_stats = mango_execution_stats:log_start(Stats) }, try - execute(CAcc) + case Query of + <<>> -> + throw({stop, CAcc}); + _ -> + execute(CAcc) + end catch throw:{stop, FinalCAcc} -> #cacc{ @@ -126,7 +132,7 @@ execute(Cursor, UserFun, UserAcc) -> Arg = {add_key, bookmark, JsonBM}, {_Go, FinalUserAcc} = UserFun(Arg, LastUserAcc), FinalUserAcc0 = mango_execution_stats:maybe_add_stats(Opts, UserFun, Stats0, FinalUserAcc), - FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, FinalUserAcc0), + FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, Stats0, FinalUserAcc0), {ok, FinalUserAcc1} end. @@ -170,6 +176,10 @@ handle_hits(CAcc0, [{Sort, Doc} | Rest]) -> handle_hits(CAcc1, Rest). 
+handle_hit(CAcc0, Sort, not_found) -> + CAcc1 = update_bookmark(CAcc0, Sort), + CAcc1; + handle_hit(CAcc0, Sort, Doc) -> #cacc{ limit = Limit, @@ -178,6 +188,7 @@ handle_hit(CAcc0, Sort, Doc) -> } = CAcc0, CAcc1 = update_bookmark(CAcc0, Sort), Stats1 = mango_execution_stats:incr_docs_examined(Stats), + couch_stats:increment_counter([mango, docs_examined]), CAcc2 = CAcc1#cacc{execution_stats = Stats1}, case mango_selector:match(CAcc2#cacc.selector, Doc) of true when Skip > 0 -> diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl index f1b753bd7..240ef501d 100644 --- a/src/mango/src/mango_cursor_view.erl +++ b/src/mango/src/mango_cursor_view.erl @@ -46,10 +46,12 @@ create(Db, Indexes, Selector, Opts) -> Fields = couch_util:get_value(fields, Opts, all_fields), Bookmark = couch_util:get_value(bookmark, Opts), + IndexRanges1 = mango_cursor:maybe_noop_range(Selector, IndexRanges), + {ok, #cursor{ db = Db, index = Index, - ranges = IndexRanges, + ranges = IndexRanges1, selector = Selector, opts = Opts, limit = Limit, @@ -99,12 +101,20 @@ maybe_replace_max_json([H | T] = EndKey) when is_list(EndKey) -> maybe_replace_max_json(EndKey) -> EndKey. + base_args(#cursor{index = Idx, selector = Selector} = Cursor) -> + {StartKey, EndKey} = case Cursor#cursor.ranges of + [empty] -> + {null, null}; + _ -> + {mango_idx:start_key(Idx, Cursor#cursor.ranges), + mango_idx:end_key(Idx, Cursor#cursor.ranges)} + end, #mrargs{ view_type = map, reduce = false, - start_key = mango_idx:start_key(Idx, Cursor#cursor.ranges), - end_key = mango_idx:end_key(Idx, Cursor#cursor.ranges), + start_key = StartKey, + end_key = EndKey, include_docs = true, extra = [{callback, {?MODULE, view_cb}}, {selector, Selector}] }. 
@@ -145,7 +155,7 @@ execute(#cursor{db = Db, index = Idx, execution_stats = Stats} = Cursor0, UserFu {_Go, FinalUserAcc} = UserFun(Arg, LastCursor#cursor.user_acc), Stats0 = LastCursor#cursor.execution_stats, FinalUserAcc0 = mango_execution_stats:maybe_add_stats(Opts, UserFun, Stats0, FinalUserAcc), - FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, FinalUserAcc0), + FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, Stats0, FinalUserAcc0), {ok, FinalUserAcc1}; {error, Reason} -> {error, Reason} @@ -239,6 +249,7 @@ view_cb({row, Row}, #mrargs{extra = Options} = Acc) -> Doc -> put(mango_docs_examined, get(mango_docs_examined) + 1), Selector = couch_util:get_value(selector, Options), + couch_stats:increment_counter([mango, docs_examined]), case mango_selector:match(Selector, Doc) of true -> ok = rexi:stream2(ViewRow), @@ -423,6 +434,7 @@ doc_member(Cursor, RowProps) -> % an undefined doc was returned, indicating we should % perform a quorum fetch ExecutionStats1 = mango_execution_stats:incr_quorum_docs_examined(ExecutionStats), + couch_stats:increment_counter([mango, quorum_docs_examined]), Id = couch_util:get_value(id, RowProps), case mango_util:defer(fabric, open_doc, [Db, Id, Opts]) of {ok, #doc{}=DocProps} -> diff --git a/src/mango/src/mango_execution_stats.erl b/src/mango/src/mango_execution_stats.erl index 7e8afd782..5878a3190 100644 --- a/src/mango/src/mango_execution_stats.erl +++ b/src/mango/src/mango_execution_stats.erl @@ -62,6 +62,7 @@ incr_quorum_docs_examined(Stats) -> incr_results_returned(Stats) -> + couch_stats:increment_counter([mango, results_returned]), Stats#execution_stats { resultsReturned = Stats#execution_stats.resultsReturned + 1 }. @@ -81,11 +82,13 @@ log_end(Stats) -> }. 
-maybe_add_stats(Opts, UserFun, Stats, UserAcc) -> +maybe_add_stats(Opts, UserFun, Stats0, UserAcc) -> + Stats1 = log_end(Stats0), + couch_stats:update_histogram([mango, query_time], Stats1#execution_stats.executionTimeMs), + case couch_util:get_value(execution_stats, Opts) of true -> - Stats0 = log_end(Stats), - JSONValue = to_json(Stats0), + JSONValue = to_json(Stats1), Arg = {add_key, execution_stats, JSONValue}, {_Go, FinalUserAcc} = UserFun(Arg, UserAcc), FinalUserAcc; diff --git a/src/mango/src/mango_idx_text.erl b/src/mango/src/mango_idx_text.erl index 50f6cc866..1d4becfb3 100644 --- a/src/mango/src/mango_idx_text.erl +++ b/src/mango/src/mango_idx_text.erl @@ -126,6 +126,8 @@ columns(Idx) -> end. +is_usable(_, Selector, _) when Selector =:= {[]} -> + false; is_usable(Idx, Selector, _) -> case columns(Idx) of all_fields -> diff --git a/src/mango/src/mango_selector.erl b/src/mango/src/mango_selector.erl index fffadcd20..3ea83c220 100644 --- a/src/mango/src/mango_selector.erl +++ b/src/mango/src/mango_selector.erl @@ -52,15 +52,19 @@ normalize(Selector) -> % Match a selector against a #doc{} or EJSON value. % This assumes that the Selector has been normalized. % Returns true or false. +match(Selector, D) -> + couch_stats:increment_counter([mango, evaluate_selector]), + match_int(Selector, D). + % An empty selector matches any value. -match({[]}, _) -> +match_int({[]}, _) -> true; -match(Selector, #doc{body=Body}) -> +match_int(Selector, #doc{body=Body}) -> match(Selector, Body, fun mango_json:cmp/2); -match(Selector, {Props}) -> +match_int(Selector, {Props}) -> match(Selector, {Props}, fun mango_json:cmp/2). % Convert each operator into a normalized version as well @@ -399,10 +403,16 @@ negate({[{Field, Cond}]}) -> {[{Field, negate(Cond)}]}. +% We need to treat an empty array as always true. This will be applied +% for $or, $in, $all, $nin as well. 
+match({[{<<"$and">>, []}]}, _, _) -> + true; match({[{<<"$and">>, Args}]}, Value, Cmp) -> Pred = fun(SubSel) -> match(SubSel, Value, Cmp) end, lists:all(Pred, Args); +match({[{<<"$or">>, []}]}, _, _) -> + true; match({[{<<"$or">>, Args}]}, Value, Cmp) -> Pred = fun(SubSel) -> match(SubSel, Value, Cmp) end, lists:any(Pred, Args); @@ -410,6 +420,8 @@ match({[{<<"$or">>, Args}]}, Value, Cmp) -> match({[{<<"$not">>, Arg}]}, Value, Cmp) -> not match(Arg, Value, Cmp); +match({[{<<"$all">>, []}]}, _, _) -> + true; % All of the values in Args must exist in Values or % Values == hd(Args) if Args is a single element list % that contains a list. @@ -493,6 +505,8 @@ match({[{<<"$gte">>, Arg}]}, Value, Cmp) -> match({[{<<"$gt">>, Arg}]}, Value, Cmp) -> Cmp(Value, Arg) > 0; +match({[{<<"$in">>, []}]}, _, _) -> + true; match({[{<<"$in">>, Args}]}, Values, Cmp) when is_list(Values)-> Pred = fun(Arg) -> lists:foldl(fun(Value,Match) -> @@ -504,6 +518,8 @@ match({[{<<"$in">>, Args}]}, Value, Cmp) -> Pred = fun(Arg) -> Cmp(Value, Arg) == 0 end, lists:any(Pred, Args); +match({[{<<"$nin">>, []}]}, _, _) -> + true; match({[{<<"$nin">>, Args}]}, Values, Cmp) when is_list(Values)-> not match({[{<<"$in">>, Args}]}, Values, Cmp); match({[{<<"$nin">>, Args}]}, Value, Cmp) -> @@ -570,7 +586,7 @@ match({[_, _ | _] = _Props} = Sel, _Value, _Cmp) -> erlang:error({unnormalized_selector, Sel}). -% Returns true if Selector requires all +% Returns true if Selector requires all % fields in RequiredFields to exist in any matching documents. % For each condition in the selector, check @@ -600,13 +616,13 @@ has_required_fields_int(Selector, RequiredFields) when not is_list(Selector) -> % We can "see" through $and operator. Iterate % through the list of child operators. 
-has_required_fields_int([{[{<<"$and">>, Args}]}], RequiredFields) +has_required_fields_int([{[{<<"$and">>, Args}]}], RequiredFields) when is_list(Args) -> has_required_fields_int(Args, RequiredFields); % We can "see" through $or operator. Required fields % must be covered by all children. -has_required_fields_int([{[{<<"$or">>, Args}]} | Rest], RequiredFields) +has_required_fields_int([{[{<<"$or">>, Args}]} | Rest], RequiredFields) when is_list(Args) -> Remainder0 = lists:foldl(fun(Arg, Acc) -> % for each child test coverage against the full @@ -623,7 +639,7 @@ has_required_fields_int([{[{<<"$or">>, Args}]} | Rest], RequiredFields) % Handle $and operator where it has peers. Required fields % can be covered by any child. -has_required_fields_int([{[{<<"$and">>, Args}]} | Rest], RequiredFields) +has_required_fields_int([{[{<<"$and">>, Args}]} | Rest], RequiredFields) when is_list(Args) -> Remainder = has_required_fields_int(Args, RequiredFields), has_required_fields_int(Rest, Remainder); diff --git a/src/mango/src/mango_selector_text.erl b/src/mango/src/mango_selector_text.erl index cfa3baf6d..9e1116de6 100644 --- a/src/mango/src/mango_selector_text.erl +++ b/src/mango/src/mango_selector_text.erl @@ -205,15 +205,36 @@ convert(_Path, {Props} = Sel) when length(Props) > 1 -> erlang:error({unnormalized_selector, Sel}). -to_query({op_and, Args}) when is_list(Args) -> +to_query_nested(Args) -> QueryArgs = lists:map(fun to_query/1, Args), - ["(", mango_util:join(<<" AND ">>, QueryArgs), ")"]; + % removes empty queries that result from selectors with empty arrays + FilterFun = fun(A) -> A =/= [] andalso A =/= "()" end, + lists:filter(FilterFun, QueryArgs). 
+ + +to_query({op_and, []}) -> + []; + +to_query({op_and, Args}) when is_list(Args) -> + case to_query_nested(Args) of + [] -> []; + QueryArgs -> ["(", mango_util:join(<<" AND ">>, QueryArgs), ")"] + end; + +to_query({op_or, []}) -> + []; to_query({op_or, Args}) when is_list(Args) -> - ["(", mango_util:join(" OR ", lists:map(fun to_query/1, Args)), ")"]; + case to_query_nested(Args) of + [] -> []; + QueryArgs -> ["(", mango_util:join(" OR ", QueryArgs), ")"] + end; to_query({op_not, {ExistsQuery, Arg}}) when is_tuple(Arg) -> - ["(", to_query(ExistsQuery), " AND NOT (", to_query(Arg), "))"]; + case to_query(Arg) of + [] -> ["(", to_query(ExistsQuery), ")"]; + Query -> ["(", to_query(ExistsQuery), " AND NOT (", Query, "))"] + end; %% For $exists:false to_query({op_not, {ExistsQuery, false}}) -> diff --git a/src/mango/src/mango_util.erl b/src/mango/src/mango_util.erl index a7347178e..0d31f15f9 100644 --- a/src/mango/src/mango_util.erl +++ b/src/mango/src/mango_util.erl @@ -344,6 +344,8 @@ has_suffix(Bin, Suffix) when is_binary(Bin), is_binary(Suffix) -> end. 
+join(_Sep, []) -> + []; join(_Sep, [Item]) -> [Item]; join(Sep, [Item | Rest]) -> diff --git a/src/mango/test/02-basic-find-test.py b/src/mango/test/02-basic-find-test.py index 0fc4248a8..afdba03a2 100644 --- a/src/mango/test/02-basic-find-test.py +++ b/src/mango/test/02-basic-find-test.py @@ -13,6 +13,7 @@ import mango +import user_docs class BasicFindTests(mango.UserDocsTests): diff --git a/src/mango/test/05-index-selection-test.py b/src/mango/test/05-index-selection-test.py index 3f7fb9f21..271e36176 100644 --- a/src/mango/test/05-index-selection-test.py +++ b/src/mango/test/05-index-selection-test.py @@ -84,7 +84,7 @@ class IndexSelectionTests: ddocid = "_design/age" r = self.db.find({}, use_index=ddocid, return_raw=True) self.assertEqual( - r["warning"], + r["warning"][0].lower(), "{0} was not used because it does not contain a valid index for this query.".format( ddocid ), @@ -107,7 +107,7 @@ class IndexSelectionTests: selector = {"company": "Pharmex"} r = self.db.find(selector, use_index=ddocid, return_raw=True) self.assertEqual( - r["warning"], + r["warning"][0].lower(), "{0} was not used because it does not contain a valid index for this query.".format( ddocid ), @@ -124,7 +124,7 @@ class IndexSelectionTests: resp = self.db.find(selector, use_index=[ddocid, name], return_raw=True) self.assertEqual( - resp["warning"], + resp["warning"][0].lower(), "{0}, {1} was not used because it is not a valid index for this query.".format( ddocid, name ), @@ -162,7 +162,7 @@ class IndexSelectionTests: selector, sort=["foo", "bar"], use_index=ddocid_invalid, return_raw=True ) self.assertEqual( - resp["warning"], + resp["warning"][0].lower(), "{0} was not used because it does not contain a valid index for this query.".format( ddocid_invalid ), diff --git a/src/mango/test/12-use-correct-index-test.py b/src/mango/test/12-use-correct-index-test.py index 2de88a21a..3a2f60af8 100644 --- a/src/mango/test/12-use-correct-index-test.py +++ 
b/src/mango/test/12-use-correct-index-test.py @@ -93,8 +93,8 @@ class ChooseCorrectIndexForDocs(mango.DbPerClass): self.assertEqual(explain_resp["index"]["type"], "special") resp = self.db.find(selector, return_raw=True) self.assertEqual( - resp["warning"], - "no matching index found, create an index to optimize query time", + resp["warning"][0].lower(), + "no matching index found, create an index to optimize query time.", ) def test_chooses_idxA(self): diff --git a/src/mango/test/21-empty-selector-tests.py b/src/mango/test/21-empty-selector-tests.py new file mode 100644 index 000000000..fda18f6e4 --- /dev/null +++ b/src/mango/test/21-empty-selector-tests.py @@ -0,0 +1,72 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. 
+ +import json +import mango +import unittest +import user_docs +import math + + +def make_empty_selector_suite(klass): + class EmptySelectorTestCase(klass): + def test_empty(self): + resp = self.db.find({}, explain=True) + self.assertEqual(resp["index"]["type"], "special") + + def test_empty_array_or(self): + resp = self.db.find({"$or": []}, explain=True) + self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE) + docs = self.db.find({"$or": []}) + assert len(docs) == 0 + + def test_empty_array_or_with_age(self): + resp = self.db.find({"age": 22, "$or": []}, explain=True) + self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE) + docs = self.db.find({"age": 22, "$or": []}) + assert len(docs) == 1 + + def test_empty_array_and_with_age(self): + resp = self.db.find( + {"age": 22, "$and": [{"b": {"$all": []}}]}, explain=True + ) + self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE) + docs = self.db.find({"age": 22, "$and": []}) + assert len(docs) == 1 + + def test_empty_arrays_complex(self): + resp = self.db.find({"$or": [], "a": {"$in": []}}, explain=True) + self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE) + docs = self.db.find({"$or": [], "a": {"$in": []}}) + assert len(docs) == 0 + + def test_empty_nin(self): + resp = self.db.find({"favorites": {"$nin": []}}, explain=True) + self.assertEqual(resp["index"]["type"], klass.INDEX_TYPE) + docs = self.db.find({"favorites": {"$nin": []}}) + assert len(docs) == len(user_docs.DOCS) + + return EmptySelectorTestCase + + +class EmptySelectorNoIndexTests( + make_empty_selector_suite(mango.UserDocsTestsNoIndexes) +): + pass + +@unittest.skipUnless(mango.has_text_service(), "requires text service") +class EmptySelectorTextTests(make_empty_selector_suite(mango.UserDocsTextTests)): + pass + + +class EmptySelectorUserDocTests(make_empty_selector_suite(mango.UserDocsTests)): + pass diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py index de8a638a8..03cb85f48 100644 --- a/src/mango/test/mango.py +++ 
b/src/mango/test/mango.py @@ -314,6 +314,8 @@ class DbPerClass(unittest.TestCase): class UserDocsTests(DbPerClass): + INDEX_TYPE = "json" + @classmethod def setUpClass(klass): super(UserDocsTests, klass).setUpClass() @@ -321,14 +323,16 @@ class UserDocsTests(DbPerClass): class UserDocsTestsNoIndexes(DbPerClass): + INDEX_TYPE = "special" + @classmethod def setUpClass(klass): super(UserDocsTestsNoIndexes, klass).setUpClass() - user_docs.setup(klass.db, index_type="_all_docs") + user_docs.setup(klass.db, index_type=klass.INDEX_TYPE) class UserDocsTextTests(DbPerClass): - + INDEX_TYPE = "text" DEFAULT_FIELD = None FIELDS = None @@ -338,7 +342,7 @@ class UserDocsTextTests(DbPerClass): if has_text_service(): user_docs.setup( klass.db, - index_type="text", + index_type=klass.INDEX_TYPE, default_field=klass.DEFAULT_FIELD, fields=klass.FIELDS, ) diff --git a/src/mem3/src/mem3_sync_event_listener.erl b/src/mem3/src/mem3_sync_event_listener.erl index fa353d9a8..b6fbe3279 100644 --- a/src/mem3/src/mem3_sync_event_listener.erl +++ b/src/mem3/src/mem3_sync_event_listener.erl @@ -293,6 +293,7 @@ should_terminate(Pid) -> ?assert(is_process_alive(Pid)), EventMgr = whereis(config_event), + EventMgrWasAlive = (catch is_process_alive(EventMgr)), Ref = erlang:monitor(process, Pid), @@ -305,6 +306,9 @@ should_terminate(Pid) -> {'DOWN', Ref, _, _, _} -> ok after 1000 -> + ?debugFmt("~n XKCD should_terminate EventMgrWasAlive:~p MsgQueue:~p PInfo:~p ~n", [ + EventMgrWasAlive, process_info(self(), messages), process_info(Pid) + ]), ?assert(false) end, diff --git a/test/elixir/README.md b/test/elixir/README.md index ef95e5f61..90b2fd601 100644 --- a/test/elixir/README.md +++ b/test/elixir/README.md @@ -46,8 +46,8 @@ X means done, - means partially - [ ] Port design_options.js - [ ] Port design_paths.js - [X] Port erlang_views.js - - [ ] Port etags_head.js - - [ ] Port etags_views.js + - [X] Port etags_head.js + - [ ] ~~Port etags_views.js~~ (skipped in js test suite) - [ ] Port 
form_submit.js - [ ] Port http.js - [X] Port invalid_docids.js diff --git a/test/elixir/lib/couch.ex b/test/elixir/lib/couch.ex index 6a63dffb0..3aef07f01 100644 --- a/test/elixir/lib/couch.ex +++ b/test/elixir/lib/couch.ex @@ -97,9 +97,9 @@ defmodule Couch do def process_options(options) do options - |> set_auth_options() - |> set_inactivity_timeout() - |> set_request_timeout() + |> set_auth_options() + |> set_inactivity_timeout() + |> set_request_timeout() end def process_request_body(body) do @@ -110,6 +110,10 @@ defmodule Couch do end end + def process_response_body(_headers, body) when body == [] do + "" + end + def process_response_body(headers, body) do content_type = headers[:"Content-Type"] @@ -137,9 +141,14 @@ defmodule Couch do end def set_inactivity_timeout(options) do - Keyword.update(options, :ibrowse, [{:inactivity_timeout, @inactivity_timeout}], fn(ibrowse) -> - Keyword.put_new(ibrowse, :inactivity_timeout, @inactivity_timeout) - end) + Keyword.update( + options, + :ibrowse, + [{:inactivity_timeout, @inactivity_timeout}], + fn ibrowse -> + Keyword.put_new(ibrowse, :inactivity_timeout, @inactivity_timeout) + end + ) end def set_request_timeout(options) do @@ -165,5 +174,4 @@ defmodule Couch do %Couch.Session{error: resp.body["error"]} end end - end diff --git a/test/elixir/test/etags_head_test.exs b/test/elixir/test/etags_head_test.exs new file mode 100644 index 000000000..9b9ff8bb0 --- /dev/null +++ b/test/elixir/test/etags_head_test.exs @@ -0,0 +1,151 @@ +defmodule EtagsHeadTest do + use CouchTestCase + + @moduletag :etags + + @tag :with_db + test "etag header on creation", context do + db_name = context[:db_name] + + resp = + Couch.put("/#{db_name}/1", + headers: ["Content-Type": "application/json"], + body: %{} + ) + + assert resp.status_code == 201 + assert Map.has_key?(resp.headers.hdrs, "etag") + end + + @tag :with_db + test "etag header on retrieval", context do + db_name = context[:db_name] + + resp = + Couch.put("/#{db_name}/1", + headers: 
["Content-Type": "application/json"], + body: %{} + ) + + etag = resp.headers.hdrs["etag"] + + # get the doc and verify the headers match + resp = Couch.get("/#{db_name}/1") + assert etag == resp.headers.hdrs["etag"] + + # 'head' the doc and verify the headers match + resp = + Couch.head("/#{db_name}/1", + headers: ["if-none-match": "s"] + ) + + assert etag == resp.headers.hdrs["etag"] + end + + @tag :with_db + test "etag header on head", context do + db_name = context[:db_name] + + resp = + Couch.put("/#{db_name}/1", + headers: ["Content-Type": "application/json"], + body: %{} + ) + + etag = resp.headers.hdrs["etag"] + + # 'head' the doc and verify the headers match + resp = + Couch.head("/#{db_name}/1", + headers: ["if-none-match": "s"] + ) + + assert etag == resp.headers.hdrs["etag"] + end + + @tag :with_db + test "etags head", context do + db_name = context[:db_name] + + resp = + Couch.put("/#{db_name}/1", + headers: ["Content-Type": "application/json"], + body: %{} + ) + + assert resp.status_code == 201 + assert Map.has_key?(resp.headers.hdrs, "etag") + + etag = resp.headers.hdrs["etag"] + + # get the doc and verify the headers match + resp = Couch.get("/#{db_name}/1") + assert etag == resp.headers.hdrs["etag"] + + # 'head' the doc and verify the headers match + resp = + Couch.head("/#{db_name}/1", + headers: ["if-none-match": "s"] + ) + + assert etag == resp.headers.hdrs["etag"] + + # replace a doc + resp = + Couch.put("/#{db_name}/1", + headers: ["if-match": etag], + body: %{} + ) + + assert resp.status_code == 201 + + # extract the new ETag value + previous_etag = etag + etag = resp.headers.hdrs["etag"] + + # fail to replace a doc + resp = + Couch.put("/#{db_name}/1", + body: %{} + ) + + assert resp.status_code == 409 + + # verify get w/Etag + resp = + Couch.get("/#{db_name}/1", + headers: ["if-none-match": previous_etag] + ) + + assert resp.status_code == 200 + + resp = + Couch.get("/#{db_name}/1", + headers: ["if-none-match": etag] + ) + + assert 
resp.status_code == 304 + + resp = + Couch.get("/#{db_name}/1", + headers: ["if-none-match": "W/#{etag}"] + ) + + assert resp.status_code == 304 + + # fail to delete a doc + resp = + Couch.delete("/#{db_name}/1", + headers: ["if-match": previous_etag] + ) + + assert resp.status_code == 409 + + resp = + Couch.delete("/#{db_name}/1", + headers: ["if-match": etag] + ) + + assert resp.status_code == 200 + end +end diff --git a/test/javascript/tests/etags_head.js b/test/javascript/tests/etags_head.js index 9faca4af6..678479004 100644 --- a/test/javascript/tests/etags_head.js +++ b/test/javascript/tests/etags_head.js @@ -10,7 +10,9 @@ // License for the specific language governing permissions and limitations under // the License. +couchTests.elixir = true; couchTests.etags_head = function(debug) { + return console.log('done in test/elixir/test/etags_head_test.exs'); var db_name = get_random_db_name(); var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"}); db.createDb(); |