author    Adam Kocoloski <kocolosk@apache.org>  2021-10-27 08:17:22 -0400
committer GitHub <noreply@github.com>           2021-10-27 08:17:22 -0400
commit    cacd70eb82f2d16a6a023945ebbe359e7fb9d8eb (patch)
tree      397eba8885cacebd91ef91fae47a6337b5c66fab
parent    5aafa0656220a196bd8f34d251ed6c5399ad9c19 (diff)
parent    8513c5bae1de0089e5909a51b7e15abf9af2196c (diff)
download  couchdb-3788-changes-rewind-nodedown.tar.gz

Merge branch '3.x' into 3788-changes-rewind-nodedown
-rw-r--r--  src/chttpd/src/chttpd_node.erl | 28
-rw-r--r--  src/couch/priv/couch_ejson_compare/couch_ejson_compare.c | 38
-rw-r--r--  src/couch/src/couch_ejson_compare.erl | 22
-rw-r--r--  src/couch/src/couch_server.erl | 3
-rw-r--r--  src/couch/test/eunit/couch_ejson_compare_tests.erl | 20
-rw-r--r--  src/couch/test/eunit/couch_util_tests.erl | 18
-rw-r--r--  src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl | 10
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl | 20
-rw-r--r--  src/custodian/src/custodian.hrl | 49
-rw-r--r--  src/custodian/src/custodian_util.erl | 51
-rw-r--r--  src/dreyfus/test/dreyfus_blacklist_await_test.erl | 4
-rw-r--r--  src/dreyfus/test/dreyfus_purge_test.erl | 14
-rw-r--r--  src/dreyfus/test/dreyfus_test_util.erl | 4
-rw-r--r--  src/fabric/src/fabric_db_info.erl | 9
-rw-r--r--  src/fabric/src/fabric_view_changes.erl | 19
-rw-r--r--  src/fabric/test/eunit/fabric_db_info_tests.erl | 68
-rw-r--r--  src/mem3/rebar.config | 14
-rw-r--r--  src/mem3/src/mem3_bdu.erl | 112
-rw-r--r--  src/mem3/test/eunit/mem3_bdu_test.erl | 280
-rw-r--r--  src/mem3/test/eunit/mem3_reshard_test.erl | 1
-rw-r--r--  test/elixir/test/basics_test.exs | 11
-rw-r--r--  test/elixir/test/config/suite.elixir | 1
-rw-r--r--  test/elixir/test/replication_test.exs | 4
23 files changed, 667 insertions(+), 133 deletions(-)
diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
index 7486aadfe..e92a1e506 100644
--- a/src/chttpd/src/chttpd_node.erl
+++ b/src/chttpd/src/chttpd_node.erl
@@ -33,13 +33,20 @@ handle_node_req(#httpd{path_parts=[A, <<"_local">>|Rest]}=Req) ->
handle_node_req(Req#httpd{path_parts=[A, node()] ++ Rest});
% GET /_node/$node/_versions
handle_node_req(#httpd{method='GET', path_parts=[_, _Node, <<"_versions">>]}=Req) ->
- send_json(Req, 200, {[
- {erlang_version, ?l2b(?COUCHDB_ERLANG_VERSION)},
- {javascript_engine, {[
- {name, <<"spidermonkey">>},
- {version, couch_server:get_spidermonkey_version()}
- ]}}
- ]});
+ IcuVer = couch_ejson_compare:get_icu_version(),
+ UcaVer = couch_ejson_compare:get_uca_version(),
+ send_json(Req, 200, #{
+ erlang_version => ?l2b(?COUCHDB_ERLANG_VERSION),
+ collation_driver => #{
+ name => <<"libicu">>,
+ library_version => version_tuple_to_str(IcuVer),
+ collation_algorithm_version => version_tuple_to_str(UcaVer)
+ },
+ javascript_engine => #{
+ name => <<"spidermonkey">>,
+ version => couch_server:get_spidermonkey_version()
+ }
+ });
handle_node_req(#httpd{path_parts=[_, _Node, <<"_versions">>]}=Req) ->
send_method_not_allowed(Req, "GET");
@@ -322,3 +329,10 @@ run_queues() ->
[DCQ | SQs] = lists:reverse(statistics(run_queue_lengths)),
{lists:sum(SQs), DCQ}
end.
+
+version_tuple_to_str(Version) when is_tuple(Version) ->
+ List1 = tuple_to_list(Version),
+ IsZero = fun(N) -> N == 0 end,
+ List2 = lists:reverse(lists:dropwhile(IsZero, lists:reverse(List1))),
+ List3 = [erlang:integer_to_list(N) || N <- List2],
+ ?l2b(lists:join(".", List3)).
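A quick sanity check of the trailing-zero trimming above; version_tuple_to_str/1 is module-internal, so this shell sketch (illustrative ICU 67.1 tuple assumed) inlines the same steps:

    1> Ver = {67, 1, 0, 0}.
    2> Trimmed = lists:reverse(lists:dropwhile(fun(N) -> N == 0 end,
                                               lists:reverse(tuple_to_list(Ver)))).
    [67,1]
    3> iolist_to_binary(lists:join(".", [integer_to_list(N) || N <- Trimmed])).
    <<"67.1">>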
diff --git a/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c b/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
index f453a295f..6e200320d 100644
--- a/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
+++ b/src/couch/priv/couch_ejson_compare/couch_ejson_compare.c
@@ -166,6 +166,40 @@ compare_strings_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
}
+ERL_NIF_TERM
+get_icu_version(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ UVersionInfo ver = {0};
+ ERL_NIF_TERM tup[U_MAX_VERSION_LENGTH] = {0};
+ int i;
+
+ u_getVersion(ver);
+
+ for (i = 0; i < U_MAX_VERSION_LENGTH; i++) {
+ tup[i] = enif_make_int(env, ver[i]);
+ }
+
+ return enif_make_tuple_from_array(env, tup, U_MAX_VERSION_LENGTH);
+}
+
+
+ERL_NIF_TERM
+get_uca_version(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ UVersionInfo ver = {0};
+ ERL_NIF_TERM tup[U_MAX_VERSION_LENGTH] = {0};
+ int i;
+
+ ucol_getUCAVersion(get_collator(), ver);
+
+ for (i = 0; i < U_MAX_VERSION_LENGTH; i++) {
+ tup[i] = enif_make_int(env, ver[i]);
+ }
+
+ return enif_make_tuple_from_array(env, tup, U_MAX_VERSION_LENGTH);
+}
+
+
int
less_json(int depth, ctx_t* ctx, ERL_NIF_TERM a, ERL_NIF_TERM b)
{
@@ -531,7 +565,9 @@ on_unload(ErlNifEnv* env, void* priv_data)
static ErlNifFunc nif_functions[] = {
{"less_nif", 2, less_json_nif},
- {"compare_strings_nif", 2, compare_strings_nif}
+ {"compare_strings_nif", 2, compare_strings_nif},
+ {"get_icu_version", 0, get_icu_version},
+ {"get_uca_version", 0, get_uca_version}
};
diff --git a/src/couch/src/couch_ejson_compare.erl b/src/couch/src/couch_ejson_compare.erl
index 8681296f1..b02b9ba7c 100644
--- a/src/couch/src/couch_ejson_compare.erl
+++ b/src/couch/src/couch_ejson_compare.erl
@@ -12,10 +12,20 @@
-module(couch_ejson_compare).
--export([less/2, less_json_ids/2, less_json/2]).
+-export([
+ less/2,
+ less_json_ids/2,
+ less_json/2,
+ get_icu_version/0,
+ get_uca_version/0
+]).
% For testing
--export([less_nif/2, less_erl/2, compare_strings_nif/2]).
+-export([
+ less_nif/2,
+ less_erl/2,
+ compare_strings_nif/2
+]).
-on_load(init/0).
@@ -51,6 +61,14 @@ less_json(A,B) ->
less(A, B) < 0.
+get_icu_version() ->
+ erlang:nif_error(get_icu_version).
+
+
+get_uca_version() ->
+ erlang:nif_error(get_uca_version).
+
+
less_nif(A, B) ->
erlang:nif_error(less_nif_load_error, [A, B]).
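Once the NIF library loads, these stubs are replaced and the getters return the 4-tuples that the /_node/$node/_versions response renders; a hedged remsh sketch (the numbers depend on the ICU build CouchDB is linked against):

    1> couch_ejson_compare:get_icu_version().
    {67,1,0,0}
    2> couch_ejson_compare:get_uca_version().
    {9,0,0,0}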
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
index 5dc0a05f0..3c72e3357 100644
--- a/src/couch/src/couch_server.erl
+++ b/src/couch/src/couch_server.erl
@@ -189,7 +189,8 @@ maybe_add_sys_db_callbacks(DbName, Options) ->
orelse path_ends_with(DbName, UsersDbSuffix),
if
DbName == DbsDbName ->
- [sys_db | Options];
+ [{before_doc_update, fun mem3_bdu:before_doc_update/3},
+ sys_db | Options];
DbName == NodesDbName ->
[sys_db | Options];
IsReplicatorDb ->
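With {before_doc_update, fun mem3_bdu:before_doc_update/3} threaded into the open options, every interactive write to the shards db now passes through the BDU (added as src/mem3/src/mem3_bdu.erl later in this diff) before it lands. A minimal sketch of invoking the hook directly, assuming couch_db.hrl is loaded for the #doc{} record; mem3_bdu ignores the Db argument, so any placeholder term works:

    Doc = #doc{id = <<"db1">>, body = {[]}},
    %% The empty body fails validation, so this returns (via throw)
    %% {forbidden, <<"by_node is mandatory">>} instead of the doc:
    catch mem3_bdu:before_doc_update(Doc, fake_db, interactive_edit).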
diff --git a/src/couch/test/eunit/couch_ejson_compare_tests.erl b/src/couch/test/eunit/couch_ejson_compare_tests.erl
index 790f6e54c..1dfbad4ed 100644
--- a/src/couch/test/eunit/couch_ejson_compare_tests.erl
+++ b/src/couch/test/eunit/couch_ejson_compare_tests.erl
@@ -183,6 +183,26 @@ zero_width_chars() ->
% Regular EUnit tests
+get_icu_version_test() ->
+ Ver = couch_ejson_compare:get_icu_version(),
+ ?assertMatch({_, _, _, _}, Ver),
+ {V1, V2, V3, V4} = Ver,
+ ?assert(is_integer(V1) andalso V1 > 0),
+ ?assert(is_integer(V2) andalso V2 >= 0),
+ ?assert(is_integer(V3) andalso V3 >= 0),
+ ?assert(is_integer(V4) andalso V4 >= 0).
+
+
+get_uca_version_test() ->
+ Ver = couch_ejson_compare:get_uca_version(),
+ ?assertMatch({_, _, _, _}, Ver),
+ {V1, V2, V3, V4} = Ver,
+ ?assert(is_integer(V1) andalso V1 > 0),
+ ?assert(is_integer(V2) andalso V2 >= 0),
+ ?assert(is_integer(V3) andalso V3 >= 0),
+ ?assert(is_integer(V4) andalso V4 >= 0).
+
+
max_depth_error_list_test() ->
% NIF can handle terms with depth <= 9
Nested9 = nest_list(<<"val">>, 9),
diff --git a/src/couch/test/eunit/couch_util_tests.erl b/src/couch/test/eunit/couch_util_tests.erl
index f6d7d958a..44a5cce0a 100644
--- a/src/couch/test/eunit/couch_util_tests.erl
+++ b/src/couch/test/eunit/couch_util_tests.erl
@@ -15,24 +15,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-setup() ->
- %% We cannot start driver from here since it becomes bounded to eunit
- %% master process and the next couch_server_sup:start_link call will
- %% fail because server couldn't load driver since it already is.
- %%
- %% On other hand, we cannot unload driver here due to
- %% {error, not_loaded_by_this_process} while it is. Any ideas is welcome.
- %%
- Ctx = test_util:start_couch(),
- %% config:start_link(?CONFIG_CHAIN),
- Ctx.
-
-teardown(Ctx) ->
- ok = test_util:stop_couch(Ctx),
- %% config:stop(),
- ok.
-
-
validate_callback_exists_test_() ->
{
"validate_callback_exists tests",
diff --git a/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl b/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl
index c862b9a9f..5b8adfd1d 100644
--- a/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl
+++ b/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl
@@ -85,17 +85,15 @@ node_call_prometheus_http(_) ->
Url = construct_url(?PROM_PORT),
{ok, RC1, _, _} = test_request:get(
Url,
- [?CONTENT_JSON, ?AUTH],
- []
+ [?CONTENT_JSON, ?AUTH]
),
% since this port doesn't require auth, this should work
{ok, RC2, _, _} = test_request:get(
Url,
- [?CONTENT_JSON],
- []
+ [?CONTENT_JSON]
),
delete_db(Url),
- ?_assertEqual(200, RC2).
+ ?_assertEqual({200, 200}, {RC1, RC2}).
% we don't start the http server
deny_prometheus_http(_) ->
@@ -121,8 +119,6 @@ construct_url(Port) ->
lists:concat(["http://", Addr, ":", Port, "/_node/_local/_prometheus"]).
create_db(Url) ->
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
?assert(Status =:= 201 orelse Status =:= 202).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl b/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl
index 037f37191..a9a0fc943 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl
@@ -138,7 +138,7 @@ start_job() ->
check_active_tasks(DocsRead, DocsWritten, DocsFailed) ->
- RepTask = wait_for_task_status(),
+ RepTask = wait_for_task_status(DocsWritten),
?assertNotEqual(timeout, RepTask),
?assertEqual(DocsRead, couch_util:get_value(docs_read, RepTask)),
?assertEqual(DocsWritten, couch_util:get_value(docs_written, RepTask)),
@@ -147,7 +147,7 @@ check_active_tasks(DocsRead, DocsWritten, DocsFailed) ->
check_scheduler_jobs(DocsRead, DocsWritten, DocFailed) ->
- Info = wait_scheduler_info(),
+ Info = wait_scheduler_info(DocsWritten),
?assert(maps:is_key(<<"changes_pending">>, Info)),
?assert(maps:is_key(<<"doc_write_failures">>, Info)),
?assert(maps:is_key(<<"docs_read">>, Info)),
@@ -167,21 +167,29 @@ replication_tasks() ->
end, couch_task_status:all()).
-wait_for_task_status() ->
+wait_for_task_status(DocsWritten) ->
test_util:wait(fun() ->
case replication_tasks() of
[] -> wait;
- [RepTask] -> RepTask
+ [RepTask] ->
+ case couch_util:get_value(docs_written, RepTask) of
+ DocsWritten -> RepTask;
+ _Other -> wait
+ end
end
end).
-wait_scheduler_info() ->
+wait_scheduler_info(DocsWritten) ->
test_util:wait(fun() ->
case scheduler_jobs() of
[] -> wait;
[#{<<"info">> := null}] -> wait;
- [#{<<"info">> := Info}] -> Info
+ [#{<<"info">> := Info}] ->
+ case maps:get(<<"docs_written">>, Info, undefined) of
+ DocsWritten -> Info;
+ _Other -> wait
+ end
end
end).
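Both waiters lean on test_util:wait/1, which re-runs the fun until it returns something other than the atom wait (or gives up and returns timeout); the new DocsWritten guard just turns a flaky "a task exists" check into "a task exists with the expected docs_written count". A reduced sketch of that retry contract, with the helper names from this patch:

    Result = test_util:wait(fun() ->
        case replication_tasks() of
            [] -> wait;        % nothing registered yet, poll again
            [Task] -> Task     % stop polling and return the task
        end
    end),
    ?assertNotEqual(timeout, Result).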
diff --git a/src/custodian/src/custodian.hrl b/src/custodian/src/custodian.hrl
deleted file mode 100644
index bce22cf95..000000000
--- a/src/custodian/src/custodian.hrl
+++ /dev/null
@@ -1,49 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--define(CUSTODIAN_ID, <<"_design/custodian">>).
-
--define(CUSTODIAN_VALIDATION,
-<<"function(newDoc, oldDoc) {
- var i, range, node;
- if(newDoc['_id'].substring(0, 8) === \"_design/\") return;
- if(newDoc['_deleted'] === true) return;
- if (!newDoc.by_node) {
- throw({forbidden: \"by_node is mandatory\"});
- }
- if (!newDoc.by_range) {
- throw({forbidden: \"by_range is mandatory\"});
- }
- for (node in newDoc.by_node) {
- for (i in newDoc.by_node[node]) {
- range = newDoc.by_node[node][i];
- if(!newDoc.by_range[range]) {
- throw({forbidden: \"by_range for \" + range + \" is missing\"});
- }
- if(newDoc.by_range[range].indexOf(node) === -1) {
- throw({forbidden : \"by_range for \" + range + \" is missing \" + node});
- }
- }
- }
- for (range in newDoc.by_range) {
- for (i in newDoc.by_range[range]) {
- node = newDoc.by_range[range][i];
- if(!newDoc.by_node[node]) {
- throw({forbidden: \"by_node for \" + node + \" is missing\"});
- }
- if (newDoc.by_node[node].indexOf(range) === -1) {
- throw({forbidden: \"by_node for \" + node + \" is missing \" + range});
- }
- }
- }
-}
-">>).
diff --git a/src/custodian/src/custodian_util.erl b/src/custodian/src/custodian_util.erl
index ee217108f..ac46cb143 100644
--- a/src/custodian/src/custodian_util.erl
+++ b/src/custodian/src/custodian_util.erl
@@ -11,7 +11,6 @@
% the License.
-module(custodian_util).
--include("custodian.hrl").
-include_lib("mem3/include/mem3.hrl").
-include_lib("couch/include/couch_db.hrl").
@@ -19,6 +18,9 @@
-export([summary/0, report/0]).
-export([ensure_dbs_exists/0]).
+% Old design doc which should be cleaned up
+-define(CUSTODIAN_ID, <<"_design/custodian">>).
+
-record(state, {live, safe, n, callback, db, acc}).
%% public functions.
@@ -45,7 +47,7 @@ report() ->
ensure_dbs_exists() ->
DbName = mem3_sync:shards_db(),
{ok, Db} = mem3_util:ensure_exists(DbName),
- ensure_custodian_ddoc_exists(Db),
+ ensure_custodian_ddoc_is_deleted(Db),
{ok, Db}.
%% private functions.
@@ -180,41 +182,28 @@ count_conflicts(#full_doc_info{rev_tree = T}) ->
Leafs = [1 || {#leaf{deleted=false}, _} <- couch_key_tree:get_all_leafs(T)],
length(Leafs) - 1.
-ensure_custodian_ddoc_exists(Db) ->
+
+% Ensure the design doc which was added in 3.2.0 is deleted, as we switched to
+% using a BDU function instead. After a few releases this function could be
+% removed as well.
+%
+ensure_custodian_ddoc_is_deleted(Db) ->
case couch_db:open_doc(Db, ?CUSTODIAN_ID, [ejson_body]) of
{not_found, _Reason} ->
- try couch_db:update_doc(Db, custodian_ddoc(), []) of
- {ok, _} ->
- ok
- catch conflict ->
- {ok, NewDb} = couch_db:reopen(Db),
- ensure_custodian_ddoc_exists(NewDb)
- end;
+ ok;
{ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- Props1 = lists:keystore(<<"validate_doc_update">>, 1, Props, {<<"validate_doc_update">>, ?CUSTODIAN_VALIDATION}),
- case Props =:= Props1 of
- true ->
- ok;
- false ->
- try couch_db:update_doc(Db, couch_doc:from_json_obj({Props1}), []) of
- {ok, _} ->
- ok
- catch conflict ->
- {ok, NewDb} = couch_db:reopen(Db),
- ensure_custodian_ddoc_exists(NewDb)
- end
+ DeletedDoc = Doc#doc{deleted = true, body = {[]}},
+ try couch_db:update_doc(Db, DeletedDoc, [?ADMIN_CTX]) of
+ {ok, _} ->
+ LogMsg = "~p : deleted custodian ddoc ~s",
+ couch_log:notice(LogMsg, [?MODULE, ?CUSTODIAN_ID]),
+ ok
+ catch
+ conflict ->
+ {ok, NewDb} = couch_db:reopen(Db),
+ ensure_custodian_ddoc_is_deleted(NewDb)
end
end.
-custodian_ddoc() ->
- Props = [
- {<<"_id">>, ?CUSTODIAN_ID},
- {<<"language">>, <<"javascript">>},
- {<<"validate_doc_update">>, ?CUSTODIAN_VALIDATION}
- ],
- couch_doc:from_json_obj({Props}).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/dreyfus/test/dreyfus_blacklist_await_test.erl b/src/dreyfus/test/dreyfus_blacklist_await_test.erl
index 28a5e7f30..82665eb02 100644
--- a/src/dreyfus/test/dreyfus_blacklist_await_test.erl
+++ b/src/dreyfus/test/dreyfus_blacklist_await_test.erl
@@ -62,8 +62,8 @@ do_not_await_1() ->
State = create_state(?DBNAME, Index, nil, nil, []),
Msg = "Index Blocked from Updating - db: ~p, ddocid: ~p name: ~p",
Return = wait_log_message(Msg, fun() ->
- {noreply, NewState} = dreyfus_index:handle_call({await, 1},
- self(), State)
+ {noreply, _NewState} = dreyfus_index:handle_call({await, 1},
+ self(), State)
end),
?assertEqual(Return, ok).
diff --git a/src/dreyfus/test/dreyfus_purge_test.erl b/src/dreyfus/test/dreyfus_purge_test.erl
index 5fa4bc90f..9b24d6f64 100644
--- a/src/dreyfus/test/dreyfus_purge_test.erl
+++ b/src/dreyfus/test/dreyfus_purge_test.erl
@@ -27,6 +27,8 @@
test_local_doc/0, test_delete_local_doc/0, test_purge_search/0]).
-compile(export_all).
+-compile(nowarn_export_all).
+
test_all() ->
test_purge_single(),
@@ -703,10 +705,14 @@ test_purge_search() ->
%private API
db_name() ->
- Nums = tuple_to_list(erlang:now()),
- Prefix = "test-db",
- Suffix = lists:concat([integer_to_list(Num) || Num <- Nums]),
- list_to_binary(Prefix ++ "-" ++ Suffix).
+ iolist_to_binary([
+ "dreyfus-test-db-", [
+ integer_to_list(I) || I <- [
+ erlang:unique_integer([positive]),
+ rand:uniform(10000)
+ ]
+ ]
+ ]).
purge_docs(DBName, DocIds) ->
IdsRevs = [{DocId, [get_rev(DBName, DocId)]} || DocId <- DocIds],
diff --git a/src/dreyfus/test/dreyfus_test_util.erl b/src/dreyfus/test/dreyfus_test_util.erl
index 631bc1047..79fd9b59d 100644
--- a/src/dreyfus/test/dreyfus_test_util.erl
+++ b/src/dreyfus/test/dreyfus_test_util.erl
@@ -1,6 +1,8 @@
-module(dreyfus_test_util).
--compile(export_all).
+-export([
+ wait_config_change/2
+]).
-include_lib("couch/include/couch_db.hrl").
diff --git a/src/fabric/src/fabric_db_info.erl b/src/fabric/src/fabric_db_info.erl
index 40da678e5..586f282c2 100644
--- a/src/fabric/src/fabric_db_info.erl
+++ b/src/fabric/src/fabric_db_info.erl
@@ -77,7 +77,7 @@ handle_message(Reason, Shard, {Counters, Resps, CInfo}) ->
build_final_response(CInfo, DbName, Responses) ->
AccF = fabric_dict:fold(fun(Shard, Info, {Seqs, PSeqs, Infos}) ->
- Seq = couch_util:get_value(update_seq, Info),
+ Seq = build_seq(Shard, Info),
PSeq = couch_util:get_value(purge_seq, Info),
{[{Shard, Seq} | Seqs], [{Shard, PSeq} | PSeqs], [Info | Infos]}
end, {[], [], []}, Responses),
@@ -89,6 +89,13 @@ build_final_response(CInfo, DbName, Responses) ->
[{db_name, DbName}] ++ Sequences ++ MergedInfos.
+build_seq(#shard{node = Node}, Info) when is_list(Info) ->
+ Seq = couch_util:get_value(update_seq, Info),
+ Uuid = couch_util:get_value(uuid, Info),
+ PrefixLen = fabric_util:get_uuid_prefix_len(),
+ {Seq, binary:part(Uuid, {0, PrefixLen}), Node}.
+
+
merge_results(Info) ->
Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
orddict:new(), Info),
diff --git a/src/fabric/src/fabric_view_changes.erl b/src/fabric/src/fabric_view_changes.erl
index f564a22ea..9fdbf06df 100644
--- a/src/fabric/src/fabric_view_changes.erl
+++ b/src/fabric/src/fabric_view_changes.erl
@@ -18,6 +18,9 @@
%% exported for upgrade purposes.
-export([keep_sending_changes/8]).
+%% exported for testing and remsh debugging
+-export([decode_seq/1]).
+
-include_lib("fabric/include/fabric.hrl").
-include_lib("mem3/include/mem3.hrl").
-include_lib("couch/include/couch_db.hrl").
@@ -403,6 +406,22 @@ unpack_seq_decode_term(Opaque) ->
binary_to_term(couch_util:decodeBase64Url(Opaque)).
+% This is used for testing and for remsh debugging
+%
+% Return the unpacked list of sequences from a raw update seq string. The input
+% string is expected to include the N- prefix. The result looks like:
+% [{Node, Range, {SeqNum, Uuid, EpochNode}}, ...]
+%
+-spec decode_seq(binary()) -> [tuple()].
+decode_seq(Packed) ->
+ Opaque = unpack_seq_regex_match(Packed),
+ unpack_seq_decode_term(Opaque).
+
+
+% Returns fabric_dict with {Shard, Seq} entries
+%
+-spec unpack_seqs(pos_integer() | list() | binary(), binary()) ->
+ orddict:orddict().
unpack_seqs(0, DbName) ->
fabric_dict:init(mem3:shards(DbName), 0);
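decode_seq/1 is intended for exactly this kind of remsh spot check; a hedged sketch against a q=1 database (node name, range, and numbers illustrative):

    1> {ok, Info} = fabric:get_db_info(<<"mydb">>).
    2> Seq = couch_util:get_value(update_seq, Info).
    3> fabric_view_changes:decode_seq(Seq).
    [{'node1@127.0.0.1', [0, 4294967295], {5, <<"b8efd5f">>, 'node1@127.0.0.1'}}]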
diff --git a/src/fabric/test/eunit/fabric_db_info_tests.erl b/src/fabric/test/eunit/fabric_db_info_tests.erl
new file mode 100644
index 000000000..ccdafe3ae
--- /dev/null
+++ b/src/fabric/test/eunit/fabric_db_info_tests.erl
@@ -0,0 +1,68 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_db_info_tests).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("mem3/include/mem3.hrl").
+
+
+-define(TDEF(A), {atom_to_list(A), fun A/0}).
+
+
+main_test_() ->
+ {
+ setup,
+ fun setup/0,
+ fun teardown/1,
+ [
+ ?TDEF(t_update_seq_has_uuids)
+ ]
+ }.
+
+
+setup() ->
+ test_util:start_couch([fabric]).
+
+
+teardown(Ctx) ->
+ meck:unload(),
+ test_util:stop_couch(Ctx).
+
+
+t_update_seq_has_uuids() ->
+ DbName = ?tempdb(),
+ ok = fabric:create_db(DbName, [{q, 1}, {n, 1}]),
+
+ {ok, Info} = fabric:get_db_info(DbName),
+ UpdateSeq = couch_util:get_value(update_seq, Info),
+ UnpackedSeq = fabric_view_changes:decode_seq(UpdateSeq),
+
+ ?assertMatch([{_, _, _}], UnpackedSeq),
+ [{Node, Range, Seq}] = UnpackedSeq,
+ ?assert(is_atom(Node)),
+ ?assertMatch([_, _], Range),
+ ?assertMatch({_, _, _}, Seq),
+ {SeqNum, SeqUuid, EpochNode} = Seq,
+ ?assert(is_integer(SeqNum)),
+ ?assert(is_binary(SeqUuid)),
+ ?assert(is_atom(EpochNode)),
+
+ {ok, UuidMap} = fabric:db_uuids(DbName),
+ PrefixLen = fabric_util:get_uuid_prefix_len(),
+ Uuids = [binary:part(Uuid, {0, PrefixLen}) || Uuid <- maps:keys(UuidMap)],
+ [UuidFromShard] = Uuids,
+ ?assertEqual(UuidFromShard, SeqUuid),
+
+ ok = fabric:delete_db(DbName, []).
diff --git a/src/mem3/rebar.config b/src/mem3/rebar.config
new file mode 100644
index 000000000..362c8785e
--- /dev/null
+++ b/src/mem3/rebar.config
@@ -0,0 +1,14 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/src/mem3/src/mem3_bdu.erl b/src/mem3/src/mem3_bdu.erl
new file mode 100644
index 000000000..bf84d4470
--- /dev/null
+++ b/src/mem3/src/mem3_bdu.erl
@@ -0,0 +1,112 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_bdu).
+
+
+-export([
+ before_doc_update/3
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+-spec before_doc_update(#doc{}, Db::any(), couch_db:update_type()) -> #doc{}.
+before_doc_update(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db, _UpdateType) ->
+ % Skip design docs
+ Doc;
+
+before_doc_update(#doc{deleted = true} = Doc, _Db, _UpdateType) ->
+ % Skip deleted
+ Doc;
+
+before_doc_update(#doc{} = Doc, _Db, replicated_changes) ->
+ % Skip internal replicator updates
+ Doc;
+
+before_doc_update(#doc{} = Doc, _Db, _UpdateType) ->
+ Body1 = couch_util:json_encode(Doc#doc.body),
+ Body2 = couch_util:json_decode(Body1, [return_maps]),
+ validate(Body2),
+ Doc.
+
+
+validate(#{} = Body) ->
+ validate_key(<<"by_node">>, Body, ["by_node is mandatory"]),
+ validate_key(<<"by_range">>, Body, ["by_range is mandatory"]),
+ ByNode = maps:get(<<"by_node">>, Body),
+ case is_map(ByNode) of
+ true -> ok;
+ false -> throw({forbidden, ["by_node not an object"]})
+ end,
+ ByRange = maps:get(<<"by_range">>, Body),
+ case is_map(ByRange) of
+ true -> ok;
+ false -> throw({forbidden, ["by_range not an object"]})
+ end,
+ % "by_node": {
+ % "node1@xxx.xxx.xxx.xxx": ["00000000-1fffffff",...]
+ % }
+ maps:map(fun(Node, Ranges) ->
+ validate_by_node(Node, Ranges, ByRange)
+ end, ByNode),
+ % "by_range": {
+ % "00000000-1fffffff": ["node1@xxx.xxx.xxx.xxx", ...]
+ % }
+ maps:map(fun(Range, Nodes) ->
+ validate_by_range(Range, Nodes, ByNode)
+ end, ByRange).
+
+
+validate_by_node(Node, Ranges, ByRange) ->
+ validate_array(Ranges, ["by_node", Ranges, "value not an array"]),
+ lists:foreach(fun(Range) ->
+ validate_key(Range, ByRange, ["by_range for", Range, "missing"]),
+ Nodes = maps:get(Range, ByRange),
+ validate_member(Node, Nodes, ["by_range for", Range, "missing", Node])
+ end, Ranges).
+
+
+validate_by_range(Range, Nodes, ByNode) ->
+ validate_array(Nodes, ["by_range", Nodes, "value not an array"]),
+ lists:foreach(fun(Node) ->
+ validate_key(Node, ByNode, ["by_node for", Node, "missing"]),
+ Ranges = maps:get(Node, ByNode),
+ validate_member(Range, Ranges, ["by_node for", Node, "missing", Range])
+ end, Nodes).
+
+
+validate_array(Val, _ErrMsg) when is_list(Val) ->
+ ok;
+validate_array(_Val, ErrMsg) ->
+ throw({forbidden, errmsg(ErrMsg)}).
+
+
+validate_key(Key, #{} = Map, ErrMsg) ->
+ case maps:is_key(Key, Map) of
+ true -> ok;
+ false -> throw({forbidden, errmsg(ErrMsg)})
+ end.
+
+
+validate_member(Val, Array, ErrMsg) when is_list(Array) ->
+ case lists:member(Val, Array) of
+ true -> ok;
+ false -> throw({forbidden, errmsg(ErrMsg)})
+ end;
+validate_member(_Val, _Array, ErrMsg) ->
+ throw({forbidden, errmsg(ErrMsg)}).
+
+
+errmsg(ErrMsg) when is_list(ErrMsg) ->
+ list_to_binary(lists:join(" ", ErrMsg)).
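For reference, a minimal shard map body that satisfies every check above; a sketch calling the BDU directly (Db argument unused by mem3_bdu, #doc{} record from couch_db.hrl, node name illustrative):

    Body = {[
        {<<"by_node">>, {[{<<"node1@127.0.0.1">>, [<<"00000000-ffffffff">>]}]}},
        {<<"by_range">>, {[{<<"00000000-ffffffff">>, [<<"node1@127.0.0.1">>]}]}}
    ]},
    Doc = #doc{id = <<"db1">>, body = Body},
    %% Returns the doc unchanged when by_node and by_range are consistent:
    Doc = mem3_bdu:before_doc_update(Doc, fake_db, interactive_edit).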
diff --git a/src/mem3/test/eunit/mem3_bdu_test.erl b/src/mem3/test/eunit/mem3_bdu_test.erl
new file mode 100644
index 000000000..056cceda0
--- /dev/null
+++ b/src/mem3/test/eunit/mem3_bdu_test.erl
@@ -0,0 +1,280 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_bdu_test).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(TDEF_FE(Name), fun(Arg) -> {atom_to_list(Name), ?_test(Name(Arg))} end).
+
+-define(USER, "mem3_bdu_test_admin").
+-define(PASS, "pass").
+-define(AUTH, {basic_auth, {?USER, ?PASS}}).
+-define(JSON, {"Content-Type", "application/json"}).
+-define(DBS, "_node/_local/_dbs").
+
+
+setup() ->
+ Hashed = couch_passwords:hash_admin_password(?PASS),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Db = ?tempdb(),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ Url = lists:concat(["http://", Addr, ":", Port, "/"]),
+ {Url, Db}.
+
+
+teardown({Url, Db}) ->
+ sync_delete_db(Url, Db),
+ ok = config:delete("admins", ?USER, _Persist=false).
+
+
+start_couch() ->
+ test_util:start_couch([mem3, chttpd]).
+
+
+stop_couch(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+mem3_bdu_shard_doc_test_() ->
+ {
+ "mem3 bdu shard doc tests",
+ {
+ setup,
+ fun start_couch/0, fun stop_couch/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ ?TDEF_FE(t_can_insert_shard_map_doc),
+ ?TDEF_FE(t_missing_by_node_section),
+ ?TDEF_FE(t_by_node_not_a_map),
+ ?TDEF_FE(t_missing_by_range_section),
+ ?TDEF_FE(t_by_range_not_a_map),
+ ?TDEF_FE(t_missing_range_in_by_range),
+ ?TDEF_FE(t_missing_node_in_by_range_node_list),
+ ?TDEF_FE(t_missing_node_in_by_node),
+ ?TDEF_FE(t_missing_range_in_by_node_range_list),
+ ?TDEF_FE(t_by_node_val_not_array),
+ ?TDEF_FE(t_by_range_val_not_array),
+ ?TDEF_FE(t_design_docs_are_not_validated),
+ ?TDEF_FE(t_replicated_changes_not_validated)
+ ]
+ }
+ }
+ }.
+
+
+t_can_insert_shard_map_doc({Top, Db}) ->
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+ ShardMap = #{
+ <<"_id">> => Db,
+ <<"by_node">> => #{Node => [Range]},
+ <<"by_range">> => #{Range => [Node]},
+ <<"suffix">> => suffix()
+ },
+ {Code, Res} = req(post, Top ++ ?DBS, ShardMap),
+ ?assertEqual(201, Code),
+ ?assertMatch(#{<<"ok">> := true}, Res).
+
+
+t_missing_by_node_section({Top, Db}) ->
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+ ShardMap = #{
+ <<"_id">> => Db,
+ <<"by_range">> => #{Range => [Node]}
+ },
+ ?assertMatch({403, _}, req(post, Top ++ ?DBS, ShardMap)).
+
+
+t_by_node_not_a_map({Top, Db}) ->
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+ ShardMap = #{
+ <<"_id">> => Db,
+ <<"by_node">> => 42,
+ <<"by_range">> => #{Range => [Node]}
+ },
+ ?assertMatch({403, _}, req(post, Top ++ ?DBS, ShardMap)).
+
+
+t_missing_by_range_section({Top, Db}) ->
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+ ShardMap = #{
+ <<"_id">> => Db,
+ <<"by_node">> => #{Node => [Range]}
+ },
+ ?assertMatch({403, _}, req(post, Top ++ ?DBS, ShardMap)).
+
+
+t_by_range_not_a_map({Top, Db}) ->
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+ ShardMap = #{
+ <<"_id">> => Db,
+ <<"by_node">> => #{Node => [Range]},
+ <<"by_range">> => 42
+ },
+ ?assertMatch({403, _}, req(post, Top ++ ?DBS, ShardMap)).
+
+
+t_missing_range_in_by_range({Top, Db}) ->
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+ ShardMap = #{
+ <<"_id">> => Db,
+ <<"by_node">> => #{Node => [Range]},
+ <<"by_range">> => #{<<"xyz">> => [Node]}
+ },
+ ?assertMatch({403, _}, req(post, Top ++ ?DBS, ShardMap)).
+
+
+t_missing_node_in_by_range_node_list({Top, Db}) ->
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+ ShardMap = #{
+ <<"_id">> => Db,
+ <<"by_node">> => #{Node => [Range]},
+ <<"by_range">> => #{Range => [<<"xyz">>]}
+ },
+ ?assertMatch({403, _}, req(post, Top ++ ?DBS, ShardMap)).
+
+
+t_missing_node_in_by_node({Top, Db}) ->
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+ ShardMap = #{
+ <<"_id">> => Db,
+ <<"by_node">> => #{<<"xyz">> => [Range]},
+ <<"by_range">> => #{Range => [Node]}
+ },
+ ?assertMatch({403, _}, req(post, Top ++ ?DBS, ShardMap)).
+
+
+t_missing_range_in_by_node_range_list({Top, Db}) ->
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+ ShardMap = #{
+ <<"_id">> => Db,
+ <<"by_node">> => #{Node => [<<"xyz">>]},
+ <<"by_range">> => #{Range => [Node]}
+ },
+ ?assertMatch({403, _}, req(post, Top ++ ?DBS, ShardMap)).
+
+
+t_by_node_val_not_array({Top, Db}) ->
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+ ShardMap = #{
+ <<"_id">> => Db,
+ <<"by_node">> => #{Node => 42},
+ <<"by_range">> => #{Range => [Node]}
+ },
+ ?assertMatch({403, _}, req(post, Top ++ ?DBS, ShardMap)).
+
+
+t_by_range_val_not_array({Top, Db}) ->
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+ ShardMap = #{
+ <<"_id">> => Db,
+ <<"by_node">> => #{Node => [Range]},
+ <<"by_range">> => #{Range => 42}
+ },
+ ?assertMatch({403, _}, req(post, Top ++ ?DBS, ShardMap)).
+
+
+t_design_docs_are_not_validated({Top, _}) ->
+ DDoc = #{<<"_id">> => <<"_design/ddoc_bdu_test">>},
+ {Code, Res} = req(post, Top ++ ?DBS, DDoc),
+ ?assertEqual(201, Code),
+ #{<<"rev">> := Rev} = Res,
+ Deleted = #{
+ <<"id">> => <<"_design/ddoc_bdu_test">>,
+ <<"_rev">> => Rev,
+ <<"_deleted">> => true
+ },
+ ?assertMatch({200, _}, req(post, Top ++ ?DBS, Deleted)).
+
+
+t_replicated_changes_not_validated({Top, Db}) ->
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+ ShardMap = #{
+ <<"_id">> => Db,
+ <<"by_node">> => #{Node => [Range]},
+ % missing <<"by_range">>, we can tollerate it
+ % and not crash the backend
+ <<"suffix">> => suffix(),
+ <<"_rev">> => <<"1-abc">>,
+ <<"_revisions">> => #{
+ <<"ids">> => [<<"abc">>],
+ <<"start">> => 1
+ }
+ },
+ Docs = #{
+ <<"docs">> => [ShardMap],
+ <<"new_edits">> => false
+ },
+ {Code, Res} = req(post, Top ++ ?DBS ++ "/_bulk_docs", Docs),
+ ?assertEqual(201, Code),
+ ?assertEqual([], Res),
+ Deleted = #{
+ <<"id">> => Db,
+ <<"_rev">> => <<"1-abc">>,
+ <<"_deleted">> => true
+ },
+ ?assertMatch({200, _}, req(post, Top ++ ?DBS, Deleted)).
+
+
+delete_db(Top, Db) when is_binary(Db) ->
+ Url = Top ++ binary_to_list(Db),
+ case test_request:get(Url, [?AUTH]) of
+ {ok, 404, _, _} ->
+ not_found;
+ {ok, 200, _, _} ->
+ {ok, 200, _, _} = test_request:delete(Url, [?AUTH]),
+ ok
+ end.
+
+
+sync_delete_db(Top, Db) when is_binary(Db) ->
+ delete_db(Top, Db),
+ try
+ Shards = mem3:local_shards(Db),
+ ShardNames = [mem3:name(S) || S <- Shards],
+ [couch_server:delete(N, [?ADMIN_CTX]) || N <- ShardNames],
+ ok
+ catch
+ error:database_does_not_exist ->
+ ok
+ end.
+
+
+req(Method, Url, #{} = Body) ->
+ req(Method, Url, jiffy:encode(Body));
+
+req(Method, Url, Body) ->
+ Headers = [?JSON, ?AUTH],
+ {ok, Code, _, Res} = test_request:request(Method, Url, Headers, Body),
+ {Code, jiffy:decode(Res, [return_maps])}.
+
+
+suffix() ->
+ integer_to_list(erlang:system_time(second)).
diff --git a/src/mem3/test/eunit/mem3_reshard_test.erl b/src/mem3/test/eunit/mem3_reshard_test.erl
index 1122590ae..65f2b4bb0 100644
--- a/src/mem3/test/eunit/mem3_reshard_test.erl
+++ b/src/mem3/test/eunit/mem3_reshard_test.erl
@@ -519,7 +519,6 @@ target_reset_in_initial_copy(#{db1 := Db}) ->
split_an_incomplete_shard_map(#{db1 := Db}) ->
{timeout, ?TIMEOUT, ?_test(begin
- [#shard{} = Src] = lists:sort(mem3:local_shards(Db)),
[#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
meck:expect(mem3_util, calculate_max_n, 1, 0),
?assertMatch({error, {not_enough_shard_copies, _}},
diff --git a/test/elixir/test/basics_test.exs b/test/elixir/test/basics_test.exs
index e6fb20938..abc66ca40 100644
--- a/test/elixir/test/basics_test.exs
+++ b/test/elixir/test/basics_test.exs
@@ -58,6 +58,17 @@ defmodule BasicsTest do
assert context[:db_name] in Couch.get("/_all_dbs").body, "Db name in _all_dbs"
end
+ @tag :with_db
+ test "Limit and skip should work in _all_dbs", context do
+ db = context[:db_name]
+ db_count = length(Couch.get("/_all_dbs").body)
+ assert db_count > 0
+ assert Couch.get("/_all_dbs?limit=0").body == []
+ assert length(Couch.get("/_all_dbs?limit=1").body) >= 1
+ assert length(Couch.get("/_all_dbs?skip=1").body) == (db_count - 1)
+ assert [db] == Couch.get("/_all_dbs?start_key=\"#{db}\"&limit=1").body
+ end
+
test "Database name with '+' should encode to '+'", _context do
set_config({"chttpd", "decode_plus_to_space", "false"})
diff --git a/test/elixir/test/config/suite.elixir b/test/elixir/test/config/suite.elixir
index cfb32f2b7..2e97553ee 100644
--- a/test/elixir/test/config/suite.elixir
+++ b/test/elixir/test/config/suite.elixir
@@ -59,6 +59,7 @@
"Database name with '%2B' should encode to '+'",
"Database name with '+' should encode to '+'",
"Database should be in _all_dbs",
+ "Limit and skip should work in _all_dbs",
"Default headers are returned for doc with open_revs=all",
"Empty database should have zero docs",
"Make sure you can do a seq=true option",
diff --git a/test/elixir/test/replication_test.exs b/test/elixir/test/replication_test.exs
index 12057d75b..b2e30ab95 100644
--- a/test/elixir/test/replication_test.exs
+++ b/test/elixir/test/replication_test.exs
@@ -1753,8 +1753,8 @@ defmodule ReplicationTest do
def cmp_json(lhs, rhs), do: lhs == rhs
def seq_to_shards(seq) do
- for {_node, range, update_seq} <- decode_seq(seq) do
- {range, update_seq}
+ for {_node, range, {seq_num, _uuid, _epoch}} <- decode_seq(seq) do
+ {range, seq_num}
end
end