summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNick Vatamaniuc <vatamane@apache.org>2019-10-22 18:33:30 -0400
committerNick Vatamaniuc <vatamane@apache.org>2019-10-24 10:02:04 -0400
commita620626b4307a328c6d471abac7f7a78da8355fd (patch)
treecc4de82fcd63a24a6a69aabb4d62b1ceee552cf0
parentf3d572caf112598d12d705f98dffb8cdbb2bec97 (diff)
downloadcouchdb-chunkify-local-docs.tar.gz
Chunkify local docschunkify-local-docs
Previously local docs were not chunkified and it was possible for replications which checkpointed a few dozen times to create local documents above the 100KB limit. Documents are chunkified according to the same scheme as the regular docs -- rev values are in a main `?DB_LOCAL_DOCS` subspace, and doc body chunks in a separate `?DB_LOCAL_DOC_BODIES` subspace that looks like: {?DB_LOCAL_DOC_BODIES, DocId, ChunkId} = BinaryChunk where `ChunkId` is an incrementing integer and BinaryChunk is a 100KB chunk of the term_to_binary of the body. We also go to some lengths to read and silently upgrade docs written with the old encoding. Upgrades happen on doc writes as a first step, to ensure stats update logic is not affected.
-rw-r--r--src/fabric/include/fabric2.hrl1
-rw-r--r--src/fabric/src/fabric2_db.erl14
-rw-r--r--src/fabric/src/fabric2_fdb.erl81
-rw-r--r--src/fabric/test/fabric2_doc_crud_tests.erl107
-rw-r--r--src/fabric/test/fabric2_local_doc_fold_tests.erl304
-rw-r--r--test/elixir/test/basics_test.exs2
6 files changed, 488 insertions, 21 deletions
diff --git a/src/fabric/include/fabric2.hrl b/src/fabric/include/fabric2.hrl
index fe11e6b8d..a5c12aef3 100644
--- a/src/fabric/include/fabric2.hrl
+++ b/src/fabric/include/fabric2.hrl
@@ -47,6 +47,7 @@
-define(DB_LOCAL_DOCS, 22).
-define(DB_ATTS, 23).
-define(DB_VIEWS, 24).
+-define(DB_LOCAL_DOC_BODIES, 25).
% Versions
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index 9ef0bd358..e2674a480 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -782,7 +782,14 @@ fold_design_docs(Db, UserFun, UserAcc0, Options1) ->
fold_docs(Db, UserFun, UserAcc0, Options2).
-fold_local_docs(Db, UserFun, UserAcc0, Options) ->
+fold_local_docs(Db, UserFun, UserAcc0, Options0) ->
+ % This is mostly for testing and sanity checking. When called from a test,
+ % the namespace will be set automatically. We also assert that, when called
+ % from the API, the correct namespace was set
+ Options = case lists:keyfind(namespace, 1, Options0) of
+ {namespace, <<"_local">>} -> Options0;
+ false -> [{namespace, <<"_local">>} | Options0]
+ end,
fabric2_fdb:transactional(Db, fun(TxDb) ->
try
#{
@@ -796,12 +803,11 @@ fold_local_docs(Db, UserFun, UserAcc0, Options) ->
UserAcc2 = fabric2_fdb:fold_range(TxDb, Prefix, fun({K, V}, Acc) ->
{DocId} = erlfdb_tuple:unpack(K, Prefix),
- LDoc = fabric2_fdb:get_local_doc(TxDb, DocId, V),
- #doc{revs = {Pos, [Rev]}} = LDoc,
+ Rev = fabric2_fdb:get_local_doc_rev(TxDb, DocId, V),
maybe_stop(UserFun({row, [
{id, DocId},
{key, DocId},
- {value, {[{rev, couch_doc:rev_to_str({Pos, Rev})}]}}
+ {value, {[{rev, couch_doc:rev_to_str({0, Rev})}]}}
]}, Acc))
end, UserAcc1, Options),
diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
index 2ccde1cb4..891437009 100644
--- a/src/fabric/src/fabric2_fdb.erl
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -46,7 +46,7 @@
get_doc_body_future/3,
get_doc_body_wait/4,
get_local_doc/2,
- get_local_doc/3,
+ get_local_doc_rev/3,
write_doc/6,
write_local_doc/2,
@@ -519,13 +519,29 @@ get_local_doc(#{} = Db0, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId) ->
} = Db = ensure_current(Db0),
Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, DocId}, DbPrefix),
- Val = erlfdb:wait(erlfdb:get(Tx, Key)),
- fdb_to_local_doc(Db, DocId, Val).
+ Rev = erlfdb:wait(erlfdb:get(Tx, Key)),
+ Prefix = erlfdb_tuple:pack({?DB_LOCAL_DOC_BODIES, DocId}, DbPrefix),
+ Future = erlfdb:get_range_startswith(Tx, Prefix),
+ Chunks = lists:map(fun({_K, V}) -> V end, erlfdb:wait(Future)),
-get_local_doc(#{} = Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, Val)
- when is_binary(Val) orelse Val =:= not_found ->
- fdb_to_local_doc(ensure_current(Db), DocId, Val).
+ fdb_to_local_doc(Db, DocId, Rev, Chunks).
+
+
+% Compatibility clause for an older encoding format using term_to_binary
+get_local_doc_rev(_Db0, <<?LOCAL_DOC_PREFIX, _/binary>> = _DocId,
+ <<131, _/binary>> = Val) ->
+ {Rev, _} = binary_to_term(Val, [safe]),
+ Rev;
+
+get_local_doc_rev(_Db0, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId,
+ <<_/binary>> = Val) ->
+ try binary_to_integer(Val) of
+ IntVal when IntVal >= 0 -> Val;
+ _ -> erlang:error({invalid_local_doc_rev, DocId, Val})
+ catch
+ error:badarg -> erlang:error({invalid_local_doc_rev, DocId, Val})
+ end.
write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
@@ -647,19 +663,31 @@ write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
write_local_doc(#{} = Db0, Doc) ->
#{
- tx := Tx
+ tx := Tx,
+ db_prefix := DbPrefix
} = Db = ensure_current(Db0),
- {LDocKey, LDocVal} = local_doc_to_fdb(Db, Doc),
+ Id = Doc#doc.id,
+
+ {LDocKey, LDocVal, Rows} = local_doc_to_fdb(Db, Doc),
WasDeleted = case erlfdb:wait(erlfdb:get(Tx, LDocKey)) of
<<_/binary>> -> false;
not_found -> true
end,
+ BPrefix = erlfdb_tuple:pack({?DB_LOCAL_DOC_BODIES, Id}, DbPrefix),
+
case Doc#doc.deleted of
- true -> erlfdb:clear(Tx, LDocKey);
- false -> erlfdb:set(Tx, LDocKey, LDocVal)
+ true ->
+ erlfdb:clear(Tx, LDocKey),
+ erlfdb:clear_range_startswith(Tx, BPrefix);
+ false ->
+ erlfdb:set(Tx, LDocKey, LDocVal),
+ % Make sure to clear the whole range, in case there was a larger
+ % document body there before.
+ erlfdb:clear_range_startswith(Tx, BPrefix),
+ lists:foreach(fun({K, V}) -> erlfdb:set(Tx, K, V) end, Rows)
end,
case {WasDeleted, Doc#doc.deleted} of
@@ -1066,26 +1094,45 @@ local_doc_to_fdb(Db, #doc{} = Doc) ->
body = Body
} = Doc,
+ Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, Id}, DbPrefix),
+
StoreRev = case Rev of
_ when is_integer(Rev) -> integer_to_binary(Rev);
_ when is_binary(Rev) -> Rev
end,
- Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, Id}, DbPrefix),
- Val = {StoreRev, Body},
- {Key, term_to_binary(Val, [{minor_version, 1}])}.
+ BVal = term_to_binary(Body, [{minor_version, 1}]),
+ {Rows, _} = lists:mapfoldl(fun(Chunk, ChunkId) ->
+ K = erlfdb_tuple:pack({?DB_LOCAL_DOC_BODIES, Id, ChunkId}, DbPrefix),
+ {{K, Chunk}, ChunkId + 1}
+ end, 0, chunkify_binary(BVal)),
+
+ {Key, StoreRev, Rows}.
-fdb_to_local_doc(_Db, DocId, Bin) when is_binary(Bin) ->
- {Rev, Body} = binary_to_term(Bin, [safe]),
+fdb_to_local_doc(_Db, DocId, <<131, _/binary>> = Val, []) ->
+ % This is an upgrade clause for the old encoding. We allow reading the old
+ % value and will perform an upgrade of the storage format on an update.
+ {Rev, Body} = binary_to_term(Val, [safe]),
#doc{
id = DocId,
revs = {0, [Rev]},
deleted = false,
body = Body
};
-fdb_to_local_doc(_Db, _DocId, not_found) ->
- {not_found, missing}.
+
+fdb_to_local_doc(_Db, _DocId, not_found, []) ->
+ {not_found, missing};
+
+fdb_to_local_doc(_Db, DocId, Rev, Rows) when is_list(Rows), is_binary(Rev) ->
+ BodyBin = iolist_to_binary(Rows),
+ Body = binary_to_term(BodyBin, [safe]),
+ #doc{
+ id = DocId,
+ revs = {0, [Rev]},
+ deleted = false,
+ body = Body
+ }.
chunkify_binary(Data) ->
diff --git a/src/fabric/test/fabric2_doc_crud_tests.erl b/src/fabric/test/fabric2_doc_crud_tests.erl
index 3cb380827..255efefdc 100644
--- a/src/fabric/test/fabric2_doc_crud_tests.erl
+++ b/src/fabric/test/fabric2_doc_crud_tests.erl
@@ -16,6 +16,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("eunit/include/eunit.hrl").
+-include("fabric2.hrl").
doc_crud_test_() ->
@@ -61,6 +62,9 @@ doc_crud_test_() ->
fun recreate_local_doc/1,
fun create_local_doc_bad_rev/1,
fun create_local_doc_random_rev/1,
+ fun create_a_large_local_doc/1,
+ fun create_2_large_local_docs/1,
+ fun local_doc_with_previous_encoding/1,
fun before_doc_update_skips_local_docs/1
]}
}
@@ -765,6 +769,109 @@ create_local_doc_random_rev({Db, _}) ->
?assertEqual(Doc5#doc{revs = {0, [<<"2">>]}}, Doc6).
+create_a_large_local_doc({Db, _}) ->
+ UUID = fabric2_util:uuid(),
+ LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+ Body = << <<"x">> || _ <- lists:seq(1, 300000) >>,
+ Doc1 = #doc{
+ id = LDocId,
+ revs = {0, []},
+ body = Body
+ },
+ ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+ {ok, Doc2} = fabric2_db:open_doc(Db, Doc1#doc.id, []),
+ ?assertEqual(Doc1#doc{revs = {0, [<<"1">>]}}, Doc2),
+
+ % Read via fold_local_docs
+ {ok, Result} = fabric2_db:fold_local_docs(Db, fun(Data, Acc) ->
+ case Data of
+ {row, [{id, DocId} | _]} when LDocId =:= DocId ->
+ {ok, [Data | Acc]};
+ _ ->
+ {ok, Acc}
+ end
+ end, [], []),
+ ?assertEqual([{row, [
+ {id, LDocId},
+ {key, LDocId},
+ {value, {[{rev, <<"0-1">>}]}}
+ ]}], Result).
+
+
+create_2_large_local_docs({Db, _}) ->
+ % Create a large doc then overwrite with a smaller one. The reason is to
+ % ensure the previous one correctly clears its range before writing the
+ % new smaller one in its place.
+ UUID = fabric2_util:uuid(),
+ LDocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+ Body1 = << <<"x">> || _ <- lists:seq(1, 400000) >>,
+ Body2 = << <<"y">> || _ <- lists:seq(1, 150000) >>,
+
+ Doc1 = #doc{
+ id = LDocId,
+ revs = {0, []},
+ body = Body1
+ },
+
+ ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc1)),
+
+ Doc2 = Doc1#doc{body = Body2},
+ ?assertEqual({ok, {0, <<"1">>}}, fabric2_db:update_doc(Db, Doc2)),
+
+ {ok, Doc3} = fabric2_db:open_doc(Db, LDocId, []),
+ ?assertEqual(Doc2#doc{revs = {0, [<<"1">>]}}, Doc3).
+
+
+local_doc_with_previous_encoding({Db, _}) ->
+ #{db_prefix := DbPrefix} = Db,
+
+ Id = <<"_local/old_doc">>,
+ Body = {[{<<"x">>, 5}]},
+ Rev = <<"1">>,
+ Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, Id}, DbPrefix),
+
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
+ #{tx := Tx} = TxDb,
+ Term = term_to_binary({Rev, Body}, [{minor_version, 1}]),
+ ok = erlfdb:set(Tx, Key, Term)
+ end),
+
+ % Read old doc
+ {ok, Doc1} = fabric2_db:open_doc(Db, Id, []),
+ ?assertEqual({0, [<<"1">>]}, Doc1#doc.revs),
+ ?assertEqual({[{<<"x">>, 5}]}, Doc1#doc.body),
+
+ % Read via fold_local_docs.
+ {ok, Result} = fabric2_db:fold_local_docs(Db, fun(Data, Acc) ->
+ case Data of
+ {row, [{id, DocId} | _]} when Id =:= DocId ->
+ {ok, [Data | Acc]};
+ _ ->
+ {ok, Acc}
+ end
+ end, [], []),
+ ?assertEqual([{row, [
+ {id, Id},
+ {key, Id},
+ {value, {[{rev, <<"0-1">>}]}}
+ ]}], Result),
+
+ % Update doc
+ NewBody = {[{<<"y">>, 6}]},
+ Doc2 = Doc1#doc{body = NewBody},
+ ?assertEqual({ok, {0, <<"2">>}}, fabric2_db:update_doc(Db, Doc2)),
+ {ok, Doc3} = fabric2_db:open_doc(Db, Doc2#doc.id, []),
+ ?assertEqual({0, [<<"2">>]}, Doc3#doc.revs),
+ ?assertEqual(NewBody, Doc3#doc.body),
+
+ % Old doc now has only the rev number in it
+ OldDocBin = fabric2_fdb:transactional(Db, fun(TxDb) ->
+ #{tx := Tx} = TxDb,
+ erlfdb:wait(erlfdb:get(Tx, Key))
+ end),
+ ?assertEqual(<<"2">> , OldDocBin).
+
+
before_doc_update_skips_local_docs({Db0, _}) ->
BduFun = fun(Doc, _, _) ->
diff --git a/src/fabric/test/fabric2_local_doc_fold_tests.erl b/src/fabric/test/fabric2_local_doc_fold_tests.erl
new file mode 100644
index 000000000..82203b433
--- /dev/null
+++ b/src/fabric/test/fabric2_local_doc_fold_tests.erl
@@ -0,0 +1,304 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric2_local_doc_fold_tests).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+
+-define(DOC_COUNT, 50).
+
+%% eunit implementation of {with, Tests} doesn't detect test name correctly
+with(Tests) ->
+ fun(ArgsTuple) ->
+ [{Name, ?_test(Fun(ArgsTuple))} || {Name, Fun} <- Tests]
+ ++
+ [{Name, {timeout, Timeout, ?_test(Fun(ArgsTuple))}} || {Name, Timeout, Fun} <- Tests]
+ end.
+
+-define(NAMED(A), {atom_to_list(A), fun A/1}).
+-define(WITH_TIMEOUT(Timeout, A), {atom_to_list(A), Timeout, fun A/1}).
+
+doc_fold_test_() ->
+ {
+ "Test local document fold operations",
+ {
+ setup,
+ fun setup/0,
+ fun cleanup/1,
+ with([
+ ?NAMED(fold_local_docs_basic),
+ ?NAMED(fold_local_docs_rev),
+ ?NAMED(fold_local_docs_with_start_key),
+ ?NAMED(fold_local_docs_with_end_key),
+ ?NAMED(fold_local_docs_with_both_keys_the_same),
+ ?WITH_TIMEOUT(15000, fold_local_docs_with_different_keys),
+ ?NAMED(fold_local_docs_with_limit),
+ ?NAMED(fold_local_docs_with_skip),
+ ?NAMED(fold_local_docs_with_skip_and_limit)
+ ])
+ }
+ }.
+
+
+setup() ->
+ Ctx = test_util:start_couch([fabric]),
+ {ok, Db} = fabric2_db:create(?tempdb(), [{user_ctx, ?ADMIN_USER}]),
+ DocIdRevs = lists:map(fun(Val) ->
+ UUID = fabric2_util:uuid(),
+ DocId = <<?LOCAL_DOC_PREFIX, UUID/binary>>,
+ % Every 10th doc is large to force the doc to be chunkified
+ BigChunk = << <<"x">> || _ <- lists:seq(1, 200000) >>,
+ Body = case Val rem 10 == 0 of
+ true -> {[{<<"value">>, BigChunk}]};
+ false -> {[{<<"value">>, Val}]}
+ end,
+ Doc = #doc{
+ id = DocId,
+ body = Body
+ },
+ {ok, Rev} = fabric2_db:update_doc(Db, Doc, []),
+ {DocId, {[{rev, couch_doc:rev_to_str(Rev)}]}}
+ end, lists:seq(1, ?DOC_COUNT)),
+ {Db, lists:sort(DocIdRevs), Ctx}.
+
+
+cleanup({Db, _DocIdRevs, Ctx}) ->
+ ok = fabric2_db:delete(fabric2_db:name(Db), []),
+ test_util:stop_couch(Ctx).
+
+
+fold_local_docs_basic({Db, DocIdRevs, _}) ->
+ {ok, {?DOC_COUNT, Rows}} = fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], []),
+ ?assertEqual(DocIdRevs, lists:reverse(Rows)).
+
+
+fold_local_docs_rev({Db, DocIdRevs, _}) ->
+ Opts = [{dir, rev}],
+ {ok, {?DOC_COUNT, Rows}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts),
+ ?assertEqual(DocIdRevs, Rows).
+
+
+fold_local_docs_with_start_key({Db, DocIdRevs, _}) ->
+ {StartKey, _} = hd(DocIdRevs),
+ Opts = [{start_key, StartKey}],
+ {ok, {?DOC_COUNT, Rows}}
+ = fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts),
+ ?assertEqual(DocIdRevs, lists:reverse(Rows)),
+ if length(DocIdRevs) == 1 -> ok; true ->
+ fold_local_docs_with_start_key({Db, tl(DocIdRevs), nil})
+ end.
+
+
+fold_local_docs_with_end_key({Db, DocIdRevs, _}) ->
+ RevDocIdRevs = lists:reverse(DocIdRevs),
+ {EndKey, _} = hd(RevDocIdRevs),
+ Opts = [{end_key, EndKey}],
+ {ok, {?DOC_COUNT, Rows}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts),
+ ?assertEqual(RevDocIdRevs, Rows),
+ if length(DocIdRevs) == 1 -> ok; true ->
+ fold_local_docs_with_end_key({Db, lists:reverse(tl(RevDocIdRevs)), nil})
+ end.
+
+
+fold_local_docs_with_both_keys_the_same({Db, DocIdRevs, _}) ->
+ lists:foreach(fun({DocId, _} = Row) ->
+ check_all_combos(Db, DocId, DocId, [Row])
+ end, DocIdRevs).
+
+
+fold_local_docs_with_different_keys({Db, DocIdRevs, _}) ->
+ lists:foreach(fun(_) ->
+ {StartKey, EndKey, Rows} = pick_range(DocIdRevs),
+ check_all_combos(Db, StartKey, EndKey, Rows)
+ end, lists:seq(1, 100)).
+
+
+fold_local_docs_with_limit({Db, DocIdRevs, _}) ->
+ lists:foreach(fun(Limit) ->
+ Opts1 = [{limit, Limit}],
+ {ok, {?DOC_COUNT, Rows1}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts1),
+ ?assertEqual(lists:sublist(DocIdRevs, Limit), lists:reverse(Rows1)),
+
+ Opts2 = [{dir, rev} | Opts1],
+ {ok, {?DOC_COUNT, Rows2}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts2),
+ ?assertEqual(
+ lists:sublist(lists:reverse(DocIdRevs), Limit),
+ lists:reverse(Rows2)
+ )
+ end, lists:seq(0, 51)).
+
+
+fold_local_docs_with_skip({Db, DocIdRevs, _}) ->
+ lists:foreach(fun(Skip) ->
+ Opts1 = [{skip, Skip}],
+ {ok, {?DOC_COUNT, Rows1}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts1),
+ Expect1 = case Skip > length(DocIdRevs) of
+ true -> [];
+ false -> lists:nthtail(Skip, DocIdRevs)
+ end,
+ ?assertEqual(Expect1, lists:reverse(Rows1)),
+
+ Opts2 = [{dir, rev} | Opts1],
+ {ok, {?DOC_COUNT, Rows2}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts2),
+ Expect2 = case Skip > length(DocIdRevs) of
+ true -> [];
+ false -> lists:nthtail(Skip, lists:reverse(DocIdRevs))
+ end,
+ ?assertEqual(Expect2, lists:reverse(Rows2))
+ end, lists:seq(0, 51)).
+
+
+fold_local_docs_with_skip_and_limit({Db, DocIdRevs, _}) ->
+ lists:foreach(fun(_) ->
+ check_skip_and_limit(Db, [], DocIdRevs),
+ check_skip_and_limit(Db, [{dir, rev}], lists:reverse(DocIdRevs))
+ end, lists:seq(1, 100)).
+
+
+check_all_combos(Db, StartKey, EndKey, Rows) ->
+ Opts1 = make_opts(fwd, StartKey, EndKey, true),
+ {ok, {?DOC_COUNT, Rows1}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts1),
+ ?assertEqual(lists:reverse(Rows), Rows1),
+ check_skip_and_limit(Db, Opts1, Rows),
+
+ Opts2 = make_opts(fwd, StartKey, EndKey, false),
+ {ok, {?DOC_COUNT, Rows2}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts2),
+ Expect2 = if EndKey == undefined -> lists:reverse(Rows); true ->
+ lists:reverse(all_but_last(Rows))
+ end,
+ ?assertEqual(Expect2, Rows2),
+ check_skip_and_limit(Db, Opts2, lists:reverse(Expect2)),
+
+ Opts3 = make_opts(rev, StartKey, EndKey, true),
+ {ok, {?DOC_COUNT, Rows3}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts3),
+ ?assertEqual(Rows, Rows3),
+ check_skip_and_limit(Db, Opts3, lists:reverse(Rows)),
+
+ Opts4 = make_opts(rev, StartKey, EndKey, false),
+ {ok, {?DOC_COUNT, Rows4}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], Opts4),
+ Expect4 = if StartKey == undefined -> Rows; true ->
+ tl(Rows)
+ end,
+ ?assertEqual(Expect4, Rows4),
+ check_skip_and_limit(Db, Opts4, lists:reverse(Expect4)).
+
+
+check_skip_and_limit(Db, Opts, []) ->
+ Skip = rand:uniform(?DOC_COUNT + 1) - 1,
+ Limit = rand:uniform(?DOC_COUNT + 1) - 1,
+ NewOpts = [{skip, Skip}, {limit, Limit} | Opts],
+ {ok, {?DOC_COUNT, OutRows}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], NewOpts),
+ ?assertEqual([], OutRows);
+
+check_skip_and_limit(Db, Opts, Rows) ->
+ Skip = rand:uniform(length(Rows) + 1) - 1,
+ Limit = rand:uniform(?DOC_COUNT + 1 - Skip) - 1,
+
+ ExpectRows = case Skip >= length(Rows) of
+ true ->
+ [];
+ false ->
+ lists:sublist(lists:nthtail(Skip, Rows), Limit)
+ end,
+
+ SkipLimitOpts = [{skip, Skip}, {limit, Limit} | Opts],
+ {ok, {?DOC_COUNT, RevRows}} =
+ fabric2_db:fold_local_docs(Db, fun fold_fun/2, [], SkipLimitOpts),
+ OutRows = lists:reverse(RevRows),
+ ?assertEqual(ExpectRows, OutRows).
+
+
+make_opts(fwd, StartKey, EndKey, InclusiveEnd) ->
+ DirOpts = case rand:uniform() =< 0.50 of
+ true -> [{dir, fwd}];
+ false -> []
+ end,
+ StartOpts = case StartKey of
+ undefined -> [];
+ <<_/binary>> -> [{start_key, StartKey}]
+ end,
+ EndOpts = case EndKey of
+ undefined -> [];
+ <<_/binary>> when InclusiveEnd -> [{end_key, EndKey}];
+ <<_/binary>> -> [{end_key_gt, EndKey}]
+ end,
+ DirOpts ++ StartOpts ++ EndOpts;
+make_opts(rev, StartKey, EndKey, InclusiveEnd) ->
+ BaseOpts = make_opts(fwd, EndKey, StartKey, InclusiveEnd),
+ [{dir, rev}] ++ BaseOpts -- [{dir, fwd}].
+
+
+all_but_last([]) ->
+ [];
+all_but_last([_]) ->
+ [];
+all_but_last(Rows) ->
+ lists:sublist(Rows, length(Rows) - 1).
+
+
+pick_range(DocIdRevs) ->
+ {StartKey, StartRow, RestRows} = pick_start_key(DocIdRevs),
+ {EndKey, EndRow, RowsBetween} = pick_end_key(RestRows),
+ {StartKey, EndKey, StartRow ++ RowsBetween ++ EndRow}.
+
+
+pick_start_key(Rows) ->
+ case rand:uniform() =< 0.1 of
+ true ->
+ {undefined, [], Rows};
+ false ->
+ Idx = rand:uniform(length(Rows)),
+ {DocId, _} = Row = lists:nth(Idx, Rows),
+ {DocId, [Row], lists:nthtail(Idx, Rows)}
+ end.
+
+
+pick_end_key([]) ->
+ {undefined, [], []};
+
+pick_end_key(Rows) ->
+ case rand:uniform() =< 0.1 of
+ true ->
+ {undefined, [], Rows};
+ false ->
+ Idx = rand:uniform(length(Rows)),
+ {DocId, _} = Row = lists:nth(Idx, Rows),
+ Tail = lists:nthtail(Idx, Rows),
+ {DocId, [Row], Rows -- [Row | Tail]}
+ end.
+
+
+fold_fun({meta, Meta}, _Acc) ->
+ Total = fabric2_util:get_value(total, Meta),
+ {ok, {Total, []}};
+fold_fun({row, Row}, {Total, Rows}) ->
+ RowId = fabric2_util:get_value(id, Row),
+ RowId = fabric2_util:get_value(key, Row),
+ RowRev = fabric2_util:get_value(value, Row),
+ {ok, {Total, [{RowId, RowRev} | Rows]}};
+fold_fun(complete, Acc) ->
+ {ok, Acc}.
diff --git a/test/elixir/test/basics_test.exs b/test/elixir/test/basics_test.exs
index a14035d58..dda2a0c15 100644
--- a/test/elixir/test/basics_test.exs
+++ b/test/elixir/test/basics_test.exs
@@ -454,6 +454,8 @@ defmodule BasicsTest do
assert Map.has_key?(val, "rev")
# Add _local/doc5
+ # Use a body > 100KB to test the local docs chunkifier
+ body = %{:b => String.duplicate("b", 110_000)}
assert Couch.put("/#{db_name}/_local/doc5", body: body).body["ok"]
resp = Couch.get("/#{db_name}/_local_docs")
assert resp.status_code == 200