author      ILYA Khlopotov <iilyak@apache.org>    2020-05-20 12:50:46 -0700
committer   ILYA Khlopotov <iilyak@apache.org>    2020-05-20 13:02:36 -0700
commit      fad38281474813f8479c7fb71862555b7f381755 (patch)
tree        b5253f807e259fb5df27cc6e4404e3ef6def4b7d
parent      6f2417e1af712b3720cf6c07713d7751cbc9fbef (diff)
download    couchdb-fad38281474813f8479c7fb71862555b7f381755.tar.gz
Fix handling of limit query parameter
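
Before this change the caller's limit was effectively lost: set_limit/1 was
applied both in paginated/5 and again inside do_paginated/4, so a request such
as _all_docs?page_size=4&limit=2 could return more rows than the caller asked
for. set_limit/1 now returns the original limit alongside the adjusted args,
and check_completion/3 honours it: when the limit fits within the page, rows
are truncated to that limit and no "next" bookmark is emitted; when it exceeds
the page, results are capped at page_size and the extra row marks the start of
the next page.

A minimal sketch of the new check_completion/3 contract (the values mirror the
unit tests added in this commit):

    %% caller's limit (2) fits within the requested page (3):
    %% truncate to the caller's limit, no bookmark
    {[1, 2], nil} = check_completion(2, 3, [1, 2, 3]),
    %% caller's limit (100) exceeds the page (3): the extra fetched row
    %% becomes the bookmark key for the next page
    {[1, 2, 3], 4} = check_completion(100, 3, [1, 2, 3, 4]).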
-rw-r--r--  src/chttpd/test/exunit/pagination_test.exs | 49
-rw-r--r--  src/couch_views/src/couch_views_http.erl   | 63
2 files changed, 93 insertions(+), 19 deletions(-)
diff --git a/src/chttpd/test/exunit/pagination_test.exs b/src/chttpd/test/exunit/pagination_test.exs
index fcb8f9add..140a5dc88 100644
--- a/src/chttpd/test/exunit/pagination_test.exs
+++ b/src/chttpd/test/exunit/pagination_test.exs
@@ -384,6 +384,55 @@ defmodule Couch.Test.Pagination do
assert resp.status_code == 200, "got error #{inspect(resp.body)}"
end
+ test ": _all_docs?page_size=4 should respect limit", ctx do
+ %{session: session, db_name: db_name} = ctx
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size - 2}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size - 2
+ assert not Map.has_key?(resp.body, "next")
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size - 1}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size - 1
+ assert not Map.has_key?(resp.body, "next")
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size
+ assert not Map.has_key?(resp.body, "next")
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size + 1}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size
+ assert Map.has_key?(resp.body, "next")
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size + 2}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size
+ assert Map.has_key?(resp.body, "next")
+ end
+
test ": _all_docs/queries should limit number of queries", ctx do
queries = %{
queries: [%{}, %{}, %{}, %{}, %{}]
diff --git a/src/couch_views/src/couch_views_http.erl b/src/couch_views/src/couch_views_http.erl
index ae6725649..b9bc2b3c0 100644
--- a/src/couch_views/src/couch_views_http.erl
+++ b/src/couch_views/src/couch_views_http.erl
@@ -108,7 +108,7 @@ paginated_cb({meta, Meta}, #vacc{}=VAcc) ->
paginated(Req, EtagTerm, #mrargs{page_size = PageSize} = Args, KeyFun, Fun) ->
Etag = couch_httpd:make_etag(EtagTerm),
chttpd:etag_respond(Req, Etag, fun() ->
- hd(do_paginated(PageSize, [set_limit(Args)], KeyFun, Fun))
+ hd(do_paginated(PageSize, [Args], KeyFun, Fun))
end).
@@ -124,10 +124,10 @@ do_paginated(PageSize, QueriesArgs, KeyFun, Fun) when is_list(QueriesArgs) ->
{_N, Results} = lists:foldl(fun(Args0, {Limit, Acc}) ->
case Limit > 0 of
true ->
- Args = set_limit(Args0#mrargs{page_size = Limit}),
+ {OriginalLimit, Args} = set_limit(Args0#mrargs{page_size = Limit}),
{Meta, Items} = Fun(Args),
Result = maybe_add_bookmark(
- PageSize, Args, Meta, Items, KeyFun),
+ OriginalLimit, PageSize, Args, Meta, Items, KeyFun),
#{total_rows := Total} = Result,
{Limit - Total, [Result | Acc]};
false ->
@@ -143,10 +143,9 @@ do_paginated(PageSize, QueriesArgs, KeyFun, Fun) when is_list(QueriesArgs) ->
lists:reverse(Results).
-maybe_add_bookmark(PageSize, Args0, Response, Items, KeyFun) ->
- #mrargs{page_size = Limit} = Args0,
- Args = Args0#mrargs{page_size = PageSize},
- case check_completion(Limit, Items) of
+maybe_add_bookmark(OriginalLimit, PageSize, Args0, Response, Items, KeyFun) ->
+ #mrargs{page_size = RequestedLimit} = Args0,
+ case check_completion(OriginalLimit, RequestedLimit, Items) of
{Rows, nil} ->
maps:merge(Response, #{
rows => Rows,
@@ -157,6 +156,7 @@ maybe_add_bookmark(PageSize, Args0, Response, Items, KeyFun) ->
if is_binary(NextKey) -> ok; true ->
throw("Provided KeyFun should return binary")
end,
+ Args = Args0#mrargs{page_size = PageSize},
Bookmark = bookmark_encode(Args#mrargs{start_key=NextKey}),
maps:merge(Response, #{
rows => Rows,
@@ -168,14 +168,23 @@ maybe_add_bookmark(PageSize, Args0, Response, Items, KeyFun) ->
set_limit(#mrargs{page_size = PageSize, limit = Limit} = Args)
when is_integer(PageSize) andalso Limit > PageSize ->
- Args#mrargs{limit = PageSize + 1};
+ {Limit, Args#mrargs{limit = PageSize + 1}};
set_limit(#mrargs{page_size = PageSize, limit = Limit} = Args)
when is_integer(PageSize) ->
- Args#mrargs{limit = Limit + 1}.
+ {Limit, Args#mrargs{limit = Limit + 1}}.
-check_completion(Limit, Items) when length(Items) > Limit ->
+check_completion(OriginalLimit, RequestedLimit, Items)
+ when is_integer(OriginalLimit) andalso OriginalLimit =< RequestedLimit ->
+ {Rows, _} = split(OriginalLimit, Items),
+ {Rows, nil};
+
+check_completion(_OriginalLimit, RequestedLimit, Items) ->
+ split(RequestedLimit, Items).
+
+
+split(Limit, Items) when length(Items) > Limit ->
case lists:split(Limit, Items) of
{Head, [NextItem | _]} ->
{Head, NextItem};
@@ -183,7 +192,7 @@ check_completion(Limit, Items) when length(Items) > Limit ->
{Head, nil}
end;
-check_completion(_Limit, Items) ->
+split(_Limit, Items) ->
{Items, nil}.
@@ -258,35 +267,51 @@ bookmark_encode_decode_test() ->
check_completion_test() ->
?assertEqual(
{[], nil},
- check_completion(1, [])
+ check_completion(100, 1, [])
),
?assertEqual(
{[1], nil},
- check_completion(1, [1])
+ check_completion(100, 1, [1])
),
?assertEqual(
{[1], 2},
- check_completion(1, [1, 2])
+ check_completion(100, 1, [1, 2])
),
?assertEqual(
{[1], 2},
- check_completion(1, [1, 2, 3])
+ check_completion(100, 1, [1, 2, 3])
),
?assertEqual(
{[1, 2], nil},
- check_completion(3, [1, 2])
+ check_completion(100, 3, [1, 2])
),
?assertEqual(
{[1, 2, 3], nil},
- check_completion(3, [1, 2, 3])
+ check_completion(100, 3, [1, 2, 3])
),
?assertEqual(
{[1, 2, 3], 4},
- check_completion(3, [1, 2, 3, 4])
+ check_completion(100, 3, [1, 2, 3, 4])
),
?assertEqual(
{[1, 2, 3], 4},
- check_completion(3, [1, 2, 3, 4, 5])
+ check_completion(100, 3, [1, 2, 3, 4, 5])
+ ),
+ ?assertEqual(
+ {[1], nil},
+ check_completion(1, 1, [1])
+ ),
+ ?assertEqual(
+ {[1, 2], nil},
+ check_completion(2, 3, [1, 2])
+ ),
+ ?assertEqual(
+ {[1, 2], nil},
+ check_completion(2, 3, [1, 2, 3])
+ ),
+ ?assertEqual(
+ {[1, 2], nil},
+ check_completion(2, 3, [1, 2, 3, 4, 5])
),
ok.
-endif.
\ No newline at end of file