summary refs log tree commit diff
diff options
context:
space:
mode:
author    iilyak <iilyak@users.noreply.github.com>  2020-05-21 05:33:25 -0700
committer GitHub <noreply@github.com>  2020-05-21 05:33:25 -0700
commit    9e3f47535e2aa9a9416230a0fb335a5a8e0b533e (patch)
tree      a20b0fd9c4af0790a9bf85b3242cd22e9e3d102d
parent    6c7d203413d33e074c009e46a9e8122683e9e826 (diff)
parent    fad38281474813f8479c7fb71862555b7f381755 (diff)
download  couchdb-9e3f47535e2aa9a9416230a0fb335a5a8e0b533e.tar.gz
Merge pull request #2896 from cloudant/pagination-api-fix-limit
Fix handling of limit query parameter
-rw-r--r--  src/chttpd/test/exunit/pagination_test.exs  | 49
-rw-r--r--  src/couch_views/src/couch_views_http.erl    | 63
2 files changed, 93 insertions(+), 19 deletions(-)
diff --git a/src/chttpd/test/exunit/pagination_test.exs b/src/chttpd/test/exunit/pagination_test.exs
index fcb8f9add..140a5dc88 100644
--- a/src/chttpd/test/exunit/pagination_test.exs
+++ b/src/chttpd/test/exunit/pagination_test.exs
@@ -384,6 +384,55 @@ defmodule Couch.Test.Pagination do
assert resp.status_code == 200, "got error #{inspect(resp.body)}"
end
+ test ": _all_docs?page_size=4 should respect limit", ctx do
+ %{session: session, db_name: db_name} = ctx
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size - 2}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size - 2
+ assert not Map.has_key?(resp.body, "next")
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size - 1}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size - 1
+ assert not Map.has_key?(resp.body, "next")
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size
+ assert not Map.has_key?(resp.body, "next")
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size + 1}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size
+ assert Map.has_key?(resp.body, "next")
+
+ resp =
+ Couch.Session.get(session, "/#{db_name}/_all_docs",
+ query: %{page_size: ctx.page_size, limit: ctx.page_size + 2}
+ )
+
+ assert resp.status_code == 200, "got error #{inspect(resp.body)}"
+ assert length(resp.body["rows"]) == ctx.page_size
+ assert Map.has_key?(resp.body, "next")
+ end
+
test ": _all_docs/queries should limit number of queries", ctx do
queries = %{
queries: [%{}, %{}, %{}, %{}, %{}]
diff --git a/src/couch_views/src/couch_views_http.erl b/src/couch_views/src/couch_views_http.erl
index ae6725649..b9bc2b3c0 100644
--- a/src/couch_views/src/couch_views_http.erl
+++ b/src/couch_views/src/couch_views_http.erl
@@ -108,7 +108,7 @@ paginated_cb({meta, Meta}, #vacc{}=VAcc) ->
paginated(Req, EtagTerm, #mrargs{page_size = PageSize} = Args, KeyFun, Fun) ->
Etag = couch_httpd:make_etag(EtagTerm),
chttpd:etag_respond(Req, Etag, fun() ->
- hd(do_paginated(PageSize, [set_limit(Args)], KeyFun, Fun))
+ hd(do_paginated(PageSize, [Args], KeyFun, Fun))
end).
@@ -124,10 +124,10 @@ do_paginated(PageSize, QueriesArgs, KeyFun, Fun) when is_list(QueriesArgs) ->
{_N, Results} = lists:foldl(fun(Args0, {Limit, Acc}) ->
case Limit > 0 of
true ->
- Args = set_limit(Args0#mrargs{page_size = Limit}),
+ {OriginalLimit, Args} = set_limit(Args0#mrargs{page_size = Limit}),
{Meta, Items} = Fun(Args),
Result = maybe_add_bookmark(
- PageSize, Args, Meta, Items, KeyFun),
+ OriginalLimit, PageSize, Args, Meta, Items, KeyFun),
#{total_rows := Total} = Result,
{Limit - Total, [Result | Acc]};
false ->
@@ -143,10 +143,9 @@ do_paginated(PageSize, QueriesArgs, KeyFun, Fun) when is_list(QueriesArgs) ->
lists:reverse(Results).
-maybe_add_bookmark(PageSize, Args0, Response, Items, KeyFun) ->
- #mrargs{page_size = Limit} = Args0,
- Args = Args0#mrargs{page_size = PageSize},
- case check_completion(Limit, Items) of
+maybe_add_bookmark(OriginalLimit, PageSize, Args0, Response, Items, KeyFun) ->
+ #mrargs{page_size = RequestedLimit} = Args0,
+ case check_completion(OriginalLimit, RequestedLimit, Items) of
{Rows, nil} ->
maps:merge(Response, #{
rows => Rows,
@@ -157,6 +156,7 @@ maybe_add_bookmark(PageSize, Args0, Response, Items, KeyFun) ->
if is_binary(NextKey) -> ok; true ->
throw("Provided KeyFun should return binary")
end,
+ Args = Args0#mrargs{page_size = PageSize},
Bookmark = bookmark_encode(Args#mrargs{start_key=NextKey}),
maps:merge(Response, #{
rows => Rows,
@@ -168,14 +168,23 @@ maybe_add_bookmark(PageSize, Args0, Response, Items, KeyFun) ->
set_limit(#mrargs{page_size = PageSize, limit = Limit} = Args)
when is_integer(PageSize) andalso Limit > PageSize ->
- Args#mrargs{limit = PageSize + 1};
+ {Limit, Args#mrargs{limit = PageSize + 1}};
set_limit(#mrargs{page_size = PageSize, limit = Limit} = Args)
when is_integer(PageSize) ->
- Args#mrargs{limit = Limit + 1}.
+ {Limit, Args#mrargs{limit = Limit + 1}}.
-check_completion(Limit, Items) when length(Items) > Limit ->
+check_completion(OriginalLimit, RequestedLimit, Items)
+ when is_integer(OriginalLimit) andalso OriginalLimit =< RequestedLimit ->
+ {Rows, _} = split(OriginalLimit, Items),
+ {Rows, nil};
+
+check_completion(_OriginalLimit, RequestedLimit, Items) ->
+ split(RequestedLimit, Items).
+
+
+split(Limit, Items) when length(Items) > Limit ->
case lists:split(Limit, Items) of
{Head, [NextItem | _]} ->
{Head, NextItem};
@@ -183,7 +192,7 @@ check_completion(Limit, Items) when length(Items) > Limit ->
{Head, nil}
end;
-check_completion(_Limit, Items) ->
+split(_Limit, Items) ->
{Items, nil}.
@@ -258,35 +267,51 @@ bookmark_encode_decode_test() ->
check_completion_test() ->
?assertEqual(
{[], nil},
- check_completion(1, [])
+ check_completion(100, 1, [])
),
?assertEqual(
{[1], nil},
- check_completion(1, [1])
+ check_completion(100, 1, [1])
),
?assertEqual(
{[1], 2},
- check_completion(1, [1, 2])
+ check_completion(100, 1, [1, 2])
),
?assertEqual(
{[1], 2},
- check_completion(1, [1, 2, 3])
+ check_completion(100, 1, [1, 2, 3])
),
?assertEqual(
{[1, 2], nil},
- check_completion(3, [1, 2])
+ check_completion(100, 3, [1, 2])
),
?assertEqual(
{[1, 2, 3], nil},
- check_completion(3, [1, 2, 3])
+ check_completion(100, 3, [1, 2, 3])
),
?assertEqual(
{[1, 2, 3], 4},
- check_completion(3, [1, 2, 3, 4])
+ check_completion(100, 3, [1, 2, 3, 4])
),
?assertEqual(
{[1, 2, 3], 4},
- check_completion(3, [1, 2, 3, 4, 5])
+ check_completion(100, 3, [1, 2, 3, 4, 5])
+ ),
+ ?assertEqual(
+ {[1], nil},
+ check_completion(1, 1, [1])
+ ),
+ ?assertEqual(
+ {[1, 2], nil},
+ check_completion(2, 3, [1, 2])
+ ),
+ ?assertEqual(
+ {[1, 2], nil},
+ check_completion(2, 3, [1, 2, 3])
+ ),
+ ?assertEqual(
+ {[1, 2], nil},
+ check_completion(2, 3, [1, 2, 3, 4, 5])
),
ok.
-endif. \ No newline at end of file