summary refs log tree commit diff
path: root/src/couch_replicator/src/couch_replicator_httpd.erl
diff options
context:
space:
mode:
Diffstat (limited to 'src/couch_replicator/src/couch_replicator_httpd.erl')
-rw-r--r-- src/couch_replicator/src/couch_replicator_httpd.erl | 77
1 files changed, 63 insertions, 14 deletions
diff --git a/src/couch_replicator/src/couch_replicator_httpd.erl b/src/couch_replicator/src/couch_replicator_httpd.erl
index 6b4347267..364d09858 100644
--- a/src/couch_replicator/src/couch_replicator_httpd.erl
+++ b/src/couch_replicator/src/couch_replicator_httpd.erl
@@ -13,6 +13,12 @@
-module(couch_replicator_httpd).
-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+-export([
+ handle_req/1,
+ handle_scheduler_req/1
+]).
-import(couch_httpd, [
send_json/2,
@@ -24,13 +30,68 @@
to_binary/1
]).
--export([handle_req/1]).
+
+-define(DEFAULT_TASK_LIMIT, 100).
+-define(REPDB, <<"_replicator">>).
+
+
+handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"jobs">>]}=Req) ->
+ Limit = couch_replicator_httpd_util:parse_int_param(Req, "limit",
+ ?DEFAULT_TASK_LIMIT, 0, infinity),
+ Skip = couch_replicator_httpd_util:parse_int_param(Req, "skip", 0, 0,
+ infinity),
+ {Replies, _BadNodes} = rpc:multicall(couch_replicator_scheduler, jobs, []),
+ Flatlist = lists:concat(Replies),
+    % couch_replicator_scheduler:job_ejson/1 guarantees {id, Id} to be
+ % the first item in the list
+ Sorted = lists:sort(fun({[{id,A}|_]},{[{id,B}|_]}) -> A =< B end, Flatlist),
+ Total = length(Sorted),
+ Offset = min(Skip, Total),
+ Sublist = lists:sublist(Sorted, Offset+1, Limit),
+ Sublist1 = [couch_replicator_httpd_util:update_db_name(Task)
+ || Task <- Sublist],
+ send_json(Req, {[{total_rows, Total}, {offset, Offset}, {jobs, Sublist1}]});
+handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"jobs">>,JobId]}=Req) ->
+ case couch_replicator:job(JobId) of
+ {ok, JobInfo} ->
+ send_json(Req, couch_replicator_httpd_util:update_db_name(JobInfo));
+ {error, not_found} ->
+ throw(not_found)
+ end;
+handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>]}=Req) ->
+ VArgs0 = couch_mrview_http:parse_params(Req, undefined),
+ StatesQs = chttpd:qs_value(Req, "states"),
+ States = couch_replicator_httpd_util:parse_replication_state_filter(StatesQs),
+ VArgs1 = VArgs0#mrargs{
+ view_type = map,
+ include_docs = true,
+ reduce = false,
+ extra = [{filter_states, States}]
+ },
+ VArgs2 = couch_mrview_util:validate_args(VArgs1),
+ Opts = [{user_ctx, Req#httpd.user_ctx}],
+ Db = ?REPDB,
+ Max = chttpd:chunked_response_buffer_size(),
+ Acc = couch_replicator_httpd_util:docs_acc_new(Req, Db, Max),
+ Cb = fun couch_replicator_httpd_util:docs_cb/2,
+ {ok, RAcc} = couch_replicator_fabric:docs(Db, Opts, VArgs2, Cb, Acc),
+ {ok, couch_replicator_httpd_util:docs_acc_response(RAcc)};
+handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>,DocId]}=Req) ->
+ UserCtx = Req#httpd.user_ctx,
+ case couch_replicator:doc(?REPDB, DocId, UserCtx#user_ctx.roles) of
+ {ok, DocInfo} ->
+ send_json(Req, couch_replicator_httpd_util:update_db_name(DocInfo));
+ {error, not_found} ->
+ throw(not_found)
+ end;
+handle_scheduler_req(Req) ->
+ send_method_not_allowed(Req, "GET,HEAD").
handle_req(#httpd{method = 'POST', user_ctx = UserCtx} = Req) ->
couch_httpd:validate_ctype(Req, "application/json"),
RepDoc = {Props} = couch_httpd:json_body_obj(Req),
- validate_rep_props(Props),
+    couch_replicator_httpd_util:validate_rep_props(Props),
case couch_replicator:replicate(RepDoc, UserCtx) of
{error, {Error, Reason}} ->
send_json(
@@ -51,15 +112,3 @@ handle_req(#httpd{method = 'POST', user_ctx = UserCtx} = Req) ->
handle_req(Req) ->
send_method_not_allowed(Req, "POST").
-
-validate_rep_props([]) ->
- ok;
-validate_rep_props([{<<"query_params">>, {Params}}|Rest]) ->
- lists:foreach(fun
- ({_,V}) when is_binary(V) -> ok;
- ({K,_}) -> throw({bad_request,
- <<K/binary," value must be a string.">>})
- end, Params),
- validate_rep_props(Rest);
-validate_rep_props([_|Rest]) ->
- validate_rep_props(Rest).