summary refs log tree commit diff
diff options
context:
space:
mode:
author    Robert Newson <rnewson@apache.org>  2019-12-02 19:15:49 +0000
committer GitHub <noreply@github.com>  2019-12-02 19:15:49 +0000
commit    08a9a5ffe6b2dcfeb480c4cdb15d60ececea8442 (patch)
tree      fc75d5c9ed034ca9ed4ed9db43d04fbe2c65c4c5
parent    2d734f38096bd72cd4c6bed3059453799d1a521f (diff)
parent    ba0c203860678965a341af361138039eca10b7f3 (diff)
download  couchdb-08a9a5ffe6b2dcfeb480c4cdb15d60ececea8442.tar.gz
Merge pull request #2336 from apache/1523-bye-bye-5986-rnewson-4
1523 bye bye 5986
-rw-r--r--  .travis.yml                                      |   1
-rw-r--r--  rel/overlay/etc/local.ini                        |   1
-rw-r--r--  src/chttpd/src/chttpd.erl                        |   2
-rw-r--r--  src/chttpd/src/chttpd_httpd_handlers.erl         |   2
-rw-r--r--  src/chttpd/src/chttpd_misc.erl                   | 224
-rw-r--r--  src/chttpd/src/chttpd_node.erl                   | 272
-rw-r--r--  src/couch/src/couch.app.src                      |   3
-rw-r--r--  src/couch/src/couch_httpd.erl                    |  99
-rw-r--r--  src/couch/src/couch_secondary_sup.erl            |  13
-rw-r--r--  src/couch/test/eunit/chttpd_endpoints_tests.erl  |   2
-rw-r--r--  test/javascript/tests/proxyauth.js               |   2
11 files changed, 356 insertions, 265 deletions
diff --git a/.travis.yml b/.travis.yml
index f28dcf77b..0e1a54a0e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -56,6 +56,7 @@ before_script:
- ./configure -c --disable-docs --disable-fauxton
- python3.6 -m venv /tmp/.venv
- source /tmp/.venv/bin/activate
+ - pip install requests
script:
- make check
diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini
index ecc97f466..ac51b7b56 100644
--- a/rel/overlay/etc/local.ini
+++ b/rel/overlay/etc/local.ini
@@ -30,6 +30,7 @@
;socket_options = [{sndbuf, 262144}, {nodelay, true}]
[httpd]
+enable = true ; for the test suites
; NOTE that this only configures the "backend" node-local port, not the
; "frontend" clustered port. You probably don't want to change anything in
; this section.
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index 87fb34158..adde0730f 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -742,6 +742,8 @@ start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) ->
end,
{ok, Resp}.
+send_chunk({remote, _Pid, _Ref} = Resp, Data) ->
+ couch_httpd:send_chunk(Resp, Data);
send_chunk(Resp, Data) ->
Resp:write_chunk(Data),
{ok, Resp}.
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index 000f29b2f..5e86ea87d 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -21,7 +21,7 @@ url_handler(<<"_all_dbs">>) -> fun chttpd_misc:handle_all_dbs_req/1;
url_handler(<<"_dbs_info">>) -> fun chttpd_misc:handle_dbs_info_req/1;
url_handler(<<"_active_tasks">>) -> fun chttpd_misc:handle_task_status_req/1;
url_handler(<<"_scheduler">>) -> fun couch_replicator_httpd:handle_scheduler_req/1;
-url_handler(<<"_node">>) -> fun chttpd_misc:handle_node_req/1;
+url_handler(<<"_node">>) -> fun chttpd_node:handle_node_req/1;
url_handler(<<"_reload_query_servers">>) -> fun chttpd_misc:handle_reload_query_servers_req/1;
url_handler(<<"_replicate">>) -> fun chttpd_misc:handle_replicate_req/1;
url_handler(<<"_uuids">>) -> fun chttpd_misc:handle_uuids_req/1;
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 17122bf85..ffb5295b5 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -15,20 +15,17 @@
-export([
handle_all_dbs_req/1,
handle_dbs_info_req/1,
- handle_node_req/1,
handle_favicon_req/1,
handle_favicon_req/2,
handle_replicate_req/1,
handle_reload_query_servers_req/1,
- handle_system_req/1,
handle_task_status_req/1,
handle_up_req/1,
handle_utils_dir_req/1,
handle_utils_dir_req/2,
handle_uuids_req/1,
handle_welcome_req/1,
- handle_welcome_req/2,
- get_stats/0
+ handle_welcome_req/2
]).
-include_lib("couch/include/couch_db.hrl").
@@ -275,218 +272,6 @@ handle_uuids_req(Req) ->
couch_httpd_misc_handlers:handle_uuids_req(Req).
-% Node-specific request handler (_config and _stats)
-% Support _local meaning this node
-handle_node_req(#httpd{path_parts=[_, <<"_local">>]}=Req) ->
- send_json(Req, 200, {[{name, node()}]});
-handle_node_req(#httpd{path_parts=[A, <<"_local">>|Rest]}=Req) ->
- handle_node_req(Req#httpd{path_parts=[A, node()] ++ Rest});
-% GET /_node/$node/_config
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>]}=Req) ->
- Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
- case dict:is_key(Section, Acc) of
- true ->
- dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
- false ->
- dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
- end
- end, dict:new(), call_node(Node, config, all, [])),
- KVs = dict:fold(fun(Section, Values, Acc) ->
- [{list_to_binary(Section), {Values}} | Acc]
- end, [], Grouped),
- send_json(Req, 200, {KVs});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>]}=Req) ->
- send_method_not_allowed(Req, "GET");
-% GET /_node/$node/_config/Section
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section]}=Req) ->
- KVs = [{list_to_binary(Key), list_to_binary(Value)}
- || {Key, Value} <- call_node(Node, config, get, [Section])],
- send_json(Req, 200, {KVs});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section]}=Req) ->
- send_method_not_allowed(Req, "GET");
-% PUT /_node/$node/_config/Section/Key
-% "value"
-handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
- couch_util:check_config_blacklist(Section),
- Value = couch_util:trim(chttpd:json_body(Req)),
- Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
- OldValue = call_node(Node, config, get, [Section, Key, ""]),
- case call_node(Node, config, set, [Section, Key, ?b2l(Value), Persist]) of
- ok ->
- send_json(Req, 200, list_to_binary(OldValue));
- {error, Reason} ->
- chttpd:send_error(Req, {bad_request, Reason})
- end;
-% GET /_node/$node/_config/Section/Key
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
- case call_node(Node, config, get, [Section, Key, undefined]) of
- undefined ->
- throw({not_found, unknown_config_value});
- Value ->
- send_json(Req, 200, list_to_binary(Value))
- end;
-% DELETE /_node/$node/_config/Section/Key
-handle_node_req(#httpd{method='DELETE',path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
- couch_util:check_config_blacklist(Section),
- Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
- case call_node(Node, config, get, [Section, Key, undefined]) of
- undefined ->
- throw({not_found, unknown_config_value});
- OldValue ->
- case call_node(Node, config, delete, [Section, Key, Persist]) of
- ok ->
- send_json(Req, 200, list_to_binary(OldValue));
- {error, Reason} ->
- chttpd:send_error(Req, {bad_request, Reason})
- end
- end;
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key]}=Req) ->
- send_method_not_allowed(Req, "GET,PUT,DELETE");
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key | _]}=Req) ->
- chttpd:send_error(Req, not_found);
-% GET /_node/$node/_stats
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_stats">> | Path]}=Req) ->
- flush(Node, Req),
- Stats0 = call_node(Node, couch_stats, fetch, []),
- Stats = couch_stats_httpd:transform_stats(Stats0),
- Nested = couch_stats_httpd:nest(Stats),
- EJSON0 = couch_stats_httpd:to_ejson(Nested),
- EJSON1 = couch_stats_httpd:extract_path(Path, EJSON0),
- chttpd:send_json(Req, EJSON1);
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_stats">>]}=Req) ->
- send_method_not_allowed(Req, "GET");
-% GET /_node/$node/_system
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_system">>]}=Req) ->
- Stats = call_node(Node, chttpd_misc, get_stats, []),
- EJSON = couch_stats_httpd:to_ejson(Stats),
- send_json(Req, EJSON);
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_system">>]}=Req) ->
- send_method_not_allowed(Req, "GET");
-% POST /_node/$node/_restart
-handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_restart">>]}=Req) ->
- call_node(Node, init, restart, []),
- send_json(Req, 200, {[{ok, true}]});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_restart">>]}=Req) ->
- send_method_not_allowed(Req, "POST");
-handle_node_req(#httpd{path_parts=[_]}=Req) ->
- chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
-handle_node_req(#httpd{path_parts=[_, _Node]}=Req) ->
- chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
-handle_node_req(Req) ->
- chttpd:send_error(Req, not_found).
-
-
-call_node(Node0, Mod, Fun, Args) when is_binary(Node0) ->
- Node1 = try
- list_to_existing_atom(?b2l(Node0))
- catch
- error:badarg ->
- throw({not_found, <<"no such node: ", Node0/binary>>})
- end,
- call_node(Node1, Mod, Fun, Args);
-call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
- case rpc:call(Node, Mod, Fun, Args) of
- {badrpc, nodedown} ->
- Reason = ?l2b(io_lib:format("~s is down", [Node])),
- throw({error, {nodedown, Reason}});
- Else ->
- Else
- end.
-
-flush(Node, Req) ->
- case couch_util:get_value("flush", chttpd:qs(Req)) of
- "true" ->
- call_node(Node, couch_stats_aggregator, flush, []);
- _Else ->
- ok
- end.
-
-% Note: this resource is exposed on the backdoor interface, but it's in chttpd
-% because it's not couch trunk
-handle_system_req(Req) ->
- Stats = get_stats(),
- EJSON = couch_stats_httpd:to_ejson(Stats),
- send_json(Req, EJSON).
-
-get_stats() ->
- Other = erlang:memory(system) - lists:sum([X || {_,X} <-
- erlang:memory([atom, code, binary, ets])]),
- Memory = [{other, Other} | erlang:memory([atom, atom_used, processes,
- processes_used, binary, code, ets])],
- {NumberOfGCs, WordsReclaimed, _} = statistics(garbage_collection),
- {{input, Input}, {output, Output}} = statistics(io),
- {CF, CDU} = db_pid_stats(),
- MessageQueues0 = [{couch_file, {CF}}, {couch_db_updater, {CDU}}],
- MessageQueues = MessageQueues0 ++ message_queues(registered()),
- [
- {uptime, couch_app:uptime() div 1000},
- {memory, {Memory}},
- {run_queue, statistics(run_queue)},
- {ets_table_count, length(ets:all())},
- {context_switches, element(1, statistics(context_switches))},
- {reductions, element(1, statistics(reductions))},
- {garbage_collection_count, NumberOfGCs},
- {words_reclaimed, WordsReclaimed},
- {io_input, Input},
- {io_output, Output},
- {os_proc_count, couch_proc_manager:get_proc_count()},
- {stale_proc_count, couch_proc_manager:get_stale_proc_count()},
- {process_count, erlang:system_info(process_count)},
- {process_limit, erlang:system_info(process_limit)},
- {message_queues, {MessageQueues}},
- {internal_replication_jobs, mem3_sync:get_backlog()},
- {distribution, {get_distribution_stats()}}
- ].
-
-db_pid_stats() ->
- {monitors, M} = process_info(whereis(couch_stats_process_tracker), monitors),
- Candidates = [Pid || {process, Pid} <- M],
- CouchFiles = db_pid_stats(couch_file, Candidates),
- CouchDbUpdaters = db_pid_stats(couch_db_updater, Candidates),
- {CouchFiles, CouchDbUpdaters}.
-
-db_pid_stats(Mod, Candidates) ->
- Mailboxes = lists:foldl(
- fun(Pid, Acc) ->
- case process_info(Pid, [message_queue_len, dictionary]) of
- undefined ->
- Acc;
- PI ->
- Dictionary = proplists:get_value(dictionary, PI, []),
- case proplists:get_value('$initial_call', Dictionary) of
- {Mod, init, 1} ->
- case proplists:get_value(message_queue_len, PI) of
- undefined -> Acc;
- Len -> [Len|Acc]
- end;
- _ ->
- Acc
- end
- end
- end, [], Candidates
- ),
- format_pid_stats(Mailboxes).
-
-format_pid_stats([]) ->
- [];
-format_pid_stats(Mailboxes) ->
- Sorted = lists:sort(Mailboxes),
- Count = length(Sorted),
- [
- {count, Count},
- {min, hd(Sorted)},
- {max, lists:nth(Count, Sorted)},
- {'50', lists:nth(round(Count * 0.5), Sorted)},
- {'90', lists:nth(round(Count * 0.9), Sorted)},
- {'99', lists:nth(round(Count * 0.99), Sorted)}
- ].
-
-get_distribution_stats() ->
- lists:map(fun({Node, Socket}) ->
- {ok, Stats} = inet:getstat(Socket),
- {Node, {Stats}}
- end, erlang:system_info(dist_ctrl)).
-
handle_up_req(#httpd{method='GET'} = Req) ->
case config:get("couchdb", "maintenance_mode") of
"true" ->
@@ -506,13 +291,6 @@ handle_up_req(#httpd{method='GET'} = Req) ->
handle_up_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
-message_queues(Registered) ->
- lists:map(fun(Name) ->
- Type = message_queue_len,
- {Type, Length} = process_info(whereis(Name), Type),
- {Name, Length}
- end, Registered).
-
get_docroot() ->
% if the env var isn’t set, let’s not throw an error, but
% assume the current working dir is what we want
diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
new file mode 100644
index 000000000..202070279
--- /dev/null
+++ b/src/chttpd/src/chttpd_node.erl
@@ -0,0 +1,272 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_node).
+-compile(tuple_calls).
+
+-export([
+ handle_node_req/1,
+ get_stats/0
+]).
+
+-include_lib("couch/include/couch_db.hrl").
+
+-import(chttpd,
+ [send_json/2,send_json/3,send_method_not_allowed/2,
+ send_chunk/2,start_chunked_response/3]).
+
+% Node-specific request handler (_config and _stats)
+% Support _local meaning this node
+handle_node_req(#httpd{path_parts=[_, <<"_local">>]}=Req) ->
+ send_json(Req, 200, {[{name, node()}]});
+handle_node_req(#httpd{path_parts=[A, <<"_local">>|Rest]}=Req) ->
+ handle_node_req(Req#httpd{path_parts=[A, node()] ++ Rest});
+% GET /_node/$node/_config
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>]}=Req) ->
+ Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
+ case dict:is_key(Section, Acc) of
+ true ->
+ dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
+ false ->
+ dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
+ end
+ end, dict:new(), call_node(Node, config, all, [])),
+ KVs = dict:fold(fun(Section, Values, Acc) ->
+ [{list_to_binary(Section), {Values}} | Acc]
+ end, [], Grouped),
+ send_json(Req, 200, {KVs});
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>]}=Req) ->
+ send_method_not_allowed(Req, "GET");
+% GET /_node/$node/_config/Section
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section]}=Req) ->
+ KVs = [{list_to_binary(Key), list_to_binary(Value)}
+ || {Key, Value} <- call_node(Node, config, get, [Section])],
+ send_json(Req, 200, {KVs});
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section]}=Req) ->
+ send_method_not_allowed(Req, "GET");
+% PUT /_node/$node/_config/Section/Key
+% "value"
+handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+ couch_util:check_config_blacklist(Section),
+ Value = couch_util:trim(chttpd:json_body(Req)),
+ Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
+ OldValue = call_node(Node, config, get, [Section, Key, ""]),
+ case call_node(Node, config, set, [Section, Key, ?b2l(Value), Persist]) of
+ ok ->
+ send_json(Req, 200, list_to_binary(OldValue));
+ {error, Reason} ->
+ chttpd:send_error(Req, {bad_request, Reason})
+ end;
+% GET /_node/$node/_config/Section/Key
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+ case call_node(Node, config, get, [Section, Key, undefined]) of
+ undefined ->
+ throw({not_found, unknown_config_value});
+ Value ->
+ send_json(Req, 200, list_to_binary(Value))
+ end;
+% DELETE /_node/$node/_config/Section/Key
+handle_node_req(#httpd{method='DELETE',path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+ couch_util:check_config_blacklist(Section),
+ Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
+ case call_node(Node, config, get, [Section, Key, undefined]) of
+ undefined ->
+ throw({not_found, unknown_config_value});
+ OldValue ->
+ case call_node(Node, config, delete, [Section, Key, Persist]) of
+ ok ->
+ send_json(Req, 200, list_to_binary(OldValue));
+ {error, Reason} ->
+ chttpd:send_error(Req, {bad_request, Reason})
+ end
+ end;
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key]}=Req) ->
+ send_method_not_allowed(Req, "GET,PUT,DELETE");
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key | _]}=Req) ->
+ chttpd:send_error(Req, not_found);
+% GET /_node/$node/_stats
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_stats">> | Path]}=Req) ->
+ flush(Node, Req),
+ Stats0 = call_node(Node, couch_stats, fetch, []),
+ Stats = couch_stats_httpd:transform_stats(Stats0),
+ Nested = couch_stats_httpd:nest(Stats),
+ EJSON0 = couch_stats_httpd:to_ejson(Nested),
+ EJSON1 = couch_stats_httpd:extract_path(Path, EJSON0),
+ chttpd:send_json(Req, EJSON1);
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_stats">>]}=Req) ->
+ send_method_not_allowed(Req, "GET");
+% GET /_node/$node/_system
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_system">>]}=Req) ->
+ Stats = call_node(Node, chttpd_node, get_stats, []),
+ EJSON = couch_stats_httpd:to_ejson(Stats),
+ send_json(Req, EJSON);
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_system">>]}=Req) ->
+ send_method_not_allowed(Req, "GET");
+% POST /_node/$node/_restart
+handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_restart">>]}=Req) ->
+ call_node(Node, init, restart, []),
+ send_json(Req, 200, {[{ok, true}]});
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_restart">>]}=Req) ->
+ send_method_not_allowed(Req, "POST");
+handle_node_req(#httpd{path_parts=[_, Node | PathParts],
+ mochi_req=MochiReq0}) ->
+ % strip /_node/{node} from Req0 before descending further
+ RawUri = MochiReq0:get(raw_path),
+ {_, Query, Fragment} = mochiweb_util:urlsplit_path(RawUri),
+ NewPath0 = "/" ++ lists:join("/", [?b2l(P) || P <- PathParts]),
+ NewRawPath = mochiweb_util:urlunsplit_path({NewPath0, Query, Fragment}),
+ MaxSize = config:get_integer("httpd", "max_http_request_size", 4294967296),
+ NewOpts = [{body, MochiReq0:recv_body(MaxSize)} | MochiReq0:get(opts)],
+ Ref = erlang:make_ref(),
+ MochiReq = mochiweb_request:new({remote, self(), Ref},
+ NewOpts,
+ MochiReq0:get(method),
+ NewRawPath,
+ MochiReq0:get(version),
+ MochiReq0:get(headers)),
+ call_node(Node, couch_httpd, handle_request, [MochiReq]),
+ recv_loop(Ref, MochiReq0);
+handle_node_req(#httpd{path_parts=[_]}=Req) ->
+ chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
+handle_node_req(Req) ->
+ chttpd:send_error(Req, not_found).
+
+recv_loop(Ref, ReqResp) ->
+ receive
+ {Ref, Code, Headers, _Args, start_response} ->
+ recv_loop(Ref, ReqResp:start({Code, Headers}));
+ {Ref, Code, Headers, chunked, respond} ->
+ Resp = ReqResp:respond({Code, Headers, chunked}),
+ recv_loop(Ref, Resp);
+ {Ref, Code, Headers, Args, respond} ->
+ Resp = ReqResp:respond({Code, Headers, Args}),
+ {ok, Resp};
+ {Ref, chunk, <<>>} ->
+ ReqResp:write_chunk(<<>>),
+ {ok, ReqResp};
+ {Ref, chunk, Data} ->
+ ReqResp:write_chunk(Data),
+ recv_loop(Ref, ReqResp);
+ _Else ->
+ recv_loop(Ref, ReqResp)
+ end.
+
+call_node(Node0, Mod, Fun, Args) when is_binary(Node0) ->
+ Node1 = try
+ list_to_existing_atom(?b2l(Node0))
+ catch
+ error:badarg ->
+ throw({not_found, <<"no such node: ", Node0/binary>>})
+ end,
+ call_node(Node1, Mod, Fun, Args);
+call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
+ case rpc:call(Node, Mod, Fun, Args) of
+ {badrpc, nodedown} ->
+ Reason = ?l2b(io_lib:format("~s is down", [Node])),
+ throw({error, {nodedown, Reason}});
+ Else ->
+ Else
+ end.
+
+flush(Node, Req) ->
+ case couch_util:get_value("flush", chttpd:qs(Req)) of
+ "true" ->
+ call_node(Node, couch_stats_aggregator, flush, []);
+ _Else ->
+ ok
+ end.
+
+get_stats() ->
+ Other = erlang:memory(system) - lists:sum([X || {_,X} <-
+ erlang:memory([atom, code, binary, ets])]),
+ Memory = [{other, Other} | erlang:memory([atom, atom_used, processes,
+ processes_used, binary, code, ets])],
+ {NumberOfGCs, WordsReclaimed, _} = statistics(garbage_collection),
+ {{input, Input}, {output, Output}} = statistics(io),
+ {CF, CDU} = db_pid_stats(),
+ MessageQueues0 = [{couch_file, {CF}}, {couch_db_updater, {CDU}}],
+ MessageQueues = MessageQueues0 ++ message_queues(registered()),
+ [
+ {uptime, couch_app:uptime() div 1000},
+ {memory, {Memory}},
+ {run_queue, statistics(run_queue)},
+ {ets_table_count, length(ets:all())},
+ {context_switches, element(1, statistics(context_switches))},
+ {reductions, element(1, statistics(reductions))},
+ {garbage_collection_count, NumberOfGCs},
+ {words_reclaimed, WordsReclaimed},
+ {io_input, Input},
+ {io_output, Output},
+ {os_proc_count, couch_proc_manager:get_proc_count()},
+ {stale_proc_count, couch_proc_manager:get_stale_proc_count()},
+ {process_count, erlang:system_info(process_count)},
+ {process_limit, erlang:system_info(process_limit)},
+ {message_queues, {MessageQueues}},
+ {internal_replication_jobs, mem3_sync:get_backlog()},
+ {distribution, {get_distribution_stats()}}
+ ].
+
+db_pid_stats() ->
+ {monitors, M} = process_info(whereis(couch_stats_process_tracker), monitors),
+ Candidates = [Pid || {process, Pid} <- M],
+ CouchFiles = db_pid_stats(couch_file, Candidates),
+ CouchDbUpdaters = db_pid_stats(couch_db_updater, Candidates),
+ {CouchFiles, CouchDbUpdaters}.
+
+db_pid_stats(Mod, Candidates) ->
+ Mailboxes = lists:foldl(
+ fun(Pid, Acc) ->
+ case process_info(Pid, [message_queue_len, dictionary]) of
+ undefined ->
+ Acc;
+ PI ->
+ Dictionary = proplists:get_value(dictionary, PI, []),
+ case proplists:get_value('$initial_call', Dictionary) of
+ {Mod, init, 1} ->
+ case proplists:get_value(message_queue_len, PI) of
+ undefined -> Acc;
+ Len -> [Len|Acc]
+ end;
+ _ ->
+ Acc
+ end
+ end
+ end, [], Candidates
+ ),
+ format_pid_stats(Mailboxes).
+
+format_pid_stats([]) ->
+ [];
+format_pid_stats(Mailboxes) ->
+ Sorted = lists:sort(Mailboxes),
+ Count = length(Sorted),
+ [
+ {count, Count},
+ {min, hd(Sorted)},
+ {max, lists:nth(Count, Sorted)},
+ {'50', lists:nth(round(Count * 0.5), Sorted)},
+ {'90', lists:nth(round(Count * 0.9), Sorted)},
+ {'99', lists:nth(round(Count * 0.99), Sorted)}
+ ].
+
+get_distribution_stats() ->
+ lists:map(fun({Node, Socket}) ->
+ {ok, Stats} = inet:getstat(Socket),
+ {Node, {Stats}}
+ end, erlang:system_info(dist_ctrl)).
+
+message_queues(Registered) ->
+ lists:map(fun(Name) ->
+ Type = message_queue_len,
+ {Type, Length} = process_info(whereis(Name), Type),
+ {Name, Length}
+ end, Registered).
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index 706b439fa..2b642c085 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -60,8 +60,7 @@
{"_uuids", "{couch_httpd_misc_handlers, handle_uuids_req}"},
{"_stats", "{couch_stats_httpd, handle_stats_req}"},
{"_session", "{couch_httpd_auth, handle_session_req}"},
- {"_plugins", "{couch_plugins_httpd, handle_req}"},
- {"_system", "{chttpd_misc, handle_system_req}"}
+ {"_plugins", "{couch_plugins_httpd, handle_req}"}
]},
{ httpd_db_handlers, [
{"_all_docs", "{couch_mrview_http, handle_all_docs_req}"},
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 10b44d16d..65291e3da 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -37,7 +37,8 @@
-export([validate_host/1]).
-export([validate_bind_address/1]).
-export([check_max_request_length/1]).
-
+-export([handle_request/1]).
+-export([set_auth_handlers/0]).
-define(HANDLER_NAME_IN_MODULE_POS, 6).
-define(MAX_DRAIN_BYTES, 1048576).
@@ -104,38 +105,14 @@ start_link(Name, Options) ->
Else -> Else
end,
ok = validate_bind_address(BindAddress),
- DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
-
- {ok, HttpdGlobalHandlers} = application:get_env(httpd_global_handlers),
-
- UrlHandlersList = lists:map(
- fun({UrlKey, SpecStr}) ->
- {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
- end, HttpdGlobalHandlers),
-
- {ok, HttpdDbHandlers} = application:get_env(httpd_db_handlers),
-
- DbUrlHandlersList = lists:map(
- fun({UrlKey, SpecStr}) ->
- {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
- end, HttpdDbHandlers),
- {ok, HttpdDesignHandlers} = application:get_env(httpd_design_handlers),
-
- DesignUrlHandlersList = lists:map(
- fun({UrlKey, SpecStr}) ->
- {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
- end, HttpdDesignHandlers),
-
- UrlHandlers = dict:from_list(UrlHandlersList),
- DbUrlHandlers = dict:from_list(DbUrlHandlersList),
- DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
{ok, ServerOptions} = couch_util:parse_term(
config:get("httpd", "server_options", "[]")),
{ok, SocketOptions} = couch_util:parse_term(
config:get("httpd", "socket_options", "[]")),
set_auth_handlers(),
+ Handlers = get_httpd_handlers(),
% ensure uuid is set so that concurrent replications
% get the same value.
@@ -148,9 +125,7 @@ start_link(Name, Options) ->
_ ->
ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
end,
- apply(?MODULE, handle_request, [
- Req, DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers
- ])
+ apply(?MODULE, handle_request, [Req | Handlers])
end,
% set mochiweb options
@@ -187,6 +162,34 @@ set_auth_handlers() ->
auth_handler_name(SpecStr) ->
lists:nth(?HANDLER_NAME_IN_MODULE_POS, re:split(SpecStr, "[\\W_]", [])).
+get_httpd_handlers() ->
+ {ok, HttpdGlobalHandlers} = application:get_env(couch, httpd_global_handlers),
+
+ UrlHandlersList = lists:map(
+ fun({UrlKey, SpecStr}) ->
+ {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
+ end, HttpdGlobalHandlers),
+
+ {ok, HttpdDbHandlers} = application:get_env(couch, httpd_db_handlers),
+
+ DbUrlHandlersList = lists:map(
+ fun({UrlKey, SpecStr}) ->
+ {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
+ end, HttpdDbHandlers),
+
+ {ok, HttpdDesignHandlers} = application:get_env(couch, httpd_design_handlers),
+
+ DesignUrlHandlersList = lists:map(
+ fun({UrlKey, SpecStr}) ->
+ {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
+ end, HttpdDesignHandlers),
+
+ UrlHandlers = dict:from_list(UrlHandlersList),
+ DbUrlHandlers = dict:from_list(DbUrlHandlersList),
+ DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
+ DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
+ [DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers].
+
% SpecStr is a string like "{my_module, my_fun}"
% or "{my_module, my_fun, <<"my_arg">>}"
make_arity_1_fun(SpecStr) ->
@@ -217,6 +220,11 @@ make_arity_3_fun(SpecStr) ->
make_fun_spec_strs(SpecStr) ->
re:split(SpecStr, "(?<=})\\s*,\\s*(?={)", [{return, list}]).
+handle_request(MochiReq) ->
+ Body = proplists:get_value(body, MochiReq:get(opts)),
+ erlang:put(mochiweb_request_body, Body),
+ apply(?MODULE, handle_request, [MochiReq | get_httpd_handlers()]).
+
handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers,
DesignUrlHandlers) ->
%% reset rewrite count for new request
@@ -256,7 +264,7 @@ handle_request_int(MochiReq, DefaultFun,
MochiReq:get(method),
RawUri,
MochiReq:get(version),
- MochiReq:get(peer),
+ peer(MochiReq),
mochiweb_headers:to_list(MochiReq:get(headers))
]),
@@ -299,7 +307,7 @@ handle_request_int(MochiReq, DefaultFun,
HttpReq = #httpd{
mochi_req = MochiReq,
- peer = MochiReq:get(peer),
+ peer = peer(MochiReq),
method = Method,
requested_path_parts =
[?l2b(unquote(Part)) || Part <- string:tokens(RequestedPath, "/")],
@@ -746,6 +754,9 @@ start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) ->
end,
{ok, Resp}.
+send_chunk({remote, Pid, Ref} = Resp, Data) ->
+ Pid ! {Ref, chunk, Data},
+ {ok, Resp};
send_chunk(Resp, Data) ->
case iolist_size(Data) of
0 -> ok; % do nothing
@@ -753,6 +764,9 @@ send_chunk(Resp, Data) ->
end,
{ok, Resp}.
+last_chunk({remote, Pid, Ref} = Resp) ->
+ Pid ! {Ref, chunk, <<>>},
+ {ok, Resp};
last_chunk(Resp) ->
Resp:write_chunk([]),
{ok, Resp}.
@@ -1175,9 +1189,18 @@ before_response(Req0, Code0, Headers0, {json, JsonObj}) ->
before_response(Req0, Code0, Headers0, Args0) ->
chttpd_plugin:before_response(Req0, Code0, Headers0, Args0).
-respond_(#httpd{mochi_req = MochiReq}, Code, Headers, _Args, start_response) ->
+respond_(#httpd{mochi_req = MochiReq} = Req, Code, Headers, Args, Type) ->
+ case MochiReq:get(socket) of
+ {remote, Pid, Ref} ->
+ Pid ! {Ref, Code, Headers, Args, Type},
+ {remote, Pid, Ref};
+ _Else ->
+ http_respond_(Req, Code, Headers, Args, Type)
+ end.
+
+http_respond_(#httpd{mochi_req = MochiReq}, Code, Headers, _Args, start_response) ->
MochiReq:start_response({Code, Headers});
-respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) ->
+http_respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) ->
% Special handling for the 413 response. Make sure the socket is closed as
% we don't know how much data was read before the error was thrown. Also
% drain all the data in the receive buffer to avoid connction being reset
@@ -1189,9 +1212,17 @@ respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) ->
Socket = MochiReq:get(socket),
mochiweb_socket:recv(Socket, ?MAX_DRAIN_BYTES, ?MAX_DRAIN_TIME_MSEC),
Result;
-respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) ->
+http_respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) ->
MochiReq:Type({Code, Headers, Args}).
+peer(MochiReq) ->
+ case MochiReq:get(socket) of
+ {remote, Pid, _} ->
+ node(Pid);
+ _ ->
+ MochiReq:get(peer)
+ end.
+
%%%%%%%% module tests below %%%%%%%%
-ifdef(TEST).
diff --git a/src/couch/src/couch_secondary_sup.erl b/src/couch/src/couch_secondary_sup.erl
index 0f46ec85f..9c7d414d0 100644
--- a/src/couch/src/couch_secondary_sup.erl
+++ b/src/couch/src/couch_secondary_sup.erl
@@ -30,14 +30,18 @@ init([]) ->
{index_server, {couch_index_server, start_link, []}},
{query_servers, {couch_proc_manager, start_link, []}},
{vhosts, {couch_httpd_vhost, start_link, []}},
- {httpd, {couch_httpd, start_link, []}},
{uuids, {couch_uuids, start, []}},
{auth_cache, {couch_auth_cache, start_link, []}}
],
+ MaybeHttp = case http_enabled() of
+ true -> [{httpd, {couch_httpd, start_link, []}}];
+ false -> couch_httpd:set_auth_handlers(), []
+ end,
+
MaybeHttps = case https_enabled() of
true -> [{httpsd, {chttpd, start_link, [https]}}];
- _False -> []
+ false -> []
end,
Children = SecondarySupervisors ++ [
@@ -52,10 +56,13 @@ init([]) ->
[Module]}
end
|| {Name, Spec}
- <- Daemons ++ MaybeHttps, Spec /= ""],
+ <- Daemons ++ MaybeHttp ++ MaybeHttps, Spec /= ""],
{ok, {{one_for_one, 50, 3600},
couch_epi:register_service(couch_db_epi, Children)}}.
+http_enabled() ->
+ config:get_boolean("httpd", "enable", false).
+
https_enabled() ->
% 1. [ssl] enable = true | false
% 2. if [daemons] httpsd == {chttpd, start_link, [https]} -> pretend true as well
diff --git a/src/couch/test/eunit/chttpd_endpoints_tests.erl b/src/couch/test/eunit/chttpd_endpoints_tests.erl
index 9b7430823..6433d3d89 100644
--- a/src/couch/test/eunit/chttpd_endpoints_tests.erl
+++ b/src/couch/test/eunit/chttpd_endpoints_tests.erl
@@ -43,7 +43,7 @@ handlers(url_handler) ->
{<<"_all_dbs">>, chttpd_misc, handle_all_dbs_req},
{<<"_dbs_info">>, chttpd_misc, handle_dbs_info_req},
{<<"_active_tasks">>, chttpd_misc, handle_task_status_req},
- {<<"_node">>, chttpd_misc, handle_node_req},
+ {<<"_node">>, chttpd_node, handle_node_req},
{<<"_reload_query_servers">>, chttpd_misc, handle_reload_query_servers_req},
{<<"_replicate">>, chttpd_misc, handle_replicate_req},
{<<"_uuids">>, chttpd_misc, handle_uuids_req},
diff --git a/test/javascript/tests/proxyauth.js b/test/javascript/tests/proxyauth.js
index c60c24e46..cc75faaf3 100644
--- a/test/javascript/tests/proxyauth.js
+++ b/test/javascript/tests/proxyauth.js
@@ -108,7 +108,7 @@ couchTests.proxyauth = function(debug) {
run_on_modified_server(
[{section: "httpd",
key: "authentication_handlers",
- value:"{chttpd_auth, proxy_authentification_handler}, {chttpd_auth, default_authentication_handler}"},
+ value:"{chttpd_auth, proxy_authentication_handler}, {chttpd_auth, default_authentication_handler}"},
{section: "chttpd_auth",
key: "authentication_db",
value: users_db_name},