author     Joan Touzet <joant@atypical.net>   2019-07-29 19:08:02 -0400
committer  Joan Touzet <joant@atypical.net>   2019-08-02 02:58:51 -0400
commit     044e57815f7bfc407f15d5514e6689e3cd0e9dae (patch)
tree       8651425473d7d4698ed4ae06243732ef765610f5
parent     29d484e45054c4b40f6b3a223298c8a31914f90d (diff)
download   couchdb-044e57815f7bfc407f15d5514e6689e3cd0e9dae.tar.gz
[WIP] Deprecate port 5986
This introduces a new chttpd_node module, under which all unclustered functions now live. System DB and shard-level requests bypass mem3 and fabric, using rpc:call to call into couch_db, couch_server, etc.
-rw-r--r--  src/chttpd/src/chttpd_httpd_handlers.erl      2
-rw-r--r--  src/chttpd/src/chttpd_misc.erl               229
-rw-r--r--  src/chttpd/src/chttpd_node.erl               321
-rw-r--r--  src/couch/src/couch.app.src                    3
-rw-r--r--  src/couch/src/couch_httpd.erl                 67
5 files changed, 365 insertions, 257 deletions
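
Before the patch body, a minimal, self-contained sketch of the dispatch pattern described above: the node name taken from the /_node/{node}/... URL is resolved to an existing atom, and rpc:call/4 then invokes the target module (config, couch_db, couch_server, ...) directly on that node, with no mem3 or fabric involvement. The module name node_rpc_sketch is illustrative only; in the patch this logic lives in chttpd_node:call_node/4.

%% Illustrative sketch only -- mirrors chttpd_node:call_node/4 from the patch.
-module(node_rpc_sketch).
-export([call_node/4]).

-spec call_node(binary() | atom(), module(), atom(), [term()]) -> term().
call_node(Node0, Mod, Fun, Args) when is_binary(Node0) ->
    %% Refuse names that are not already known atoms; an unknown name
    %% cannot belong to a connected node.
    Node1 = try
        binary_to_existing_atom(Node0, utf8)
    catch
        error:badarg ->
            throw({not_found, <<"no such node: ", Node0/binary>>})
    end,
    call_node(Node1, Mod, Fun, Args);
call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
    %% Direct RPC to the target node; surface node-down as an error tuple.
    case rpc:call(Node, Mod, Fun, Args) of
        {badrpc, nodedown} ->
            throw({error, {nodedown, Node}});
        Else ->
            Else
    end.

For example, GET /_node/couchdb@127.0.0.1/_config/log ends up, roughly, as call_node(<<"couchdb@127.0.0.1">>, config, get, [Section]) executed by the coordinating chttpd process.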
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index 000f29b2f..5e86ea87d 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -21,7 +21,7 @@ url_handler(<<"_all_dbs">>) -> fun chttpd_misc:handle_all_dbs_req/1;
url_handler(<<"_dbs_info">>) -> fun chttpd_misc:handle_dbs_info_req/1;
url_handler(<<"_active_tasks">>) -> fun chttpd_misc:handle_task_status_req/1;
url_handler(<<"_scheduler">>) -> fun couch_replicator_httpd:handle_scheduler_req/1;
-url_handler(<<"_node">>) -> fun chttpd_misc:handle_node_req/1;
+url_handler(<<"_node">>) -> fun chttpd_node:handle_node_req/1;
url_handler(<<"_reload_query_servers">>) -> fun chttpd_misc:handle_reload_query_servers_req/1;
url_handler(<<"_replicate">>) -> fun chttpd_misc:handle_replicate_req/1;
url_handler(<<"_uuids">>) -> fun chttpd_misc:handle_uuids_req/1;
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 819d7820e..d107334fd 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -15,27 +15,24 @@
-export([
handle_all_dbs_req/1,
handle_dbs_info_req/1,
- handle_node_req/1,
handle_favicon_req/1,
handle_favicon_req/2,
handle_replicate_req/1,
handle_reload_query_servers_req/1,
- handle_system_req/1,
handle_task_status_req/1,
handle_up_req/1,
handle_utils_dir_req/1,
handle_utils_dir_req/2,
handle_uuids_req/1,
handle_welcome_req/1,
- handle_welcome_req/2,
- get_stats/0
+ handle_welcome_req/2
]).
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-import(chttpd,
- [send_json/2,send_json/3,send_method_not_allowed/2,
+ [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
send_chunk/2,start_chunked_response/3]).
-define(MAX_DB_NUM_FOR_DBS_INFO, 100).
@@ -266,219 +263,6 @@ handle_reload_query_servers_req(Req) ->
handle_uuids_req(Req) ->
couch_httpd_misc_handlers:handle_uuids_req(Req).
-
-% Node-specific request handler (_config and _stats)
-% Support _local meaning this node
-handle_node_req(#httpd{path_parts=[_, <<"_local">>]}=Req) ->
- send_json(Req, 200, {[{name, node()}]});
-handle_node_req(#httpd{path_parts=[A, <<"_local">>|Rest]}=Req) ->
- handle_node_req(Req#httpd{path_parts=[A, node()] ++ Rest});
-% GET /_node/$node/_config
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>]}=Req) ->
- Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
- case dict:is_key(Section, Acc) of
- true ->
- dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
- false ->
- dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
- end
- end, dict:new(), call_node(Node, config, all, [])),
- KVs = dict:fold(fun(Section, Values, Acc) ->
- [{list_to_binary(Section), {Values}} | Acc]
- end, [], Grouped),
- send_json(Req, 200, {KVs});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>]}=Req) ->
- send_method_not_allowed(Req, "GET");
-% GET /_node/$node/_config/Section
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section]}=Req) ->
- KVs = [{list_to_binary(Key), list_to_binary(Value)}
- || {Key, Value} <- call_node(Node, config, get, [Section])],
- send_json(Req, 200, {KVs});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section]}=Req) ->
- send_method_not_allowed(Req, "GET");
-% PUT /_node/$node/_config/Section/Key
-% "value"
-handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
- couch_util:check_config_blacklist(Section),
- Value = couch_util:trim(chttpd:json_body(Req)),
- Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
- OldValue = call_node(Node, config, get, [Section, Key, ""]),
- case call_node(Node, config, set, [Section, Key, ?b2l(Value), Persist]) of
- ok ->
- send_json(Req, 200, list_to_binary(OldValue));
- {error, Reason} ->
- chttpd:send_error(Req, {bad_request, Reason})
- end;
-% GET /_node/$node/_config/Section/Key
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
- case call_node(Node, config, get, [Section, Key, undefined]) of
- undefined ->
- throw({not_found, unknown_config_value});
- Value ->
- send_json(Req, 200, list_to_binary(Value))
- end;
-% DELETE /_node/$node/_config/Section/Key
-handle_node_req(#httpd{method='DELETE',path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
- couch_util:check_config_blacklist(Section),
- Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
- case call_node(Node, config, get, [Section, Key, undefined]) of
- undefined ->
- throw({not_found, unknown_config_value});
- OldValue ->
- case call_node(Node, config, delete, [Section, Key, Persist]) of
- ok ->
- send_json(Req, 200, list_to_binary(OldValue));
- {error, Reason} ->
- chttpd:send_error(Req, {bad_request, Reason})
- end
- end;
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key]}=Req) ->
- send_method_not_allowed(Req, "GET,PUT,DELETE");
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key | _]}=Req) ->
- chttpd:send_error(Req, not_found);
-% GET /_node/$node/_stats
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_stats">> | Path]}=Req) ->
- flush(Node, Req),
- Stats0 = call_node(Node, couch_stats, fetch, []),
- Stats = couch_stats_httpd:transform_stats(Stats0),
- Nested = couch_stats_httpd:nest(Stats),
- EJSON0 = couch_stats_httpd:to_ejson(Nested),
- EJSON1 = couch_stats_httpd:extract_path(Path, EJSON0),
- chttpd:send_json(Req, EJSON1);
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_stats">>]}=Req) ->
- send_method_not_allowed(Req, "GET");
-% GET /_node/$node/_system
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_system">>]}=Req) ->
- Stats = call_node(Node, chttpd_misc, get_stats, []),
- EJSON = couch_stats_httpd:to_ejson(Stats),
- send_json(Req, EJSON);
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_system">>]}=Req) ->
- send_method_not_allowed(Req, "GET");
-% POST /_node/$node/_restart
-handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_restart">>]}=Req) ->
- call_node(Node, init, restart, []),
- send_json(Req, 200, {[{ok, true}]});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_restart">>]}=Req) ->
- send_method_not_allowed(Req, "POST");
-handle_node_req(#httpd{path_parts=[_]}=Req) ->
- chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
-handle_node_req(#httpd{path_parts=[_, _Node]}=Req) ->
- chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
-handle_node_req(Req) ->
- chttpd:send_error(Req, not_found).
-
-
-call_node(Node0, Mod, Fun, Args) when is_binary(Node0) ->
- Node1 = try
- list_to_existing_atom(?b2l(Node0))
- catch
- error:badarg ->
- throw({not_found, <<"no such node: ", Node0/binary>>})
- end,
- call_node(Node1, Mod, Fun, Args);
-call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
- case rpc:call(Node, Mod, Fun, Args) of
- {badrpc, nodedown} ->
- Reason = ?l2b(io_lib:format("~s is down", [Node])),
- throw({error, {nodedown, Reason}});
- Else ->
- Else
- end.
-
-flush(Node, Req) ->
- case couch_util:get_value("flush", chttpd:qs(Req)) of
- "true" ->
- call_node(Node, couch_stats_aggregator, flush, []);
- _Else ->
- ok
- end.
-
-% Note: this resource is exposed on the backdoor interface, but it's in chttpd
-% because it's not couch trunk
-handle_system_req(Req) ->
- Stats = get_stats(),
- EJSON = couch_stats_httpd:to_ejson(Stats),
- send_json(Req, EJSON).
-
-get_stats() ->
- Other = erlang:memory(system) - lists:sum([X || {_,X} <-
- erlang:memory([atom, code, binary, ets])]),
- Memory = [{other, Other} | erlang:memory([atom, atom_used, processes,
- processes_used, binary, code, ets])],
- {NumberOfGCs, WordsReclaimed, _} = statistics(garbage_collection),
- {{input, Input}, {output, Output}} = statistics(io),
- {CF, CDU} = db_pid_stats(),
- MessageQueues0 = [{couch_file, {CF}}, {couch_db_updater, {CDU}}],
- MessageQueues = MessageQueues0 ++ message_queues(registered()),
- [
- {uptime, couch_app:uptime() div 1000},
- {memory, {Memory}},
- {run_queue, statistics(run_queue)},
- {ets_table_count, length(ets:all())},
- {context_switches, element(1, statistics(context_switches))},
- {reductions, element(1, statistics(reductions))},
- {garbage_collection_count, NumberOfGCs},
- {words_reclaimed, WordsReclaimed},
- {io_input, Input},
- {io_output, Output},
- {os_proc_count, couch_proc_manager:get_proc_count()},
- {stale_proc_count, couch_proc_manager:get_stale_proc_count()},
- {process_count, erlang:system_info(process_count)},
- {process_limit, erlang:system_info(process_limit)},
- {message_queues, {MessageQueues}},
- {internal_replication_jobs, mem3_sync:get_backlog()},
- {distribution, {get_distribution_stats()}}
- ].
-
-db_pid_stats() ->
- {monitors, M} = process_info(whereis(couch_stats_process_tracker), monitors),
- Candidates = [Pid || {process, Pid} <- M],
- CouchFiles = db_pid_stats(couch_file, Candidates),
- CouchDbUpdaters = db_pid_stats(couch_db_updater, Candidates),
- {CouchFiles, CouchDbUpdaters}.
-
-db_pid_stats(Mod, Candidates) ->
- Mailboxes = lists:foldl(
- fun(Pid, Acc) ->
- case process_info(Pid, [message_queue_len, dictionary]) of
- undefined ->
- Acc;
- PI ->
- Dictionary = proplists:get_value(dictionary, PI, []),
- case proplists:get_value('$initial_call', Dictionary) of
- {Mod, init, 1} ->
- case proplists:get_value(message_queue_len, PI) of
- undefined -> Acc;
- Len -> [Len|Acc]
- end;
- _ ->
- Acc
- end
- end
- end, [], Candidates
- ),
- format_pid_stats(Mailboxes).
-
-format_pid_stats([]) ->
- [];
-format_pid_stats(Mailboxes) ->
- Sorted = lists:sort(Mailboxes),
- Count = length(Sorted),
- [
- {count, Count},
- {min, hd(Sorted)},
- {max, lists:nth(Count, Sorted)},
- {'50', lists:nth(round(Count * 0.5), Sorted)},
- {'90', lists:nth(round(Count * 0.9), Sorted)},
- {'99', lists:nth(round(Count * 0.99), Sorted)}
- ].
-
-get_distribution_stats() ->
- lists:map(fun({Node, Socket}) ->
- {ok, Stats} = inet:getstat(Socket),
- {Node, {Stats}}
- end, erlang:system_info(dist_ctrl)).
-
handle_up_req(#httpd{method='GET'} = Req) ->
case config:get("couchdb", "maintenance_mode") of
"true" ->
@@ -498,14 +282,7 @@ handle_up_req(#httpd{method='GET'} = Req) ->
handle_up_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
-message_queues(Registered) ->
- lists:map(fun(Name) ->
- Type = message_queue_len,
- {Type, Length} = process_info(whereis(Name), Type),
- {Name, Length}
- end, Registered).
-
get_docroot() ->
- % if the env var isn’t set, let’s not throw an error, but
+ % if the env var isn't set, let's not throw an error, but
% assume the current working dir is what we want
os:getenv("COUCHDB_FAUXTON_DOCROOT", "").
diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
new file mode 100644
index 000000000..74ff99b82
--- /dev/null
+++ b/src/chttpd/src/chttpd_node.erl
@@ -0,0 +1,321 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_node).
+
+-export([
+ handle_node_req/1,
+ do_db_req/4,
+ get_stats/0
+]).
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl").
+
+-import(chttpd,
+ [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
+ send_error/2,send_chunk/2,start_chunked_response/3]).
+
+% Node-specific (unclustered) request handlers
+
+% Support _local meaning this node
+handle_node_req(#httpd{path_parts=[_, <<"_local">>]}=Req) ->
+ send_json(Req, 200, {[{name, node()}]});
+handle_node_req(#httpd{path_parts=[A, <<"_local">>|Rest]}=Req) ->
+ handle_node_req(Req#httpd{path_parts=[A, node()] ++ Rest});
+
+% GET /_node/$node/_config
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>]}=Req) ->
+ Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
+ case dict:is_key(Section, Acc) of
+ true ->
+ dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
+ false ->
+ dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
+ end
+ end, dict:new(), call_node(Node, config, all, [])),
+ KVs = dict:fold(fun(Section, Values, Acc) ->
+ [{list_to_binary(Section), {Values}} | Acc]
+ end, [], Grouped),
+ send_json(Req, 200, {KVs});
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>]}=Req) ->
+ send_method_not_allowed(Req, "GET");
+% GET /_node/$node/_config/Section
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section]}=Req) ->
+ KVs = [{list_to_binary(Key), list_to_binary(Value)}
+ || {Key, Value} <- call_node(Node, config, get, [Section])],
+ send_json(Req, 200, {KVs});
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section]}=Req) ->
+ send_method_not_allowed(Req, "GET");
+% PUT /_node/$node/_config/Section/Key
+% "value"
+handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+ couch_util:check_config_blacklist(Section),
+ Value = couch_util:trim(chttpd:json_body(Req)),
+ Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
+ OldValue = call_node(Node, config, get, [Section, Key, ""]),
+ case call_node(Node, config, set, [Section, Key, ?b2l(Value), Persist]) of
+ ok ->
+ send_json(Req, 200, list_to_binary(OldValue));
+ {error, Reason} ->
+ send_error(Req, {bad_request, Reason})
+ end;
+% GET /_node/$node/_config/Section/Key
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+ case call_node(Node, config, get, [Section, Key, undefined]) of
+ undefined ->
+ throw({not_found, unknown_config_value});
+ Value ->
+ send_json(Req, 200, list_to_binary(Value))
+ end;
+% DELETE /_node/$node/_config/Section/Key
+handle_node_req(#httpd{method='DELETE',path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+ couch_util:check_config_blacklist(Section),
+ Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
+ case call_node(Node, config, get, [Section, Key, undefined]) of
+ undefined ->
+ throw({not_found, unknown_config_value});
+ OldValue ->
+ case call_node(Node, config, delete, [Section, Key, Persist]) of
+ ok ->
+ send_json(Req, 200, list_to_binary(OldValue));
+ {error, Reason} ->
+ send_error(Req, {bad_request, Reason})
+ end
+ end;
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key]}=Req) ->
+ send_method_not_allowed(Req, "GET,PUT,DELETE");
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key | _]}=Req) ->
+ send_error(Req, not_found);
+
+% GET /_node/$node/_stats
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_stats">> | Path]}=Req) ->
+ flush(Node, Req),
+ Stats0 = call_node(Node, couch_stats, fetch, []),
+ Stats = couch_stats_httpd:transform_stats(Stats0),
+ Nested = couch_stats_httpd:nest(Stats),
+ EJSON0 = couch_stats_httpd:to_ejson(Nested),
+ EJSON1 = couch_stats_httpd:extract_path(Path, EJSON0),
+ send_json(Req, EJSON1);
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_stats">>]}=Req) ->
+ send_method_not_allowed(Req, "GET");
+
+% GET /_node/$node/_system
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_system">>]}=Req) ->
+ Stats = call_node(Node, chttpd_node, get_stats, []),
+ EJSON = couch_stats_httpd:to_ejson(Stats),
+ send_json(Req, EJSON);
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_system">>]}=Req) ->
+ send_method_not_allowed(Req, "GET");
+
+% POST /_node/$node/_restart
+handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_restart">>]}=Req) ->
+ call_node(Node, init, restart, []),
+ send_json(Req, 200, {[{ok, true}]});
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_restart">>]}=Req) ->
+ send_method_not_allowed(Req, "POST");
+
+% GET /_node/$node/_all_dbs
+handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_all_dbs">>]}=Req) ->
+ {ok, DbNames} = call_node(Node, couch_server, all_databases, []),
+ send_json(Req, DbNames);
+handle_node_req(#httpd{path_parts=[_, _Node, <<"_all_dbs">>]}=Req) ->
+ send_method_not_allowed(Req, "GET");
+
+% /_node/$node/{db} and /_node/$node/{db}/...
+handle_node_req(#httpd{path_parts=[_, _Node]}=Req) ->
+ send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
+handle_node_req(#httpd{path_parts=[_, Node | PathParts],
+ mochi_req=MochiReq0}=Req0) ->
+ % strip /_node/{node} from Req0 before descending further
+ RawUri = MochiReq0:get(raw_path),
+ {_, Query, Fragment} = mochiweb_util:urlsplit_path(RawUri),
+ NewPath0 = ?l2b("/" ++ string:join([?b2l(P) || P <- PathParts], [$\/])),
+ NewRawPath = mochiweb_util:urlunsplit_path({NewPath0, Query, Fragment}),
+ MochiReq = mochiweb_request:new(MochiReq0:get(socket),
+ % MochiReq:get(opts),
+ MochiReq0:get(method),
+ NewRawPath,
+ MochiReq0:get(version),
+ MochiReq0:get(headers)),
+ Req = Req0#httpd{
+ mochi_req = MochiReq,
+ path_parts = PathParts
+ },
+ handle_node_db_req(Req, Node);
+
+% Abnormal _node requests follow
+handle_node_req(#httpd{path_parts=[_]}=Req) ->
+ send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
+handle_node_req(Req) ->
+ send_error(Req, not_found).
+
+
+% Unclustered system db or shard requests
+handle_node_db_req(#httpd{method=Method,
+ path_parts=[DbName|RestParts],
+ user_ctx=Ctx}=Req,
+ Node) ->
+ chttpd:verify_is_server_admin(Req),
+ %DbsDbName = config:get("mem3", "shards_db", "_dbs"),
+ %NodesDbName = config:get("mem3", "nodes_db", "_nodes"),
+ case {Method, DbName, RestParts} of
+ {'GET', DbName, []} ->
+ case call_node(Node, chttpd_node, do_db_req,
+ [Ctx, DbName, couch_db, get_db_info]) of
+ {ok, DbInfo} ->
+ send_json(Req, {DbInfo});
+ {error, {_, _}=Error} ->
+ send_error(Req, Error);
+ {_, _}=Error ->
+ send_error(Req, Error)
+ end;
+ {_, DbName, []} ->
+ send_method_not_allowed(Req, "GET");
+ %{'GET', DbName, [<<"_all_docs">>]} ->
+ % ...
+ %{'POST', DbName, [<<"_compact">>]} ->
+ % ...
+ %{_, DbName, [DocName]} ->
+ % %only support doc CRUD in _dbs/_nodes, and _info endpoint on all
+ %{'GET', DbName, [<<"_design">>, DDoc, <<"_info">>]} ->
+ % %individual view shard info stats
+ {_, _, _} ->
+ send_error(Req, {bad_request, <<"invalid _node request">>})
+ end.
+
+% below adapted from old couch_httpd_db
+% all of these run on the requested node
+do_db_req(Ctx, DbName, Mod, Fun) ->
+ case couch_db:open(DbName, [{user_ctx, Ctx}]) of
+ {ok, Db} ->
+ try
+ erlang:apply(Mod, Fun, [Db])
+ after
+ catch couch_db:close(Db)
+ end;
+ Error ->
+ throw(Error)
+ end.
+
+
+call_node(Node0, Mod, Fun, Args) when is_binary(Node0) ->
+ Node1 = try
+ list_to_existing_atom(?b2l(Node0))
+ catch
+ error:badarg ->
+ throw({not_found, <<"no such node: ", Node0/binary>>})
+ end,
+ call_node(Node1, Mod, Fun, Args);
+call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
+ couch_log:error("In call_node ~p ~p ~p ~p\n", [Node, Mod, Fun, Args]),
+ case rpc:call(Node, Mod, Fun, Args) of
+ {badrpc, nodedown} ->
+ Reason = ?l2b(io_lib:format("~s is down", [Node])),
+ throw({error, {nodedown, Reason}});
+ Else ->
+ Else
+ end.
+
+flush(Node, Req) ->
+ case couch_util:get_value("flush", chttpd:qs(Req)) of
+ "true" ->
+ call_node(Node, couch_stats_aggregator, flush, []);
+ _Else ->
+ ok
+ end.
+
+get_stats() ->
+ Other = erlang:memory(system) - lists:sum([X || {_,X} <-
+ erlang:memory([atom, code, binary, ets])]),
+ Memory = [{other, Other} | erlang:memory([atom, atom_used, processes,
+ processes_used, binary, code, ets])],
+ {NumberOfGCs, WordsReclaimed, _} = statistics(garbage_collection),
+ {{input, Input}, {output, Output}} = statistics(io),
+ {CF, CDU} = db_pid_stats(),
+ MessageQueues0 = [{couch_file, {CF}}, {couch_db_updater, {CDU}}],
+ MessageQueues = MessageQueues0 ++ message_queues(registered()),
+ [
+ {uptime, couch_app:uptime() div 1000},
+ {memory, {Memory}},
+ {run_queue, statistics(run_queue)},
+ {ets_table_count, length(ets:all())},
+ {context_switches, element(1, statistics(context_switches))},
+ {reductions, element(1, statistics(reductions))},
+ {garbage_collection_count, NumberOfGCs},
+ {words_reclaimed, WordsReclaimed},
+ {io_input, Input},
+ {io_output, Output},
+ {os_proc_count, couch_proc_manager:get_proc_count()},
+ {stale_proc_count, couch_proc_manager:get_stale_proc_count()},
+ {process_count, erlang:system_info(process_count)},
+ {process_limit, erlang:system_info(process_limit)},
+ {message_queues, {MessageQueues}},
+ {internal_replication_jobs, mem3_sync:get_backlog()},
+ {distribution, {get_distribution_stats()}}
+ ].
+
+db_pid_stats() ->
+ {monitors, M} = process_info(whereis(couch_stats_process_tracker), monitors),
+ Candidates = [Pid || {process, Pid} <- M],
+ CouchFiles = db_pid_stats(couch_file, Candidates),
+ CouchDbUpdaters = db_pid_stats(couch_db_updater, Candidates),
+ {CouchFiles, CouchDbUpdaters}.
+
+db_pid_stats(Mod, Candidates) ->
+ Mailboxes = lists:foldl(
+ fun(Pid, Acc) ->
+ case process_info(Pid, [message_queue_len, dictionary]) of
+ undefined ->
+ Acc;
+ PI ->
+ Dictionary = proplists:get_value(dictionary, PI, []),
+ case proplists:get_value('$initial_call', Dictionary) of
+ {Mod, init, 1} ->
+ case proplists:get_value(message_queue_len, PI) of
+ undefined -> Acc;
+ Len -> [Len|Acc]
+ end;
+ _ ->
+ Acc
+ end
+ end
+ end, [], Candidates
+ ),
+ format_pid_stats(Mailboxes).
+
+format_pid_stats([]) ->
+ [];
+format_pid_stats(Mailboxes) ->
+ Sorted = lists:sort(Mailboxes),
+ Count = length(Sorted),
+ [
+ {count, Count},
+ {min, hd(Sorted)},
+ {max, lists:nth(Count, Sorted)},
+ {'50', lists:nth(round(Count * 0.5), Sorted)},
+ {'90', lists:nth(round(Count * 0.9), Sorted)},
+ {'99', lists:nth(round(Count * 0.99), Sorted)}
+ ].
+
+get_distribution_stats() ->
+ lists:map(fun({Node, Socket}) ->
+ {ok, Stats} = inet:getstat(Socket),
+ {Node, {Stats}}
+ end, erlang:system_info(dist_ctrl)).
+
+message_queues(Registered) ->
+ lists:map(fun(Name) ->
+ Type = message_queue_len,
+ {Type, Length} = process_info(whereis(Name), Type),
+ {Name, Length}
+ end, Registered).
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index 208938745..cac6c04f5 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -61,8 +61,7 @@
{"_uuids", "{couch_httpd_misc_handlers, handle_uuids_req}"},
{"_stats", "{couch_stats_httpd, handle_stats_req}"},
{"_session", "{couch_httpd_auth, handle_session_req}"},
- {"_plugins", "{couch_plugins_httpd, handle_req}"},
- {"_system", "{chttpd_misc, handle_system_req}"}
+ {"_plugins", "{couch_plugins_httpd, handle_req}"}
]},
{ httpd_db_handlers, [
{"_all_docs", "{couch_mrview_http, handle_all_docs_req}"},
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 3cdfc0ca3..28975abef 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -37,6 +37,7 @@
-export([validate_host/1]).
-export([validate_bind_address/1]).
-export([check_max_request_length/1]).
+-export([handle_request/1]).
-define(HANDLER_NAME_IN_MODULE_POS, 6).
@@ -104,38 +105,14 @@ start_link(Name, Options) ->
Else -> Else
end,
ok = validate_bind_address(BindAddress),
- DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
-
- {ok, HttpdGlobalHandlers} = application:get_env(httpd_global_handlers),
-
- UrlHandlersList = lists:map(
- fun({UrlKey, SpecStr}) ->
- {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
- end, HttpdGlobalHandlers),
-
- {ok, HttpdDbHandlers} = application:get_env(httpd_db_handlers),
- DbUrlHandlersList = lists:map(
- fun({UrlKey, SpecStr}) ->
- {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
- end, HttpdDbHandlers),
-
- {ok, HttpdDesignHandlers} = application:get_env(httpd_design_handlers),
-
- DesignUrlHandlersList = lists:map(
- fun({UrlKey, SpecStr}) ->
- {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
- end, HttpdDesignHandlers),
-
- UrlHandlers = dict:from_list(UrlHandlersList),
- DbUrlHandlers = dict:from_list(DbUrlHandlersList),
- DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
{ok, ServerOptions} = couch_util:parse_term(
config:get("httpd", "server_options", "[]")),
{ok, SocketOptions} = couch_util:parse_term(
config:get("httpd", "socket_options", "[]")),
set_auth_handlers(),
+ Handlers = get_httpd_handlers(),
% ensure uuid is set so that concurrent replications
% get the same value.
@@ -148,9 +125,7 @@ start_link(Name, Options) ->
_ ->
ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
end,
- apply(?MODULE, handle_request, [
- Req, DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers
- ])
+ apply(?MODULE, handle_request, [Req] ++ Handlers)
end,
% set mochiweb options
@@ -187,6 +162,34 @@ set_auth_handlers() ->
auth_handler_name(SpecStr) ->
lists:nth(?HANDLER_NAME_IN_MODULE_POS, re:split(SpecStr, "[\\W_]", [])).
+get_httpd_handlers() ->
+ {ok, HttpdGlobalHandlers} = application:get_env(httpd_global_handlers),
+
+ UrlHandlersList = lists:map(
+ fun({UrlKey, SpecStr}) ->
+ {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
+ end, HttpdGlobalHandlers),
+
+ {ok, HttpdDbHandlers} = application:get_env(httpd_db_handlers),
+
+ DbUrlHandlersList = lists:map(
+ fun({UrlKey, SpecStr}) ->
+ {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
+ end, HttpdDbHandlers),
+
+ {ok, HttpdDesignHandlers} = application:get_env(httpd_design_handlers),
+
+ DesignUrlHandlersList = lists:map(
+ fun({UrlKey, SpecStr}) ->
+ {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
+ end, HttpdDesignHandlers),
+
+ UrlHandlers = dict:from_list(UrlHandlersList),
+ DbUrlHandlers = dict:from_list(DbUrlHandlersList),
+ DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
+ DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
+ [DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers].
+
% SpecStr is a string like "{my_module, my_fun}"
% or "{my_module, my_fun, <<"my_arg">>}"
make_arity_1_fun(SpecStr) ->
@@ -217,6 +220,14 @@ make_arity_3_fun(SpecStr) ->
make_fun_spec_strs(SpecStr) ->
re:split(SpecStr, "(?<=})\\s*,\\s*(?={)", [{return, list}]).
+handle_request(MochiReq) ->
+ %[DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers] = get_httpd_handlers(),
+ DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
+ EmptyDict = dict:new(),
+% handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers,
+% DesignUrlHandlers).
+ handle_request(MochiReq, DefaultFun, EmptyDict, EmptyDict, EmptyDict).
+
handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers,
DesignUrlHandlers) ->
%% reset rewrite count for new request