summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJan Lehnardt <jan@apache.org>2018-11-09 14:59:45 +0100
committerGitHub <noreply@github.com>2018-11-09 14:59:45 +0100
commit064e636d4ebe972af5ecd5708ec4f0a7975a7e58 (patch)
tree438c0e3f2418416650417a9c8eea1b8ed5e70b8a
parent4dd0d31246d6c4aebb82473fc32c6149704a64dc (diff)
parentc850ffa889dc1f8bf562107aa07da429357bc8ef (diff)
downloadcouchdb-chore/dep.tar.gz
Merge branch 'master' into chore/depchore/dep
-rwxr-xr-xdev/run24
-rw-r--r--rel/files/couchdb.cmd.in (renamed from rel/overlay/bin/couchdb.cmd)3
-rwxr-xr-xrel/files/couchdb.in (renamed from rel/overlay/bin/couchdb)4
-rw-r--r--rel/overlay/etc/default.ini55
-rw-r--r--rel/overlay/etc/local.ini6
-rw-r--r--rel/reltool.config4
-rw-r--r--src/chttpd/test/chttpd_view_test.erl1
-rw-r--r--src/couch/src/couch.app.src36
-rw-r--r--src/couch/src/couch_httpd.erl17
-rw-r--r--src/couch/src/couch_httpd_proxy.erl428
-rw-r--r--src/couch/src/couch_httpd_vhost.erl16
-rw-r--r--src/couch/src/couch_os_daemons.erl394
-rw-r--r--src/couch/src/couch_proc_manager.erl46
-rw-r--r--src/couch/src/couch_secondary_sup.erl32
-rw-r--r--src/couch/test/couchdb_http_proxy_tests.erl456
-rw-r--r--src/couch/test/couchdb_os_daemons_tests.erl259
-rw-r--r--src/couch/test/couchdb_os_proc_pool.erl7
-rw-r--r--test/javascript/tests/config.js3
18 files changed, 153 insertions, 1638 deletions
diff --git a/dev/run b/dev/run
index 5ab895eb1..5bf5fc0f5 100755
--- a/dev/run
+++ b/dev/run
@@ -273,16 +273,6 @@ def boot_haproxy(ctx):
def hack_default_ini(ctx, node, contents):
- # Replace couchjs command
- couchjs = os.path.join(ctx['rootdir'], "src", "couch", "priv", "couchjs")
- mainjs = os.path.join(ctx['rootdir'], "share", "server", "main.js")
- coffeejs = os.path.join(ctx['rootdir'], "share", "server", "main-coffee.js")
-
- repl = toposixpath("javascript = %s %s" % (couchjs, mainjs))
- contents = re.sub("(?m)^javascript.*$", repl, contents)
-
- repl = toposixpath("coffeescript = %s %s" % (couchjs, coffeejs))
- contents = re.sub("(?m)^coffeescript.*$", repl, contents)
if ctx['enable_erlang_views']:
contents = re.sub(
@@ -413,15 +403,29 @@ def check_node_alive(url):
if error is not None:
raise error
+def set_boot_env(ctx):
+
+ # fudge default query server paths
+ couchjs = os.path.join(ctx['rootdir'], "src", "couch", "priv", "couchjs")
+ mainjs = os.path.join(ctx['rootdir'], "share", "server", "main.js")
+ coffeejs = os.path.join(ctx['rootdir'], "share", "server", "main-coffee.js")
+
+ qs_javascript = toposixpath("%s %s" % (couchjs, mainjs))
+ qs_coffescript = toposixpath("%s %s" % (couchjs, coffeejs))
+
+ os.environ['COUCHDB_QUERY_SERVER_JAVASCRIPT'] = qs_javascript
+ os.environ['COUCHDB_QUERY_SERVER_COFFEESCRIPT'] = qs_coffescript
@log('Start node {node}')
def boot_node(ctx, node):
erl_libs = os.path.join(ctx['rootdir'], "src")
+ set_boot_env(ctx)
env = os.environ.copy()
env["ERL_LIBS"] = os.pathsep.join([erl_libs])
node_etcdir = os.path.join(ctx['devdir'], "lib", node, "etc")
reldir = os.path.join(ctx['rootdir'], "rel")
+
cmd = [
"erl",
"-args_file", os.path.join(node_etcdir, "vm.args"),
diff --git a/rel/overlay/bin/couchdb.cmd b/rel/files/couchdb.cmd.in
index 5e5f2cfe6..9438872c6 100644
--- a/rel/overlay/bin/couchdb.cmd
+++ b/rel/files/couchdb.cmd.in
@@ -25,6 +25,9 @@ set EMU=beam
set PROGNAME=%~n0
set PATH=%PATH%;%COUCHDB_BIN_DIR%
+set COUCHDB_QUERY_SERVER_JAVASCRIPT="{{prefix}}/bin/couchjs {{prefix}}/share/server/main.js"
+set COUCHDB_QUERY_SERVER_COFFEESCRIPT="{{prefix}}/bin/couchjs {{prefix}}/share/server/main-coffee.js"
+
"%BINDIR%\erl" -boot "%ROOTDIR%\releases\%APP_VSN%\couchdb" ^
-args_file "%ROOTDIR%\etc\vm.args" ^
-config "%ROOTDIR%\releases\%APP_VSN%\sys.config" %*
diff --git a/rel/overlay/bin/couchdb b/rel/files/couchdb.in
index a9e6e9bea..aae179aa7 100755
--- a/rel/overlay/bin/couchdb
+++ b/rel/files/couchdb.in
@@ -26,10 +26,12 @@ export BINDIR="$ROOTDIR/erts-$ERTS_VSN/bin"
export EMU=beam
export PROGNAME=`echo $0 | sed 's/.*\///'`
+export COUCHDB_QUERY_SERVER_JAVASCRIPT="{{prefix}}/bin/couchjs {{prefix}}/share/server/main.js"
+export COUCHDB_QUERY_SERVER_COFFEESCRIPT="{{prefix}}/bin/couchjs {{prefix}}/share/server/main-coffee.js"
+
ARGS_FILE="${COUCHDB_ARGS_FILE:-$ROOTDIR/etc/vm.args}"
SYSCONFIG_FILE="${COUCHDB_SYSCONFIG_FILE:-$ROOTDIR/releases/$APP_VSN/sys.config}"
exec "$BINDIR/erlexec" -boot "$ROOTDIR/releases/$APP_VSN/couchdb" \
-args_file "${ARGS_FILE}" \
-config "${SYSCONFIG_FILE}" "$@"
-
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index ba2a498eb..f384de3aa 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -132,9 +132,7 @@ database_prefix = userdb-
port = {{backend_port}}
bind_address = 127.0.0.1
authentication_handlers = {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
-default_handler = {couch_httpd_db, handle_request}
secure_rewrites = true
-vhost_global_handlers = _utils, _uuids, _session, _users
allow_jsonp = false
; Options for the MochiWeb HTTP server.
;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
@@ -262,13 +260,9 @@ credentials = false
; List of hosts separated by a comma. * means accept all
; hosts =
-[query_servers]
-javascript = {{prefix}}/bin/couchjs {{prefix}}/share/server/main.js
-coffeescript = {{prefix}}/bin/couchjs {{prefix}}/share/server/main-coffee.js
-
-; enable mango query engine
[native_query_servers]
-query = {mango_native_proc, start_link, []}
+; erlang query server
+; enable_erlang_query_server = false
; Changing reduce_limit to false will disable reduce_limit.
; If you think you're hitting reduce_limit with a "good" reduce function,
@@ -283,16 +277,6 @@ os_process_limit = 100
; "infinity" is also a valid configuration value.
;group_info_timeout = 5000
-[daemons]
-index_server={couch_index_server, start_link, []}
-query_servers={couch_proc_manager, start_link, []}
-vhosts={couch_httpd_vhost, start_link, []}
-httpd={couch_httpd, start_link, []}
-uuids={couch_uuids, start, []}
-auth_cache={couch_auth_cache, start_link, []}
-os_daemons={couch_os_daemons, start_link, []}
-compaction_daemon={couch_compaction_daemon, start_link, []}
-
[mango]
; Set to true to disable the "index all fields" text index, which can lead
; to out of memory issues when users have documents with nested array fields.
@@ -303,41 +287,6 @@ compaction_daemon={couch_compaction_daemon, start_link, []}
[indexers]
couch_mrview = true
-[httpd_global_handlers]
-/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
-favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "{{prefix}}/share/www"}
-
-_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "{{prefix}}/share/www"}
-_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
-_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
-_config = {couch_httpd_misc_handlers, handle_config_req}
-_replicate = {couch_replicator_httpd, handle_req}
-_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
-_stats = {couch_stats_httpd, handle_stats_req}
-_session = {couch_httpd_auth, handle_session_req}
-_plugins = {couch_plugins_httpd, handle_req}
-_system = {chttpd_misc, handle_system_req}
-
-[httpd_db_handlers]
-_all_docs = {couch_mrview_http, handle_all_docs_req}
-_local_docs = {couch_mrview_http, handle_local_docs_req}
-_design_docs = {couch_mrview_http, handle_design_docs_req}
-_changes = {couch_httpd_db, handle_db_changes_req}
-_compact = {couch_httpd_db, handle_compact_req}
-_design = {couch_httpd_db, handle_design_req}
-_temp_view = {couch_mrview_http, handle_temp_view_req}
-_view_cleanup = {couch_mrview_http, handle_cleanup_req}
-
-[httpd_design_handlers]
-_compact = {couch_mrview_http, handle_compact_req}
-_info = {couch_mrview_http, handle_info_req}
-_list = {couch_mrview_show, handle_view_list_req}
-_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
-_show = {couch_mrview_show, handle_doc_show_req}
-_update = {couch_mrview_show, handle_doc_update_req}
-_view = {couch_mrview_http, handle_view_req}
-_view_changes = {couch_mrview_http, handle_view_changes_req}
-
[uuids]
; Known algorithms:
; random - 128 bits of random awesome
diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini
index ea5467c9a..ecc97f466 100644
--- a/rel/overlay/etc/local.ini
+++ b/rel/overlay/etc/local.ini
@@ -52,12 +52,8 @@
; Basic realm="server" in order to prevent you getting logged out.
; require_valid_user = false
-[daemons]
-; enable SSL support by uncommenting the following line and supply the PEM's below.
-; the default ssl port CouchDB listens on is 6984
-; httpsd = {chttpd, start_link, [https]}
-
[ssl]
+;enable = true
;cert_file = /full/path/to/server_cert.pem
;key_file = /full/path/to/server_key.pem
;password = somepassword
diff --git a/rel/reltool.config b/rel/reltool.config
index 2c55d0900..bf4ae448b 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -125,5 +125,7 @@
{copy, "files/sys.config", "releases/\{\{rel_vsn\}\}/sys.config"},
{copy, "files/vm.args", "releases/\{\{rel_vsn\}\}/vm.args"},
{template, "overlay/etc/default.ini", "etc/default.ini"},
- {template, "overlay/etc/vm.args", "etc/vm.args"}
+ {template, "overlay/etc/vm.args", "etc/vm.args"},
+ {template, "files/couchdb.in", "bin/couchdb"},
+ {template, "files/couchdb.cmd.in", "bin/couchdb.cmd"}
]}.
diff --git a/src/chttpd/test/chttpd_view_test.erl b/src/chttpd/test/chttpd_view_test.erl
index 3457c6f30..114eb089b 100644
--- a/src/chttpd/test/chttpd_view_test.erl
+++ b/src/chttpd/test/chttpd_view_test.erl
@@ -31,6 +31,7 @@ setup() ->
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
+ os:putenv("COUCHDB_QUERY_SERVER_JAVASCRIPT", "../../../bin/couchjs ../../../share/server/main.js"),
Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
create_db(Url),
Url.
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index 6af213624..208938745 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -48,5 +48,41 @@
ioq,
couch_stats,
hyper
+ ]},
+ {env, [
+ { httpd_global_handlers, [
+ {"/", "{couch_httpd_misc_handlers, handle_welcome_req, <<\"Welcome\">>}"},
+ {"favicon.ico", "{couch_httpd_misc_handlers, handle_favicon_req, \"{{prefix}}/share/www\"}"},
+ {"_utils", "{couch_httpd_misc_handlers, handle_utils_dir_req, \"{{prefix}}/share/www\"}"},
+ {"_all_dbs", "{couch_httpd_misc_handlers, handle_all_dbs_req}"},
+ {"_active_tasks", "{couch_httpd_misc_handlers, handle_task_status_req}"},
+ {"_config", "{couch_httpd_misc_handlers, handle_config_req}"},
+ {"_replicate", "{couch_replicator_httpd, handle_req}"},
+ {"_uuids", "{couch_httpd_misc_handlers, handle_uuids_req}"},
+ {"_stats", "{couch_stats_httpd, handle_stats_req}"},
+ {"_session", "{couch_httpd_auth, handle_session_req}"},
+ {"_plugins", "{couch_plugins_httpd, handle_req}"},
+ {"_system", "{chttpd_misc, handle_system_req}"}
+ ]},
+ { httpd_db_handlers, [
+ {"_all_docs", "{couch_mrview_http, handle_all_docs_req}"},
+ {"_local_docs", "{couch_mrview_http, handle_local_docs_req}"},
+ {"_design_docs", "{couch_mrview_http, handle_design_docs_req}"},
+ {"_changes", "{couch_httpd_db, handle_db_changes_req}"},
+ {"_compact", "{couch_httpd_db, handle_compact_req}"},
+ {"_design", "{couch_httpd_db, handle_design_req}"},
+ {"_temp_view", "{couch_mrview_http, handle_temp_view_req}"},
+ {"_view_cleanup", "{couch_mrview_http, handle_cleanup_req}"}
+ ]},
+ { httpd_design_handlers, [
+ {"_compact", "{couch_mrview_http, handle_compact_req}"},
+ {"_info", "{couch_mrview_http, handle_info_req}"},
+ {"_list", "{couch_mrview_show, handle_view_list_req}"},
+ {"_rewrite", "{couch_httpd_rewrite, handle_rewrite_req}"},
+ {"_show", "{couch_mrview_show, handle_doc_show_req}"},
+ {"_update", "{couch_mrview_show, handle_doc_update_req}"},
+ {"_view", "{couch_mrview_http, handle_view_req}"},
+ {"_view_changes", "{couch_mrview_http, handle_view_changes_req}"}
+ ]}
]}
]}.
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index e66a78e70..861fd58c4 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -104,25 +104,28 @@ start_link(Name, Options) ->
Else -> Else
end,
ok = validate_bind_address(BindAddress),
- DefaultSpec = "{couch_httpd_db, handle_request}",
- DefaultFun = make_arity_1_fun(
- config:get("httpd", "default_handler", DefaultSpec)
- ),
+ DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
+
+ {ok, HttpdGlobalHandlers} = application:get_env(httpd_global_handlers),
UrlHandlersList = lists:map(
fun({UrlKey, SpecStr}) ->
{?l2b(UrlKey), make_arity_1_fun(SpecStr)}
- end, config:get("httpd_global_handlers")),
+ end, HttpdGlobalHandlers),
+
+ {ok, HttpdDbHandlers} = application:get_env(httpd_db_handlers),
DbUrlHandlersList = lists:map(
fun({UrlKey, SpecStr}) ->
{?l2b(UrlKey), make_arity_2_fun(SpecStr)}
- end, config:get("httpd_db_handlers")),
+ end, HttpdDbHandlers),
+
+ {ok, HttpdDesignHandlers} = application:get_env(httpd_design_handlers),
DesignUrlHandlersList = lists:map(
fun({UrlKey, SpecStr}) ->
{?l2b(UrlKey), make_arity_3_fun(SpecStr)}
- end, config:get("httpd_design_handlers")),
+ end, HttpdDesignHandlers),
UrlHandlers = dict:from_list(UrlHandlersList),
DbUrlHandlers = dict:from_list(DbUrlHandlersList),
diff --git a/src/couch/src/couch_httpd_proxy.erl b/src/couch/src/couch_httpd_proxy.erl
deleted file mode 100644
index d2c7acc3a..000000000
--- a/src/couch/src/couch_httpd_proxy.erl
+++ /dev/null
@@ -1,428 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_httpd_proxy).
-
--compile(tuple_calls).
-
--export([handle_proxy_req/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("ibrowse/include/ibrowse.hrl").
-
--define(TIMEOUT, infinity).
--define(PKT_SIZE, 4096).
-
-
-handle_proxy_req(Req, ProxyDest) ->
- Method = get_method(Req),
- Url = get_url(Req, ProxyDest),
- Version = get_version(Req),
- Headers = get_headers(Req),
- Body = get_body(Req),
- Options = [
- {http_vsn, Version},
- {headers_as_is, true},
- {response_format, binary},
- {stream_to, {self(), once}}
- ],
- case ibrowse:send_req(Url, Headers, Method, Body, Options, ?TIMEOUT) of
- {ibrowse_req_id, ReqId} ->
- stream_response(Req, ProxyDest, ReqId);
- {error, Reason} ->
- throw({error, Reason})
- end.
-
-
-get_method(#httpd{mochi_req=MochiReq}) ->
- case MochiReq:get(method) of
- Method when is_atom(Method) ->
- list_to_atom(string:to_lower(atom_to_list(Method)));
- Method when is_list(Method) ->
- list_to_atom(string:to_lower(Method));
- Method when is_binary(Method) ->
- list_to_atom(string:to_lower(?b2l(Method)))
- end.
-
-
-get_url(Req, ProxyDest) when is_binary(ProxyDest) ->
- get_url(Req, ?b2l(ProxyDest));
-get_url(#httpd{mochi_req=MochiReq}=Req, ProxyDest) ->
- BaseUrl = case mochiweb_util:partition(ProxyDest, "/") of
- {[], "/", _} -> couch_httpd:absolute_uri(Req, ProxyDest);
- _ -> ProxyDest
- end,
- ProxyPrefix = "/" ++ ?b2l(hd(Req#httpd.path_parts)),
- RequestedPath = MochiReq:get(raw_path),
- case mochiweb_util:partition(RequestedPath, ProxyPrefix) of
- {[], ProxyPrefix, []} ->
- BaseUrl;
- {[], ProxyPrefix, [$/ | DestPath]} ->
- remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
- {[], ProxyPrefix, DestPath} ->
- remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
- _Else ->
- throw({invalid_url_path, {ProxyPrefix, RequestedPath}})
- end.
-
-get_version(#httpd{mochi_req=MochiReq}) ->
- MochiReq:get(version).
-
-
-get_headers(#httpd{mochi_req=MochiReq}) ->
- to_ibrowse_headers(mochiweb_headers:to_list(MochiReq:get(headers)), []).
-
-to_ibrowse_headers([], Acc) ->
- lists:reverse(Acc);
-to_ibrowse_headers([{K, V} | Rest], Acc) when is_atom(K) ->
- to_ibrowse_headers([{atom_to_list(K), V} | Rest], Acc);
-to_ibrowse_headers([{K, V} | Rest], Acc) when is_list(K) ->
- case string:to_lower(K) of
- "content-length" ->
- to_ibrowse_headers(Rest, [{content_length, V} | Acc]);
- % This appears to make ibrowse too smart.
- %"transfer-encoding" ->
- % to_ibrowse_headers(Rest, [{transfer_encoding, V} | Acc]);
- _ ->
- to_ibrowse_headers(Rest, [{K, V} | Acc])
- end.
-
-get_body(#httpd{method='GET'}) ->
- fun() -> eof end;
-get_body(#httpd{method='HEAD'}) ->
- fun() -> eof end;
-get_body(#httpd{method='DELETE'}) ->
- fun() -> eof end;
-get_body(#httpd{mochi_req=MochiReq}) ->
- case MochiReq:get(body_length) of
- undefined ->
- <<>>;
- {unknown_transfer_encoding, Unknown} ->
- exit({unknown_transfer_encoding, Unknown});
- chunked ->
- {fun stream_chunked_body/1, {init, MochiReq, 0}};
- 0 ->
- <<>>;
- Length when is_integer(Length) andalso Length > 0 ->
- {fun stream_length_body/1, {init, MochiReq, Length}};
- Length ->
- exit({invalid_body_length, Length})
- end.
-
-
-remove_trailing_slash(Url) ->
- rem_slash(lists:reverse(Url)).
-
-rem_slash([]) ->
- [];
-rem_slash([$\s | RevUrl]) ->
- rem_slash(RevUrl);
-rem_slash([$\t | RevUrl]) ->
- rem_slash(RevUrl);
-rem_slash([$\r | RevUrl]) ->
- rem_slash(RevUrl);
-rem_slash([$\n | RevUrl]) ->
- rem_slash(RevUrl);
-rem_slash([$/ | RevUrl]) ->
- rem_slash(RevUrl);
-rem_slash(RevUrl) ->
- lists:reverse(RevUrl).
-
-
-stream_chunked_body({init, MReq, 0}) ->
- % First chunk, do expect-continue dance.
- init_body_stream(MReq),
- stream_chunked_body({stream, MReq, 0, [], ?PKT_SIZE});
-stream_chunked_body({stream, MReq, 0, Buf, BRem}) ->
- % Finished a chunk, get next length. If next length
- % is 0, its time to try and read trailers.
- {CRem, Data} = read_chunk_length(MReq),
- case CRem of
- 0 ->
- BodyData = lists:reverse(Buf, Data),
- {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}};
- _ ->
- stream_chunked_body(
- {stream, MReq, CRem, [Data | Buf], BRem-size(Data)}
- )
- end;
-stream_chunked_body({stream, MReq, CRem, Buf, BRem}) when BRem =< 0 ->
- % Time to empty our buffers to the upstream socket.
- BodyData = lists:reverse(Buf),
- {ok, BodyData, {stream, MReq, CRem, [], ?PKT_SIZE}};
-stream_chunked_body({stream, MReq, CRem, Buf, BRem}) ->
- % Buffer some more data from the client.
- Length = lists:min([CRem, BRem]),
- Socket = MReq:get(socket),
- NewState = case mochiweb_socket:recv(Socket, Length, ?TIMEOUT) of
- {ok, Data} when size(Data) == CRem ->
- case mochiweb_socket:recv(Socket, 2, ?TIMEOUT) of
- {ok, <<"\r\n">>} ->
- {stream, MReq, 0, [<<"\r\n">>, Data | Buf], BRem-Length-2};
- _ ->
- exit(normal)
- end;
- {ok, Data} ->
- {stream, MReq, CRem-Length, [Data | Buf], BRem-Length};
- _ ->
- exit(normal)
- end,
- stream_chunked_body(NewState);
-stream_chunked_body({trailers, MReq, Buf, BRem}) when BRem =< 0 ->
- % Empty our buffers and send data upstream.
- BodyData = lists:reverse(Buf),
- {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}};
-stream_chunked_body({trailers, MReq, Buf, BRem}) ->
- % Read another trailer into the buffer or stop on an
- % empty line.
- Socket = MReq:get(socket),
- mochiweb_socket:setopts(Socket, [{packet, line}]),
- case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
- {ok, <<"\r\n">>} ->
- mochiweb_socket:setopts(Socket, [{packet, raw}]),
- BodyData = lists:reverse(Buf, <<"\r\n">>),
- {ok, BodyData, eof};
- {ok, Footer} ->
- mochiweb_socket:setopts(Socket, [{packet, raw}]),
- NewState = {trailers, MReq, [Footer | Buf], BRem-size(Footer)},
- stream_chunked_body(NewState);
- _ ->
- exit(normal)
- end;
-stream_chunked_body(eof) ->
- % Tell ibrowse we're done sending data.
- eof.
-
-
-stream_length_body({init, MochiReq, Length}) ->
- % Do the expect-continue dance
- init_body_stream(MochiReq),
- stream_length_body({stream, MochiReq, Length});
-stream_length_body({stream, _MochiReq, 0}) ->
- % Finished streaming.
- eof;
-stream_length_body({stream, MochiReq, Length}) ->
- BufLen = lists:min([Length, ?PKT_SIZE]),
- case MochiReq:recv(BufLen) of
- <<>> -> eof;
- Bin -> {ok, Bin, {stream, MochiReq, Length-BufLen}}
- end.
-
-
-init_body_stream(MochiReq) ->
- Expect = case MochiReq:get_header_value("expect") of
- undefined ->
- undefined;
- Value when is_list(Value) ->
- string:to_lower(Value)
- end,
- case Expect of
- "100-continue" ->
- MochiReq:start_raw_response({100, gb_trees:empty()});
- _Else ->
- ok
- end.
-
-
-read_chunk_length(MochiReq) ->
- Socket = MochiReq:get(socket),
- mochiweb_socket:setopts(Socket, [{packet, line}]),
- case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
- {ok, Header} ->
- mochiweb_socket:setopts(Socket, [{packet, raw}]),
- Splitter = fun(C) ->
- C =/= $\r andalso C =/= $\n andalso C =/= $\s
- end,
- {Hex, _Rest} = lists:splitwith(Splitter, ?b2l(Header)),
- {mochihex:to_int(Hex), Header};
- _ ->
- exit(normal)
- end.
-
-
-stream_response(Req, ProxyDest, ReqId) ->
- receive
- {ibrowse_async_headers, ReqId, "100", _} ->
- % ibrowse doesn't handle 100 Continue responses which
- % means we have to discard them so the proxy client
- % doesn't get confused.
- ibrowse:stream_next(ReqId),
- stream_response(Req, ProxyDest, ReqId);
- {ibrowse_async_headers, ReqId, Status, Headers} ->
- {Source, Dest} = get_urls(Req, ProxyDest),
- FixedHeaders = fix_headers(Source, Dest, Headers, []),
- case body_length(FixedHeaders) of
- chunked ->
- {ok, Resp} = couch_httpd:start_chunked_response(
- Req, list_to_integer(Status), FixedHeaders
- ),
- ibrowse:stream_next(ReqId),
- stream_chunked_response(Req, ReqId, Resp),
- {ok, Resp};
- Length when is_integer(Length) ->
- {ok, Resp} = couch_httpd:start_response_length(
- Req, list_to_integer(Status), FixedHeaders, Length
- ),
- ibrowse:stream_next(ReqId),
- stream_length_response(Req, ReqId, Resp),
- {ok, Resp};
- _ ->
- {ok, Resp} = couch_httpd:start_response(
- Req, list_to_integer(Status), FixedHeaders
- ),
- ibrowse:stream_next(ReqId),
- stream_length_response(Req, ReqId, Resp),
- % XXX: MochiWeb apparently doesn't look at the
- % response to see if it must force close the
- % connection. So we help it out here.
- erlang:put(mochiweb_request_force_close, true),
- {ok, Resp}
- end
- end.
-
-
-stream_chunked_response(Req, ReqId, Resp) ->
- receive
- {ibrowse_async_response, ReqId, {error, Reason}} ->
- throw({error, Reason});
- {ibrowse_async_response, ReqId, Chunk} ->
- couch_httpd:send_chunk(Resp, Chunk),
- ibrowse:stream_next(ReqId),
- stream_chunked_response(Req, ReqId, Resp);
- {ibrowse_async_response_end, ReqId} ->
- couch_httpd:last_chunk(Resp)
- end.
-
-
-stream_length_response(Req, ReqId, Resp) ->
- receive
- {ibrowse_async_response, ReqId, {error, Reason}} ->
- throw({error, Reason});
- {ibrowse_async_response, ReqId, Chunk} ->
- couch_httpd:send(Resp, Chunk),
- ibrowse:stream_next(ReqId),
- stream_length_response(Req, ReqId, Resp);
- {ibrowse_async_response_end, ReqId} ->
- ok
- end.
-
-
-get_urls(Req, ProxyDest) ->
- SourceUrl = couch_httpd:absolute_uri(Req, "/" ++ hd(Req#httpd.path_parts)),
- Source = parse_url(?b2l(iolist_to_binary(SourceUrl))),
- case (catch parse_url(ProxyDest)) of
- Dest when is_record(Dest, url) ->
- {Source, Dest};
- _ ->
- DestUrl = couch_httpd:absolute_uri(Req, ProxyDest),
- {Source, parse_url(DestUrl)}
- end.
-
-
-fix_headers(_, _, [], Acc) ->
- lists:reverse(Acc);
-fix_headers(Source, Dest, [{K, V} | Rest], Acc) ->
- Fixed = case string:to_lower(K) of
- "location" -> rewrite_location(Source, Dest, V);
- "content-location" -> rewrite_location(Source, Dest, V);
- "uri" -> rewrite_location(Source, Dest, V);
- "destination" -> rewrite_location(Source, Dest, V);
- "set-cookie" -> rewrite_cookie(Source, Dest, V);
- _ -> V
- end,
- fix_headers(Source, Dest, Rest, [{K, Fixed} | Acc]).
-
-
-rewrite_location(Source, #url{host=Host, port=Port, protocol=Proto}, Url) ->
- case (catch parse_url(Url)) of
- #url{host=Host, port=Port, protocol=Proto} = Location ->
- DestLoc = #url{
- protocol=Source#url.protocol,
- host=Source#url.host,
- port=Source#url.port,
- path=join_url_path(Source#url.path, Location#url.path)
- },
- url_to_url(DestLoc);
- #url{} ->
- Url;
- _ ->
- url_to_url(Source#url{path=join_url_path(Source#url.path, Url)})
- end.
-
-
-rewrite_cookie(_Source, _Dest, Cookie) ->
- Cookie.
-
-
-parse_url(Url) when is_binary(Url) ->
- ibrowse_lib:parse_url(?b2l(Url));
-parse_url(Url) when is_list(Url) ->
- ibrowse_lib:parse_url(?b2l(iolist_to_binary(Url))).
-
-
-join_url_path(Src, Dst) ->
- Src2 = case lists:reverse(Src) of
- "/" ++ RestSrc -> lists:reverse(RestSrc);
- _ -> Src
- end,
- Dst2 = case Dst of
- "/" ++ RestDst -> RestDst;
- _ -> Dst
- end,
- Src2 ++ "/" ++ Dst2.
-
-
-url_to_url(#url{host=Host, port=Port, path=Path, protocol=Proto} = Url) ->
- LPort = case {Proto, Port} of
- {http, 80} -> "";
- {https, 443} -> "";
- _ -> ":" ++ integer_to_list(Port)
- end,
- LPath = case Path of
- "/" ++ _RestPath -> Path;
- _ -> "/" ++ Path
- end,
- HostPart = case Url#url.host_type of
- ipv6_address ->
- "[" ++ Host ++ "]";
- _ ->
- Host
- end,
- atom_to_list(Proto) ++ "://" ++ HostPart ++ LPort ++ LPath.
-
-
-body_length(Headers) ->
- case is_chunked(Headers) of
- true -> chunked;
- _ -> content_length(Headers)
- end.
-
-
-is_chunked([]) ->
- false;
-is_chunked([{K, V} | Rest]) ->
- case string:to_lower(K) of
- "transfer-encoding" ->
- string:to_lower(V) == "chunked";
- _ ->
- is_chunked(Rest)
- end.
-
-content_length([]) ->
- undefined;
-content_length([{K, V} | Rest]) ->
- case string:to_lower(K) of
- "content-length" ->
- list_to_integer(V);
- _ ->
- content_length(Rest)
- end.
-
diff --git a/src/couch/src/couch_httpd_vhost.erl b/src/couch/src/couch_httpd_vhost.erl
index d8f952190..574dba9c8 100644
--- a/src/couch/src/couch_httpd_vhost.erl
+++ b/src/couch/src/couch_httpd_vhost.erl
@@ -380,10 +380,6 @@ code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-handle_config_change("httpd", "vhost_global_handlers", _, _, _) ->
- {ok, ?MODULE:reload()};
-handle_config_change("httpd", "redirect_vhost_handler", _, _, _) ->
- {ok, ?MODULE:reload()};
handle_config_change("vhosts", _, _, _, _) ->
{ok, ?MODULE:reload()};
handle_config_change(_, _, _, _, _) ->
@@ -396,25 +392,23 @@ handle_config_terminate(_Server, _Reason, _State) ->
load_conf() ->
%% get vhost globals
- VHostGlobals = re:split(config:get("httpd",
- "vhost_global_handlers",""), "\\s*,\\s*",[{return, list}]),
+ VHostGlobals = re:split("_utils, _uuids, _session, _users", "\\s*,\\s*",
+ [{return, list}]),
%% build vhosts matching rules
VHosts = make_vhosts(),
%% build vhosts handler fun
DefaultVHostFun = "{couch_httpd_vhost, redirect_to_vhost}",
- Fun = couch_httpd:make_arity_2_fun(config:get("httpd",
- "redirect_vhost_handler", DefaultVHostFun)),
+ Fun = couch_httpd:make_arity_2_fun(DefaultVHostFun),
{VHostGlobals, VHosts, Fun}.
%% cheaply determine if there are any virtual hosts
%% configured at all.
vhost_enabled() ->
- case {config:get("httpd", "vhost_global_handlers"),
- config:get("vhosts")} of
- {undefined, []} ->
+ case config:get("vhosts") of
+ [] ->
false;
_ ->
true
diff --git a/src/couch/src/couch_os_daemons.erl b/src/couch/src/couch_os_daemons.erl
deleted file mode 100644
index cd019dbb5..000000000
--- a/src/couch/src/couch_os_daemons.erl
+++ /dev/null
@@ -1,394 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_os_daemons).
--behaviour(gen_server).
--vsn(1).
--behaviour(config_listener).
-
--export([start_link/0, info/0, info/1]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-% config_listener api
--export([handle_config_change/5, handle_config_terminate/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(daemon, {
- port,
- name,
- cmd,
- kill,
- status=running,
- cfg_patterns=[],
- errors=[],
- buf=[]
-}).
-
--define(PORT_OPTIONS, [stream, {line, 1024}, binary, exit_status, hide]).
--define(RELISTEN_DELAY, 5000).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-info() ->
- info([]).
-
-info(Options) ->
- gen_server:call(?MODULE, {daemon_info, Options}).
-
-init(_) ->
- process_flag(trap_exit, true),
- ok = config:listen_for_changes(?MODULE, nil),
- Table = ets:new(?MODULE, [protected, set, {keypos, #daemon.port}]),
- reload_daemons(Table),
- {ok, Table}.
-
-terminate(_Reason, Table) ->
- [stop_port(D) || D <- ets:tab2list(Table)],
- ok.
-
-handle_call({daemon_info, Options}, _From, Table) when is_list(Options) ->
- case lists:member(table, Options) of
- true ->
- {reply, {ok, ets:tab2list(Table)}, Table};
- _ ->
- {reply, {ok, Table}, Table}
- end;
-handle_call(Msg, From, Table) ->
- couch_log:error("Unknown call message to ~p from ~p: ~p",
- [?MODULE, From, Msg]),
- {stop, error, Table}.
-
-handle_cast({config_change, Sect, Key}, Table) ->
- restart_daemons(Table, Sect, Key),
- case Sect of
- "os_daemons" -> reload_daemons(Table);
- _ -> ok
- end,
- {noreply, Table};
-handle_cast(stop, Table) ->
- {stop, normal, Table};
-handle_cast(Msg, Table) ->
- couch_log:error("Unknown cast message to ~p: ~p", [?MODULE, Msg]),
- {stop, error, Table}.
-
-handle_info({'EXIT', Port, Reason}, Table) ->
- case ets:lookup(Table, Port) of
- [] ->
- couch_log:info("Port ~p exited after stopping: ~p~n",
- [Port, Reason]);
- [#daemon{status=stopping}] ->
- true = ets:delete(Table, Port);
- [#daemon{name=Name, status=restarting}=D] ->
- couch_log:info("Daemon ~p restarting after config change.", [Name]),
- true = ets:delete(Table, Port),
- {ok, Port2} = start_port(D#daemon.cmd),
- true = ets:insert(Table, D#daemon{
- port=Port2, status=running, kill=undefined, buf=[]
- });
- [#daemon{name=Name, status=halted}] ->
- couch_log:error("Halted daemon process: ~p", [Name]);
- [D] ->
- couch_log:error("Invalid port state at exit: ~p", [D])
- end,
- {noreply, Table};
-handle_info({Port, closed}, Table) ->
- handle_info({Port, {exit_status, closed}}, Table);
-handle_info({Port, {exit_status, Status}}, Table) ->
- case ets:lookup(Table, Port) of
- [] ->
- couch_log:error("Unknown port ~p exiting ~p", [Port, Status]),
- {stop, {error, unknown_port_died, Status}, Table};
- [#daemon{name=Name, status=restarting}=D] ->
- couch_log:info("Daemon ~p restarting after config change.", [Name]),
- true = ets:delete(Table, Port),
- {ok, Port2} = start_port(D#daemon.cmd),
- true = ets:insert(Table, D#daemon{
- port=Port2, status=running, kill=undefined, buf=[]
- }),
- {noreply, Table};
- [#daemon{status=stopping}=D] ->
- % The configuration changed and this daemon is no
- % longer needed.
- couch_log:debug("Port ~p shut down.", [D#daemon.name]),
- true = ets:delete(Table, Port),
- {noreply, Table};
- [D] ->
- % Port died for unknown reason. Check to see if it's
- % died too many times or if we should boot it back up.
- case should_halt([os:timestamp() | D#daemon.errors]) of
- {true, _} ->
- % Halting the process. We won't try and reboot
- % until the configuration changes.
- Fmt = "Daemon ~p halted with exit_status ~p",
- couch_log:error(Fmt, [D#daemon.name, Status]),
- D2 = D#daemon{status=halted, errors=nil, buf=nil},
- true = ets:insert(Table, D2),
- {noreply, Table};
- {false, Errors} ->
- % We're guessing it was a random error, this daemon
- % has behaved so we'll give it another chance.
- Fmt = "Daemon ~p is being rebooted after exit_status ~p",
- couch_log:info(Fmt, [D#daemon.name, Status]),
- true = ets:delete(Table, Port),
- {ok, Port2} = start_port(D#daemon.cmd),
- true = ets:insert(Table, D#daemon{
- port=Port2, status=running, kill=undefined,
- errors=Errors, buf=[]
- }),
- {noreply, Table}
- end;
- _Else ->
- throw(error)
- end;
-handle_info({Port, {data, {noeol, Data}}}, Table) ->
- [#daemon{buf=Buf}=D] = ets:lookup(Table, Port),
- true = ets:insert(Table, D#daemon{buf=[Data | Buf]}),
- {noreply, Table};
-handle_info({Port, {data, {eol, Data}}}, Table) ->
- [#daemon{buf=Buf}=D] = ets:lookup(Table, Port),
- Line = lists:reverse(Buf, Data),
- % The first line echoed back is the kill command
- % for when we go to get rid of the port. Lines after
- % that are considered part of the stdio API.
- case D#daemon.kill of
- undefined ->
- true = ets:insert(Table, D#daemon{kill=?b2l(Line), buf=[]});
- _Else ->
- D2 = case (catch ?JSON_DECODE(Line)) of
- {invalid_json, Rejected} ->
- couch_log:error("Ignoring OS daemon request: ~p",
- [Rejected]),
- D;
- JSON ->
- {ok, D3} = handle_port_message(D, JSON),
- D3
- end,
- true = ets:insert(Table, D2#daemon{buf=[]})
- end,
- {noreply, Table};
-handle_info({Port, Error}, Table) ->
- couch_log:error("Unexpectd message from port ~p: ~p", [Port, Error]),
- stop_port(Port),
- [D] = ets:lookup(Table, Port),
- true = ets:insert(Table, D#daemon{status=restarting, buf=nil}),
- {noreply, Table};
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(Msg, Table) ->
- couch_log:error("Unexpected info message to ~p: ~p", [?MODULE, Msg]),
- {stop, error, Table}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-handle_config_change(Section, Key, _, _, _) ->
- gen_server:cast(?MODULE, {config_change, Section, Key}),
- {ok, nil}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-
-% Internal API
-
-%
-% Port management helpers
-%
-
-start_port(Command) ->
- start_port(Command, []).
-
-start_port(Command, EnvPairs) ->
- PrivDir = couch_util:priv_dir(),
- Spawnkiller = "\"" ++ filename:join(PrivDir, "couchspawnkillable") ++ "\"",
- Opts = case lists:keytake(env, 1, ?PORT_OPTIONS) of
- false ->
- ?PORT_OPTIONS ++ [ {env,EnvPairs} ];
- {value, {env,OldPairs}, SubOpts} ->
- AllPairs = lists:keymerge(1, EnvPairs, OldPairs),
- SubOpts ++ [ {env,AllPairs} ]
- end,
- Port = open_port({spawn, Spawnkiller ++ " " ++ Command}, Opts),
- {ok, Port}.
-
-
-stop_port(#daemon{port=Port, kill=undefined}=D) ->
- couch_log:error("Stopping daemon without a kill command: ~p",
- [D#daemon.name]),
- catch port_close(Port);
-stop_port(#daemon{port=Port}=D) ->
- couch_log:debug("Stopping daemon: ~p", [D#daemon.name]),
- os:cmd(D#daemon.kill),
- catch port_close(Port).
-
-
-handle_port_message(#daemon{port=Port}=Daemon, [<<"get">>, Section]) ->
- KVs = config:get(Section),
- Data = lists:map(fun({K, V}) -> {?l2b(K), ?l2b(V)} end, KVs),
- Json = iolist_to_binary(?JSON_ENCODE({Data})),
- port_command(Port, <<Json/binary, "\n">>),
- {ok, Daemon};
-handle_port_message(#daemon{port=Port}=Daemon, [<<"get">>, Section, Key]) ->
- Value = case config:get(Section, Key, undefined) of
- undefined -> null;
- String -> ?l2b(String)
- end,
- Json = iolist_to_binary(?JSON_ENCODE(Value)),
- port_command(Port, <<Json/binary, "\n">>),
- {ok, Daemon};
-handle_port_message(Daemon, [<<"register">>, Sec]) when is_binary(Sec) ->
- Patterns = lists:usort(Daemon#daemon.cfg_patterns ++ [{?b2l(Sec)}]),
- {ok, Daemon#daemon{cfg_patterns=Patterns}};
-handle_port_message(Daemon, [<<"register">>, Sec, Key])
- when is_binary(Sec) andalso is_binary(Key) ->
- Pattern = {?b2l(Sec), ?b2l(Key)},
- Patterns = lists:usort(Daemon#daemon.cfg_patterns ++ [Pattern]),
- {ok, Daemon#daemon{cfg_patterns=Patterns}};
-handle_port_message(#daemon{name=Name}=Daemon, [<<"log">>, Msg]) ->
- handle_log_message(Name, Msg, <<"info">>),
- {ok, Daemon};
-handle_port_message(#daemon{name=Name}=Daemon, [<<"log">>, Msg, {Opts}]) ->
- Level = couch_util:get_value(<<"level">>, Opts, <<"info">>),
- handle_log_message(Name, Msg, Level),
- {ok, Daemon};
-handle_port_message(#daemon{name=Name}=Daemon, Else) ->
- couch_log:error("Daemon ~p made invalid request: ~p", [Name, Else]),
- {ok, Daemon}.
-
-
-handle_log_message(Name, Msg, _Level) when not is_binary(Msg) ->
- couch_log:error("Invalid log message from daemon ~p: ~p", [Name, Msg]);
-handle_log_message(Name, Msg, <<"debug">>) ->
- couch_log:debug("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
-handle_log_message(Name, Msg, <<"info">>) ->
- couch_log:info("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
-handle_log_message(Name, Msg, <<"error">>) ->
- couch_log:error("Daemon: ~p :: ~s", [Name, ?b2l(Msg)]);
-handle_log_message(Name, Msg, Level) ->
- couch_log:error("Invalid log level from daemon: ~p", [Level]),
- couch_log:info("Daemon: ~p :: ~s", [Name, ?b2l(Msg)]).
-
-%
-% Daemon management helpers
-%
-
-reload_daemons(Table) ->
- % List of daemons we want to have running.
- Configured = lists:sort(config:get("os_daemons")),
-
- % Remove records for daemons that were halted.
- MSpecHalted = #daemon{name='$1', cmd='$2', status=halted, _='_'},
- Halted = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecHalted)]),
- ok = stop_os_daemons(Table, find_to_stop(Configured, Halted, [])),
-
- % Stop daemons that are running
- % Start newly configured daemons
- MSpecRunning = #daemon{name='$1', cmd='$2', status=running, _='_'},
- Running = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecRunning)]),
- ok = stop_os_daemons(Table, find_to_stop(Configured, Running, [])),
- ok = boot_os_daemons(Table, find_to_boot(Configured, Running, [])),
- ok.
-
-
-restart_daemons(Table, Sect, Key) ->
- restart_daemons(Table, Sect, Key, ets:first(Table)).
-
-restart_daemons(_, _, _, '$end_of_table') ->
- ok;
-restart_daemons(Table, Sect, Key, Port) ->
- [D] = ets:lookup(Table, Port),
- HasSect = lists:member({Sect}, D#daemon.cfg_patterns),
- HasKey = lists:member({Sect, Key}, D#daemon.cfg_patterns),
- case HasSect or HasKey of
- true ->
- stop_port(D),
- D2 = D#daemon{status=restarting, buf=nil},
- true = ets:insert(Table, D2);
- _ ->
- ok
- end,
- restart_daemons(Table, Sect, Key, ets:next(Table, Port)).
-
-
-stop_os_daemons(_Table, []) ->
- ok;
-stop_os_daemons(Table, [{Name, Cmd} | Rest]) ->
- [[Port]] = ets:match(Table, #daemon{port='$1', name=Name, cmd=Cmd, _='_'}),
- [D] = ets:lookup(Table, Port),
- case D#daemon.status of
- halted ->
- ets:delete(Table, Port);
- _ ->
- stop_port(D),
- D2 = D#daemon{status=stopping, errors=nil, buf=nil},
- true = ets:insert(Table, D2)
- end,
- stop_os_daemons(Table, Rest).
-
-boot_os_daemons(_Table, []) ->
- ok;
-boot_os_daemons(Table, [{Name, Cmd} | Rest]) ->
- {ok, Port} = start_port(Cmd),
- true = ets:insert(Table, #daemon{port=Port, name=Name, cmd=Cmd}),
- boot_os_daemons(Table, Rest).
-
-% Elements unique to the configured set need to be booted.
-find_to_boot([], _Rest, Acc) ->
- % Nothing else configured.
- Acc;
-find_to_boot([D | R1], [D | R2], Acc) ->
- % Elements are equal, daemon already running.
- find_to_boot(R1, R2, Acc);
-find_to_boot([D1 | R1], [D2 | _]=A2, Acc) when D1 < D2 ->
- find_to_boot(R1, A2, [D1 | Acc]);
-find_to_boot(A1, [_ | R2], Acc) ->
- find_to_boot(A1, R2, Acc);
-find_to_boot(Rest, [], Acc) ->
- % No more candidates for already running. Boot all.
- Rest ++ Acc.
-
-% Elements unique to the running set need to be killed.
-find_to_stop([], Rest, Acc) ->
- % The rest haven't been found, so they must all
- % be ready to die.
- Rest ++ Acc;
-find_to_stop([D | R1], [D | R2], Acc) ->
- % Elements are equal, daemon already running.
- find_to_stop(R1, R2, Acc);
-find_to_stop([D1 | R1], [D2 | _]=A2, Acc) when D1 < D2 ->
- find_to_stop(R1, A2, Acc);
-find_to_stop(A1, [D2 | R2], Acc) ->
- find_to_stop(A1, R2, [D2 | Acc]);
-find_to_stop(_, [], Acc) ->
- % No more running daemons to worry about.
- Acc.
-
-should_halt(Errors) ->
- RetryTimeCfg = config:get("os_daemon_settings", "retry_time", "5"),
- RetryTime = list_to_integer(RetryTimeCfg),
-
- Now = os:timestamp(),
- RecentErrors = lists:filter(fun(Time) ->
- timer:now_diff(Now, Time) =< RetryTime * 1000000
- end, Errors),
-
- RetryCfg = config:get("os_daemon_settings", "max_retries", "3"),
- Retries = list_to_integer(RetryCfg),
-
- {length(RecentErrors) >= Retries, RecentErrors}.
diff --git a/src/couch/src/couch_proc_manager.erl b/src/couch/src/couch_proc_manager.erl
index 04101f240..d2a198e82 100644
--- a/src/couch/src/couch_proc_manager.erl
+++ b/src/couch/src/couch_proc_manager.erl
@@ -372,12 +372,54 @@ new_proc(Client) ->
end,
exit(Resp).
+get_env_for_spec(Spec, Target) ->
+ % loop over os:getenv(), match SPEC_TARGET
+ lists:filtermap(fun(VarName) ->
+ SpecStr = Spec ++ Target,
+ case string:tokens(VarName, "=") of
+ [SpecStr, Cmd] -> {true, Cmd};
+ _Else -> false
+ end
+ end, os:getenv()).
+
+get_query_server(LangStr) ->
+ % look for COUCHDB_QUERY_SERVER_LANGSTR in env
+ % if exists, return value, else undefined
+ UpperLangString = string:to_upper(LangStr),
+ case get_env_for_spec("COUCHDB_QUERY_SERVER_", UpperLangString) of
+ [] -> undefined;
+ [Command] -> Command
+ end.
+
+native_query_server_enabled() ->
+ % 1. [native_query_server] enable_erlang_query_server = true | false
+ % 2. if [native_query_server] erlang == {couch_native_process, start_link, []} -> pretend true as well
+ NativeEnabled = config:get_boolean("native_query_servers", "enable_erlang_query_server", false),
+ NativeLegacyConfig = config:get("native_query_servers", "erlang", ""),
+ NativeLegacyEnabled = NativeLegacyConfig =:= "{couch_native_process, start_link, []}",
+
+ NativeEnabled orelse NativeLegacyEnabled.
+
+get_native_query_server("query") -> % mango query server
+ "{mango_native_proc, start_link, []}";
+get_native_query_server("erlang") -> % erlang query server
+ case native_query_server_enabled() of
+ true -> "{couch_native_process, start_link, []}";
+ _Else -> undefined
+ end;
+get_native_query_server(LangStr) ->
+ % same as above, but COUCHDB_NATIVE_QUERY_SERVER_LANGSTR
+ UpperLangString = string:uppercase(LangStr),
+ case get_env_for_spec("COUCHDB_NATIVE_QUERY_SERVER_", UpperLangString) of
+ [] -> undefined;
+ [Command] -> Command
+ end.
new_proc_int(From, Lang) when is_binary(Lang) ->
LangStr = binary_to_list(Lang),
- case config:get("query_servers", LangStr) of
+ case get_query_server(LangStr) of
undefined ->
- case config:get("native_query_servers", LangStr) of
+ case get_native_query_server(LangStr) of
undefined ->
gen_server:reply(From, {unknown_query_language, Lang});
SpecStr ->
diff --git a/src/couch/src/couch_secondary_sup.erl b/src/couch/src/couch_secondary_sup.erl
index 0c3b7aa5a..9b424dc6a 100644
--- a/src/couch/src/couch_secondary_sup.erl
+++ b/src/couch/src/couch_secondary_sup.erl
@@ -26,18 +26,42 @@ init([]) ->
worker,
dynamic}
],
+ Daemons = [
+ {index_server, {couch_index_server, start_link, []}},
+ {query_servers, {couch_proc_manager, start_link, []}},
+ {vhosts, {couch_httpd_vhost, start_link, []}},
+ {httpd, {couch_httpd, start_link, []}},
+ {uuids, {couch_uuids, start, []}},
+ {auth_cache, {couch_auth_cache, start_link, []}},
+ {compaction_daemon, {couch_compaction_daemon, start_link, []}}
+ ],
+
+ MaybeHttps = case https_enabled() of
+ true -> [{httpsd, {chttpd, start_link, [https]}}];
+ _False -> []
+ end,
+
Children = SecondarySupervisors ++ [
begin
- {ok, {Module, Fun, Args}} = couch_util:parse_term(SpecStr),
+ {Module, Fun, Args} = Spec,
- {list_to_atom(Name),
+ {Name,
{Module, Fun, Args},
permanent,
brutal_kill,
worker,
[Module]}
end
- || {Name, SpecStr}
- <- config:get("daemons"), SpecStr /= ""],
+ || {Name, Spec}
+ <- Daemons ++ MaybeHttps, Spec /= ""],
{ok, {{one_for_one, 50, 3600},
couch_epi:register_service(couch_db_epi, Children)}}.
+
+https_enabled() ->
+ % 1. [ssl] enable = true | false
+ % 2. if [daemons] httpsd == {chttpd, start_link, [https]} -> pretend true as well
+ SSLEnabled = config:get_boolean("ssl", "enable", false),
+ LegacySSL = config:get("daemons", "httpsd"),
+ LegacySSLEnabled = LegacySSL =:= "{chttpd, start_link, [https]}",
+
+ SSLEnabled orelse LegacySSLEnabled.
diff --git a/src/couch/test/couchdb_http_proxy_tests.erl b/src/couch/test/couchdb_http_proxy_tests.erl
deleted file mode 100644
index f60ba3b08..000000000
--- a/src/couch/test/couchdb_http_proxy_tests.erl
+++ /dev/null
@@ -1,456 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_http_proxy_tests).
-
--compile(tuple_calls).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--record(req, {method=get, path="", headers=[], body="", opts=[]}).
-
--define(CONFIG_FIXTURE_TEMP,
- begin
- FileName = filename:join([?TEMPDIR, ?tempfile() ++ ".ini"]),
- {ok, Fd} = file:open(FileName, write),
- ok = file:truncate(Fd),
- ok = file:close(Fd),
- FileName
- end).
--define(TIMEOUT, 5000).
-
-
-start() ->
- % we have to write any config changes to temp ini file to not loose them
- % when supervisor will kill all children due to reaching restart threshold
- % (each httpd_global_handlers changes causes couch_httpd restart)
- Ctx = test_util:start_couch(?CONFIG_CHAIN ++ [?CONFIG_FIXTURE_TEMP], []),
- % 49151 is IANA Reserved, let's assume no one is listening there
- test_util:with_process_restart(couch_httpd, fun() ->
- config:set("httpd_global_handlers", "_error",
- "{couch_httpd_proxy, handle_proxy_req, <<\"http://127.0.0.1:49151/\">>}"
- )
- end),
- Ctx.
-
-setup() ->
- {ok, Pid} = test_web:start_link(),
- Value = lists:flatten(io_lib:format(
- "{couch_httpd_proxy, handle_proxy_req, ~p}",
- [list_to_binary(proxy_url())])),
- test_util:with_process_restart(couch_httpd, fun() ->
- config:set("httpd_global_handlers", "_test", Value)
- end),
- Pid.
-
-teardown(Pid) ->
- test_util:stop_sync_throw(Pid, fun() ->
- test_web:stop()
- end, {timeout, test_web_stop}, ?TIMEOUT).
-
-http_proxy_test_() ->
- {
- "HTTP Proxy handler tests",
- {
- setup,
- fun start/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_proxy_basic_request/1,
- fun should_return_alternative_status/1,
- fun should_respect_trailing_slash/1,
- fun should_proxy_headers/1,
- fun should_proxy_host_header/1,
- fun should_pass_headers_back/1,
- fun should_use_same_protocol_version/1,
- fun should_proxy_body/1,
- fun should_proxy_body_back/1,
- fun should_proxy_chunked_body/1,
- fun should_proxy_chunked_body_back/1,
- fun should_rewrite_location_header/1,
- fun should_not_rewrite_external_locations/1,
- fun should_rewrite_relative_location/1,
- fun should_refuse_connection_to_backend/1
- ]
- }
-
- }
- }.
-
-
-should_proxy_basic_request(_) ->
- Remote = fun(Req) ->
- 'GET' = Req:get(method),
- "/" = Req:get(path),
- 0 = Req:get(body_length),
- <<>> = Req:recv_body(),
- {ok, {200, [{"Content-Type", "text/plain"}], "ok"}}
- end,
- Local = fun
- ({ok, "200", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- ?_test(check_request(#req{}, Remote, Local)).
-
-should_return_alternative_status(_) ->
- Remote = fun(Req) ->
- "/alternate_status" = Req:get(path),
- {ok, {201, [], "ok"}}
- end,
- Local = fun
- ({ok, "201", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{path = "/alternate_status"},
- ?_test(check_request(Req, Remote, Local)).
-
-should_respect_trailing_slash(_) ->
- Remote = fun(Req) ->
- "/trailing_slash/" = Req:get(path),
- {ok, {200, [], "ok"}}
- end,
- Local = fun
- ({ok, "200", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{path="/trailing_slash/"},
- ?_test(check_request(Req, Remote, Local)).
-
-should_proxy_headers(_) ->
- Remote = fun(Req) ->
- "/passes_header" = Req:get(path),
- "plankton" = Req:get_header_value("X-CouchDB-Ralph"),
- {ok, {200, [], "ok"}}
- end,
- Local = fun
- ({ok, "200", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{
- path="/passes_header",
- headers=[{"X-CouchDB-Ralph", "plankton"}]
- },
- ?_test(check_request(Req, Remote, Local)).
-
-should_proxy_host_header(_) ->
- Remote = fun(Req) ->
- "/passes_host_header" = Req:get(path),
- "www.google.com" = Req:get_header_value("Host"),
- {ok, {200, [], "ok"}}
- end,
- Local = fun
- ({ok, "200", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{
- path="/passes_host_header",
- headers=[{"Host", "www.google.com"}]
- },
- ?_test(check_request(Req, Remote, Local)).
-
-should_pass_headers_back(_) ->
- Remote = fun(Req) ->
- "/passes_header_back" = Req:get(path),
- {ok, {200, [{"X-CouchDB-Plankton", "ralph"}], "ok"}}
- end,
- Local = fun
- ({ok, "200", Headers, "ok"}) ->
- lists:member({"X-CouchDB-Plankton", "ralph"}, Headers);
- (_) ->
- false
- end,
- Req = #req{path="/passes_header_back"},
- ?_test(check_request(Req, Remote, Local)).
-
-should_use_same_protocol_version(_) ->
- Remote = fun(Req) ->
- "/uses_same_version" = Req:get(path),
- {1, 0} = Req:get(version),
- {ok, {200, [], "ok"}}
- end,
- Local = fun
- ({ok, "200", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{
- path="/uses_same_version",
- opts=[{http_vsn, {1, 0}}]
- },
- ?_test(check_request(Req, Remote, Local)).
-
-should_proxy_body(_) ->
- Remote = fun(Req) ->
- 'PUT' = Req:get(method),
- "/passes_body" = Req:get(path),
- <<"Hooray!">> = Req:recv_body(),
- {ok, {201, [], "ok"}}
- end,
- Local = fun
- ({ok, "201", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{
- method=put,
- path="/passes_body",
- body="Hooray!"
- },
- ?_test(check_request(Req, Remote, Local)).
-
-should_proxy_body_back(_) ->
- BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
- Remote = fun(Req) ->
- 'GET' = Req:get(method),
- "/passes_eof_body" = Req:get(path),
- {raw, {200, [{"Connection", "close"}], BodyChunks}}
- end,
- Local = fun
- ({ok, "200", _, "foobarbazinga"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{path="/passes_eof_body"},
- ?_test(check_request(Req, Remote, Local)).
-
-should_proxy_chunked_body(_) ->
- BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
- Remote = fun(Req) ->
- 'POST' = Req:get(method),
- "/passes_chunked_body" = Req:get(path),
- RecvBody = fun
- ({Length, Chunk}, [Chunk | Rest]) ->
- Length = size(Chunk),
- Rest;
- ({0, []}, []) ->
- ok
- end,
- ok = Req:stream_body(1024 * 1024, RecvBody, BodyChunks),
- {ok, {201, [], "ok"}}
- end,
- Local = fun
- ({ok, "201", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{
- method=post,
- path="/passes_chunked_body",
- headers=[{"Transfer-Encoding", "chunked"}],
- body=chunked_body(BodyChunks)
- },
- ?_test(check_request(Req, Remote, Local)).
-
-should_proxy_chunked_body_back(_) ->
- ?_test(begin
- Remote = fun(Req) ->
- 'GET' = Req:get(method),
- "/passes_chunked_body_back" = Req:get(path),
- BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
- {chunked, {200, [{"Transfer-Encoding", "chunked"}], BodyChunks}}
- end,
- Req = #req{
- path="/passes_chunked_body_back",
- opts=[{stream_to, self()}]
- },
-
- Resp = check_request(Req, Remote, no_local),
- ?assertMatch({ibrowse_req_id, _}, Resp),
- {_, ReqId} = Resp,
-
- % Grab headers from response
- receive
- {ibrowse_async_headers, ReqId, "200", Headers} ->
- ?assertEqual("chunked",
- proplists:get_value("Transfer-Encoding", Headers)),
- ibrowse:stream_next(ReqId)
- after 1000 ->
- throw({error, timeout})
- end,
-
- ?assertEqual(<<"foobarbazinga">>, recv_body(ReqId, [])),
- ?assertEqual(was_ok, test_web:check_last())
- end).
-
-should_refuse_connection_to_backend(_) ->
- Local = fun
- ({ok, "500", _, _}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{opts=[{url, server_url("/_error")}]},
- ?_test(check_request(Req, no_remote, Local)).
-
-should_rewrite_location_header(_) ->
- {
- "Testing location header rewrites",
- do_rewrite_tests([
- {"Location", proxy_url() ++ "/foo/bar",
- server_url() ++ "/foo/bar"},
- {"Content-Location", proxy_url() ++ "/bing?q=2",
- server_url() ++ "/bing?q=2"},
- {"Uri", proxy_url() ++ "/zip#frag",
- server_url() ++ "/zip#frag"},
- {"Destination", proxy_url(),
- server_url() ++ "/"}
- ])
- }.
-
-should_not_rewrite_external_locations(_) ->
- {
- "Testing no rewrite of external locations",
- do_rewrite_tests([
- {"Location", external_url() ++ "/search",
- external_url() ++ "/search"},
- {"Content-Location", external_url() ++ "/s?q=2",
- external_url() ++ "/s?q=2"},
- {"Uri", external_url() ++ "/f#f",
- external_url() ++ "/f#f"},
- {"Destination", external_url() ++ "/f?q=2#f",
- external_url() ++ "/f?q=2#f"}
- ])
- }.
-
-should_rewrite_relative_location(_) ->
- {
- "Testing relative rewrites",
- do_rewrite_tests([
- {"Location", "/foo",
- server_url() ++ "/foo"},
- {"Content-Location", "bar",
- server_url() ++ "/bar"},
- {"Uri", "/zing?q=3",
- server_url() ++ "/zing?q=3"},
- {"Destination", "bing?q=stuff#yay",
- server_url() ++ "/bing?q=stuff#yay"}
- ])
- }.
-
-
-do_rewrite_tests(Tests) ->
- lists:map(fun({Header, Location, Url}) ->
- should_rewrite_header(Header, Location, Url)
- end, Tests).
-
-should_rewrite_header(Header, Location, Url) ->
- Remote = fun(Req) ->
- "/rewrite_test" = Req:get(path),
- {ok, {302, [{Header, Location}], "ok"}}
- end,
- Local = fun
- ({ok, "302", Headers, "ok"}) ->
- ?assertEqual(Url, couch_util:get_value(Header, Headers)),
- true;
- (E) ->
- ?debugFmt("~p", [E]),
- false
- end,
- Req = #req{path="/rewrite_test"},
- {Header, ?_test(check_request(Req, Remote, Local))}.
-
-
-server_url() ->
- server_url("/_test").
-
-server_url(Resource) ->
- Addr = config:get("httpd", "bind_address"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- lists:concat(["http://", Addr, ":", Port, Resource]).
-
-proxy_url() ->
- "http://127.0.0.1:" ++ integer_to_list(test_web:get_port()).
-
-external_url() ->
- "https://google.com".
-
-check_request(Req, Remote, Local) ->
- case Remote of
- no_remote ->
- ok;
- _ ->
- test_web:set_assert(Remote)
- end,
- Url = case proplists:lookup(url, Req#req.opts) of
- none ->
- server_url() ++ Req#req.path;
- {url, DestUrl} ->
- DestUrl
- end,
- Opts = [{headers_as_is, true} | Req#req.opts],
- Resp =ibrowse:send_req(
- Url, Req#req.headers, Req#req.method, Req#req.body, Opts
- ),
- %?debugFmt("ibrowse response: ~p", [Resp]),
- case Local of
- no_local ->
- ok;
- _ ->
- ?assert(Local(Resp))
- end,
- case {Remote, Local} of
- {no_remote, _} ->
- ok;
- {_, no_local} ->
- ok;
- _ ->
- ?assertEqual(was_ok, test_web:check_last())
- end,
- Resp.
-
-chunked_body(Chunks) ->
- chunked_body(Chunks, []).
-
-chunked_body([], Acc) ->
- iolist_to_binary(lists:reverse(Acc, "0\r\n\r\n"));
-chunked_body([Chunk | Rest], Acc) ->
- Size = to_hex(size(Chunk)),
- chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]).
-
-to_hex(Val) ->
- to_hex(Val, []).
-
-to_hex(0, Acc) ->
- Acc;
-to_hex(Val, Acc) ->
- to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
-
-hex_char(V) when V < 10 -> $0 + V;
-hex_char(V) -> $A + V - 10.
-
-recv_body(ReqId, Acc) ->
- receive
- {ibrowse_async_response, ReqId, Data} ->
- recv_body(ReqId, [Data | Acc]);
- {ibrowse_async_response_end, ReqId} ->
- iolist_to_binary(lists:reverse(Acc));
- Else ->
- throw({error, unexpected_mesg, Else})
- after ?TIMEOUT ->
- throw({error, timeout})
- end.
diff --git a/src/couch/test/couchdb_os_daemons_tests.erl b/src/couch/test/couchdb_os_daemons_tests.erl
deleted file mode 100644
index 1728314bb..000000000
--- a/src/couch/test/couchdb_os_daemons_tests.erl
+++ /dev/null
@@ -1,259 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_os_daemons_tests).
-
-%% tests are UNIX-specific, will not function under Windows
--ifdef(WINDOWS).
--undef(TEST).
--define(NOTEST, 1).
--endif.
-
--include_lib("couch/include/couch_eunit.hrl").
-
-%% keep in sync with couchdb/couch_os_daemons.erl
--record(daemon, {
- port,
- name,
- cmd,
- kill,
- status=running,
- cfg_patterns=[],
- errors=[],
- buf=[]
-}).
-
--define(DAEMON_CONFIGER, "os_daemon_configer.escript").
--define(DAEMON_LOOPER, "os_daemon_looper.escript").
--define(DAEMON_BAD_PERM, "os_daemon_bad_perm.sh").
--define(DAEMON_CAN_REBOOT, "os_daemon_can_reboot.sh").
--define(DAEMON_DIE_ON_BOOT, "os_daemon_die_on_boot.sh").
--define(DAEMON_DIE_QUICKLY, "os_daemon_die_quickly.sh").
--define(TRIES, 40).
--define(TRY_DELAY_MS, 100).
--define(TIMEOUT, 10000).
--define(CONFIG_TIMEOUT, 1000).
-
-
-setup(DName) ->
- Ctx = test_util:start(?MODULE, [couch_log], [{dont_mock, [config]}]),
- {ok, OsDPid} = couch_os_daemons:start_link(),
- config:set("os_daemons", DName,
- filename:join([?FIXTURESDIR, DName]), false),
- % Set configuration option to be used by configuration_reader_test_
- % This will be used in os_daemon_configer.escript:test_get_cfg2
- config:set("uuids", "algorithm","sequential", false),
- config:set("os_daemon_settings", "max_retries", "2", false),
- ensure_n_daemons_are_alive(1),
- {Ctx, OsDPid}.
-
-teardown(_, {Ctx, OsDPid}) ->
- try
- test_util:stop_sync_throw(OsDPid, fun() ->
- exit(OsDPid, shutdown)
- end, {timeout, os_daemon_stop}, ?TIMEOUT)
- catch
- {timeout, os_daemon_stop} ->
- Msg = "~nWARNING: OS daemons test stop ~p msec timeout exceeded~n",
- io:format(standard_error, Msg, [?TIMEOUT]),
- exit(OsDPid, kill)
- end,
- test_util:stop(Ctx).
-
-
-os_daemons_test_() ->
- {
- "OS Daemons tests",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{?DAEMON_LOOPER, Fun} || Fun <- [
- fun should_check_daemon/2,
- fun should_check_daemon_table_form/2,
- fun should_clean_tables_on_daemon_remove/2,
- fun should_spawn_multiple_daemons/2,
- fun should_keep_alive_one_daemon_on_killing_other/2
- ]]
- }
- }.
-
-configuration_reader_test_() ->
- {
- "OS Daemon requests CouchDB configuration",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{?DAEMON_CONFIGER,
- fun should_read_write_config_settings_by_daemon/2}]
-
- }
- }.
-
-error_test_() ->
- {
- "OS Daemon process error tests",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{?DAEMON_BAD_PERM, fun should_fail_due_to_lack_of_permissions/2},
- {?DAEMON_DIE_ON_BOOT, fun should_die_on_boot/2},
- {?DAEMON_DIE_QUICKLY, fun should_die_quickly/2},
- {?DAEMON_CAN_REBOOT, fun should_not_being_halted/2}]
- }
- }.
-
-
-should_check_daemon(DName, _) ->
- ?_test(begin
- {ok, [D]} = couch_os_daemons:info([table]),
- check_daemon(D, DName)
- end).
-
-should_check_daemon_table_form(DName, _) ->
- ?_test(begin
- {ok, Tab} = couch_os_daemons:info(),
- [D] = ets:tab2list(Tab),
- check_daemon(D, DName)
- end).
-
-should_clean_tables_on_daemon_remove(DName, _) ->
- ?_test(begin
- config:delete("os_daemons", DName, false),
- {ok, Tab2} = couch_os_daemons:info(),
- ?_assertEqual([], ets:tab2list(Tab2))
- end).
-
-should_spawn_multiple_daemons(DName, _) ->
- ?_test(begin
- config:set("os_daemons", "bar",
- filename:join([?FIXTURESDIR, DName]), false),
- config:set("os_daemons", "baz",
- filename:join([?FIXTURESDIR, DName]), false),
- ensure_n_daemons_are_alive(3), % DName, "bar" and "baz"
- {ok, Daemons} = couch_os_daemons:info([table]),
- lists:foreach(fun(D) ->
- check_daemon(D)
- end, Daemons),
- {ok, Tab} = couch_os_daemons:info(),
- lists:foreach(fun(D) ->
- check_daemon(D)
- end, ets:tab2list(Tab))
- end).
-
-should_keep_alive_one_daemon_on_killing_other(DName, _) ->
- ?_test(begin
- config:set("os_daemons", "bar",
- filename:join([?FIXTURESDIR, DName]), false),
- ensure_n_daemons_are_alive(2), % DName and "bar"
- {ok, Daemons} = couch_os_daemons:info([table]),
- lists:foreach(fun(D) ->
- check_daemon(D)
- end, Daemons),
-
- config:delete("os_daemons", "bar", false),
- ensure_n_daemons_are_alive(1), % Dname only, "bar" should be dead
- {ok, [D2]} = couch_os_daemons:info([table]),
- check_daemon(D2, DName),
-
- {ok, Tab} = couch_os_daemons:info(),
- [T] = ets:tab2list(Tab),
- check_daemon(T, DName)
- end).
-
-should_read_write_config_settings_by_daemon(DName, _) ->
- ?_test(begin
- % have to wait till daemon run all his tests
- % see daemon's script for more info
- timer:sleep(?CONFIG_TIMEOUT),
- {ok, [D]} = couch_os_daemons:info([table]),
- check_daemon(D, DName)
- end).
-
-should_fail_due_to_lack_of_permissions(DName, _) ->
- ?_test(should_halts(DName, 1000)).
-
-should_die_on_boot(DName, _) ->
- ?_test(should_halts(DName, 2000)).
-
-should_die_quickly(DName, _) ->
- ?_test(should_halts(DName, 4000)).
-
-should_not_being_halted(DName, _) ->
- ?_test(begin
- timer:sleep(1000),
- {ok, [D1]} = couch_os_daemons:info([table]),
- check_daemon(D1, DName, 0),
-
- % Should reboot every two seconds. We're at 1s, so wait
- % until 3s to be in the middle of the next invocation's
- % life span.
-
- timer:sleep(2000),
- {ok, [D2]} = couch_os_daemons:info([table]),
- check_daemon(D2, DName, 1),
-
- % If the kill command changed, that means we rebooted the process.
- ?assertNotEqual(D1#daemon.kill, D2#daemon.kill)
- end).
-
-should_halts(DName, Time) ->
- timer:sleep(Time),
- {ok, [D]} = couch_os_daemons:info([table]),
- check_dead(D, DName),
- config:delete("os_daemons", DName, false).
-
-check_daemon(D) ->
- check_daemon(D, D#daemon.name).
-
-check_daemon(D, Name) ->
- check_daemon(D, Name, 0).
-
-check_daemon(D, Name, Errs) ->
- ?assert(is_port(D#daemon.port)),
- ?assertEqual(Name, D#daemon.name),
- ?assertNotEqual(undefined, D#daemon.kill),
- ?assertEqual(running, D#daemon.status),
- ?assertEqual(Errs, length(D#daemon.errors)),
- ?assertEqual([], D#daemon.buf).
-
-check_dead(D, Name) ->
- ?assert(is_port(D#daemon.port)),
- ?assertEqual(Name, D#daemon.name),
- ?assertNotEqual(undefined, D#daemon.kill),
- ?assertEqual(halted, D#daemon.status),
- ?assertEqual(nil, D#daemon.errors),
- ?assertEqual(nil, D#daemon.buf).
-
-daemons() ->
- {ok, Daemons} = couch_os_daemons:info([table]),
- Daemons.
-
-ensure_n_daemons_are_alive(NumDaemons) ->
- retry(fun() -> length(daemons()) == NumDaemons end, "spawning"),
- retry(fun() ->
- lists:all(fun(D) -> D#daemon.kill =/= undefined end, daemons())
- end, "waiting for kill flag").
-
-retry(Pred, FailReason) ->
- retry(Pred, ?TRIES, FailReason).
-
-retry(_Pred, 0, FailReason) ->
- erlang:error({assertion_failed,[{module, ?MODULE}, {line, ?LINE},
- {reason, "Timed out: " ++ FailReason}]});
-retry(Pred, N, FailReason) ->
- case Pred() of
- true ->
- ok;
- false ->
- timer:sleep(?TRY_DELAY_MS),
- retry(Pred, N - 1, FailReason)
- end.
diff --git a/src/couch/test/couchdb_os_proc_pool.erl b/src/couch/test/couchdb_os_proc_pool.erl
index 65ae5c54c..69f8051ad 100644
--- a/src/couch/test/couchdb_os_proc_pool.erl
+++ b/src/couch/test/couchdb_os_proc_pool.erl
@@ -206,8 +206,7 @@ should_reduce_pool_on_idle_os_procs() ->
setup_config() ->
- MFA = "{couch_native_process, start_link, []}",
- config:set("native_query_servers", "test_lang", MFA, false),
+ config:set("native_query_servers", "enable_erlang_query_server", "true", false),
config:set("query_server_config", "os_process_limit", "3", false),
config:set("query_server_config", "os_process_soft_limit", "2", false),
ok = confirm_config("os_process_soft_limit", "2").
@@ -235,7 +234,7 @@ spawn_client() ->
Parent = self(),
Ref = make_ref(),
Pid = spawn(fun() ->
- Proc = couch_query_servers:get_os_process(<<"test_lang">>),
+ Proc = couch_query_servers:get_os_process(<<"erlang">>),
loop(Parent, Ref, Proc)
end),
{Pid, Ref}.
@@ -245,7 +244,7 @@ spawn_client(DDocId) ->
Ref = make_ref(),
Pid = spawn(fun() ->
DDocKey = {DDocId, <<"1-abcdefgh">>},
- DDoc = #doc{body={[{<<"language">>, <<"test_lang">>}]}},
+ DDoc = #doc{body={[{<<"language">>, <<"erlang">>}]}},
Proc = couch_query_servers:get_ddoc_process(DDoc, DDocKey),
loop(Parent, Ref, Proc)
end),
diff --git a/test/javascript/tests/config.js b/test/javascript/tests/config.js
index 8c7ce9917..a9dce637f 100644
--- a/test/javascript/tests/config.js
+++ b/test/javascript/tests/config.js
@@ -50,10 +50,7 @@ couchTests.config = function(debug) {
}
T(config.couchdb.database_dir);
- T(config.daemons.httpd);
- T(config.httpd_global_handlers._config);
T(config.log.level);
- T(config.query_servers.javascript);
// test that settings can be altered, and that an undefined whitelist allows any change
TEquals(undefined, config.httpd.config_whitelist, "Default whitelist is empty");