summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJoan Touzet <joant@atypical.net>2017-07-16 12:32:41 -0400
committerJoan Touzet <joant@atypical.net>2017-07-16 12:32:41 -0400
commit4d9bd589fe68d6108021ef8417f3e5d91e4d113e (patch)
treedf6f7f11cc17160e6f8d1a211c90dfcb340caa51
parentc38d7aab035bce5f88ed772de59187a39da2768f (diff)
parent4b90eca7ede04abaa72a36e38740cdd9c9b22ebe (diff)
downloadcouchdb-4d9bd589fe68d6108021ef8417f3e5d91e4d113e.tar.gz
Merge branch '593-setup-single-node' of https://github.com/apache/couchdb-setup
-rw-r--r--.gitignore2
-rw-r--r--README.md17
-rw-r--r--src/setup.erl88
-rw-r--r--src/setup_httpd.erl56
-rwxr-xr-xtest/t-single-node.sh46
5 files changed, 165 insertions, 44 deletions
diff --git a/.gitignore b/.gitignore
index 1dbfa4bce..f84f14c93 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,4 @@
ebin
.rebar
+*~
+*.swp
diff --git a/README.md b/README.md
index a6c6d184e..e30c40027 100644
--- a/README.md
+++ b/README.md
@@ -2,21 +2,26 @@ This module implements /_cluster_setup and manages the setting up, duh, of a Cou
### Testing
-```
+```bash
git clone https://git-wip-us.apache.org/repos/asf/couchdb.git
cd couchdb
git checkout setup
./configure
make
+dev/run --no-join -n 2 --admin a:b
+```
-# in dev/run comment out the line `connect_nodes("127.0.0.1", 15984)`
+Then, in a new terminal:
-dev/run --admin a:b
+ $ src/setup/test/t.sh
-# in a new terminal
-src/setup/test/t.sh
+Before running each test, kill the `dev/run` script, then reset the
+CouchDB instances with:
-```
+    $ rm -rf dev/lib/ dev/logs/
+    $ dev/run --no-join -n 2 --admin a:b
+
+Then start the next shell script.
The Plan:
diff --git a/src/setup.erl b/src/setup.erl
index 5a7100491..d0ecd2cd9 100644
--- a/src/setup.erl
+++ b/src/setup.erl
@@ -12,8 +12,9 @@
-module(setup).
--export([enable_cluster/1, finish_cluster/0, add_node/1, receive_cookie/1]).
--export([is_cluster_enabled/0, has_cluster_system_dbs/0]).
+-export([enable_cluster/1, finish_cluster/1, add_node/1, receive_cookie/1]).
+-export([is_cluster_enabled/0, has_cluster_system_dbs/1, cluster_system_dbs/0]).
+-export([enable_single_node/1, is_single_node_enabled/1]).
-include_lib("../couch/include/couch_db.hrl").
@@ -44,25 +45,31 @@ is_cluster_enabled() ->
BindAddress = config:get("chttpd", "bind_address"),
Admins = config:get("admins"),
case {BindAddress, Admins} of
- {"127.0.0.1", _} -> no;
- {_,[]} -> no;
- {_,_} -> ok
+ {"127.0.0.1", _} -> false;
+ {_,[]} -> false;
+ {_,_} -> true
end.
+is_single_node_enabled(Dbs) ->
+ % admins != empty AND dbs exist
+ Admins = config:get("admins"),
+ HasDbs = has_cluster_system_dbs(Dbs),
+ case {Admins, HasDbs} of
+ {[], _} -> false;
+ {_, false} -> false;
+ {_,_} -> true
+ end.
cluster_system_dbs() ->
["_users", "_replicator", "_global_changes"].
-has_cluster_system_dbs() ->
- has_cluster_system_dbs(cluster_system_dbs()).
-
has_cluster_system_dbs([]) ->
- ok;
+ true;
has_cluster_system_dbs([Db|Dbs]) ->
case catch fabric:get_db_info(Db) of
{ok, _} -> has_cluster_system_dbs(Dbs);
- _ -> no
+ _ -> false
end.
enable_cluster(Options) ->
@@ -119,9 +126,9 @@ enable_cluster_http(Options) ->
{error, Else}
end.
-enable_cluster_int(_Options, ok) ->
+enable_cluster_int(_Options, true) ->
{error, cluster_enabled};
-enable_cluster_int(Options, no) ->
+enable_cluster_int(Options, false) ->
% if no admin in config and no admin in req -> error
CurrentAdmins = config:get("admins"),
@@ -132,13 +139,22 @@ enable_cluster_int(Options, no) ->
Pw -> Pw
end
},
-
+ ok = require_admins(CurrentAdmins, NewCredentials),
% if bind_address == 127.0.0.1 and no bind_address in req -> error
CurrentBindAddress = config:get("chttpd","bind_address"),
NewBindAddress = proplists:get_value(bind_address, Options),
- ok = require_admins(CurrentAdmins, NewCredentials),
ok = require_bind_address(CurrentBindAddress, NewBindAddress),
+ NodeCount = couch_util:get_value(node_count, Options),
+ ok = require_node_count(NodeCount),
+ Port = proplists:get_value(port, Options),
+ setup_node(NewCredentials, NewBindAddress, NodeCount, Port),
+ couch_log:notice("Enable Cluster: ~p~n", [Options]).
+
+set_admin(Username, Password) ->
+ config:set("admins", binary_to_list(Username), binary_to_list(Password)).
+
+setup_node(NewCredentials, NewBindAddress, NodeCount, Port) ->
case NewCredentials of
{undefined, undefined} ->
ok;
@@ -153,11 +169,8 @@ enable_cluster_int(Options, no) ->
config:set("chttpd", "bind_address", binary_to_list(NewBindAddress))
end,
- NodeCount = couch_util:get_value(node_count, Options),
- ok = require_node_count(NodeCount),
config:set_integer("cluster", "n", NodeCount),
- Port = proplists:get_value(port, Options),
case Port of
undefined ->
ok;
@@ -165,27 +178,46 @@ enable_cluster_int(Options, no) ->
config:set("chttpd", "port", binary_to_list(Port));
Port when is_integer(Port) ->
config:set_integer("chttpd", "port", Port)
- end,
- couch_log:notice("Enable Cluster: ~p~n", [Options]).
+ end.
-set_admin(Username, Password) ->
- config:set("admins", binary_to_list(Username), binary_to_list(Password)).
+finish_cluster(Options) ->
+ Dbs = proplists:get_value(ensure_dbs_exist, Options, cluster_system_dbs()),
+ finish_cluster_int(Dbs, has_cluster_system_dbs(Dbs)).
-finish_cluster() ->
- finish_cluster_int(has_cluster_system_dbs()).
-finish_cluster_int(ok) ->
+finish_cluster_int(_Dbs, true) ->
{error, cluster_finished};
-finish_cluster_int(no) ->
- lists:foreach(fun fabric:create_db/1, cluster_system_dbs()).
+finish_cluster_int(Dbs, false) ->
+ lists:foreach(fun fabric:create_db/1, Dbs).
+
+
+enable_single_node(Options) ->
+ % if no admin in config and no admin in req -> error
+ CurrentAdmins = config:get("admins"),
+ NewCredentials = {
+ proplists:get_value(username, Options),
+ case proplists:get_value(password_hash, Options) of
+ undefined -> proplists:get_value(password, Options);
+ Pw -> Pw
+ end
+ },
+ ok = require_admins(CurrentAdmins, NewCredentials),
+ % skip bind_address validation, anything is fine
+ NewBindAddress = proplists:get_value(bind_address, Options),
+ Port = proplists:get_value(port, Options),
+
+ setup_node(NewCredentials, NewBindAddress, 1, Port),
+ Dbs = proplists:get_value(ensure_dbs_exist, Options, cluster_system_dbs()),
+ finish_cluster_int(Dbs, has_cluster_system_dbs(Dbs)),
+ couch_log:notice("Enable Single Node: ~p~n", [Options]).
add_node(Options) ->
add_node_int(Options, is_cluster_enabled()).
-add_node_int(_Options, no) ->
+add_node_int(_Options, false) ->
{error, cluster_not_enabled};
-add_node_int(Options, ok) ->
+add_node_int(Options, true) ->
couch_log:notice("add node_int: ~p~n", [Options]),
ErlangCookie = erlang:get_cookie(),
diff --git a/src/setup_httpd.erl b/src/setup_httpd.erl
index a23a3e21d..f4e05ce09 100644
--- a/src/setup_httpd.erl
+++ b/src/setup_httpd.erl
@@ -29,15 +29,27 @@ handle_setup_req(#httpd{method='POST'}=Req) ->
end;
handle_setup_req(#httpd{method='GET'}=Req) ->
ok = chttpd:verify_is_server_admin(Req),
- case setup:is_cluster_enabled() of
- no ->
- chttpd:send_json(Req, 200, {[{state, cluster_disabled}]});
- ok ->
- case setup:has_cluster_system_dbs() of
- no ->
- chttpd:send_json(Req, 200, {[{state, cluster_enabled}]});
- ok ->
- chttpd:send_json(Req, 200, {[{state, cluster_finished}]})
+ Dbs = chttpd:qs_json_value(Req, "ensure_dbs_exist", setup:cluster_system_dbs()),
+ couch_log:notice("Dbs: ~p~n", [Dbs]),
+ case erlang:list_to_integer(config:get("cluster", "n", undefined)) of
+ 1 ->
+ case setup:is_single_node_enabled(Dbs) of
+ false ->
+ chttpd:send_json(Req, 200, {[{state, single_node_disabled}]});
+ true ->
+ chttpd:send_json(Req, 200, {[{state, single_node_enabled}]})
+ end;
+ _ ->
+ case setup:is_cluster_enabled() of
+ false ->
+ chttpd:send_json(Req, 200, {[{state, cluster_disabled}]});
+ true ->
+ case setup:has_cluster_system_dbs(Dbs) of
+ false ->
+ chttpd:send_json(Req, 200, {[{state, cluster_enabled}]});
+ true ->
+ chttpd:send_json(Req, 200, {[{state, cluster_finished}]})
+ end
end
end;
handle_setup_req(#httpd{}=Req) ->
@@ -74,7 +86,30 @@ handle_action("enable_cluster", Setup) ->
handle_action("finish_cluster", Setup) ->
couch_log:notice("finish_cluster: ~p~n", [Setup]),
- case setup:finish_cluster() of
+
+ Options = get_options([
+ {ensure_dbs_exist, <<"ensure_dbs_exist">>}
+ ], Setup),
+ case setup:finish_cluster(Options) of
+ {error, cluster_finished} ->
+ {error, <<"Cluster is already finished">>};
+ Else ->
+ couch_log:notice("finish_cluster: ~p~n", [Else]),
+ ok
+ end;
+
+handle_action("enable_single_node", Setup) ->
+ couch_log:notice("enable_single_node: ~p~n", [Setup]),
+
+ Options = get_options([
+ {ensure_dbs_exist, <<"ensure_dbs_exist">>},
+ {username, <<"username">>},
+ {password, <<"password">>},
+ {password_hash, <<"password_hash">>},
+ {bind_address, <<"bind_address">>},
+ {port, <<"port">>}
+ ], Setup),
+ case setup:enable_single_node(Options) of
{error, cluster_finished} ->
{error, <<"Cluster is already finished">>};
Else ->
@@ -82,6 +117,7 @@ handle_action("finish_cluster", Setup) ->
ok
end;
+
handle_action("add_node", Setup) ->
couch_log:notice("add_node: ~p~n", [Setup]),
diff --git a/test/t-single-node.sh b/test/t-single-node.sh
new file mode 100755
index 000000000..d49043773
--- /dev/null
+++ b/test/t-single-node.sh
@@ -0,0 +1,46 @@
+#!/bin/sh -ex
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+HEADERS="-HContent-Type:application/json"
+# show cluster state:
+curl a:b@127.0.0.1:15986/_nodes/_all_docs
+curl a:b@127.0.0.1:15984/_cluster_setup
+
+# Enable Cluster on single node
+curl a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_single_node","username":"foo","password":"baz","bind_address":"127.0.0.1"}' $HEADERS
+
+# Show cluster state:
+curl a:b@127.0.0.1:15986/_nodes/_all_docs
+curl a:b@127.0.0.1:15984/_all_dbs
+curl a:b@127.0.0.1:15984/_cluster_setup
+
+# Delete a database
+curl -X DELETE a:b@127.0.0.1:15984/_global_changes
+
+# Should show single_node_disabled
+curl a:b@127.0.0.1:15984/_cluster_setup
+
+# Change the check
+curl -g 'a:b@127.0.0.1:15984/_cluster_setup?ensure_dbs_exist=["_replicator","_users"]'
+
+# delete all the things
+curl -X DELETE a:b@127.0.0.1:15984/_replicator
+curl -X DELETE a:b@127.0.0.1:15984/_users
+
+# setup only creating _users
+curl -g a:b@127.0.0.1:15984/_cluster_setup -d '{"action":"enable_single_node","username":"foo","password":"baz","bind_address":"127.0.0.1","ensure_dbs_exist":["_users"]}' $HEADERS
+
+# check it
+curl -g 'a:b@127.0.0.1:15984/_cluster_setup?ensure_dbs_exist=["_users"]'
+
+echo "YAY ALL GOOD"