author     Tony Sun <tony.sun427@gmail.com>    2021-07-09 14:22:50 -0700
committer  GitHub <noreply@github.com>         2021-07-09 14:22:50 -0700
commit     d92209dab872b715c9ef3ce9c704c830336ea8af (patch)
tree       932a2a187800e1a3f1fa6e0fa6b43bf439ee9016
parent     3cec3049f445e70d4f7dd4f9fb78542932488c0b (diff)
parent     3245e9bcce9e640406a22de24db940898bc0386a (diff)
download   couchdb-port-prometheus-3.x.tar.gz
Merge branch '3.x' into port-prometheus-3.x
-rw-r--r--  build-aux/Jenkinsfile.full  136
-rw-r--r--  rebar.config.script  8
-rw-r--r--  rel/overlay/etc/default.ini  108
-rw-r--r--  src/chttpd/src/chttpd.erl  21
-rw-r--r--  src/chttpd/src/chttpd_auth_cache.erl  2
-rw-r--r--  src/chttpd/src/chttpd_db.erl  8
-rw-r--r--  src/chttpd/src/chttpd_misc.erl  2
-rw-r--r--  src/chttpd/src/chttpd_prefer_header.erl  8
-rw-r--r--  src/chttpd/src/chttpd_sup.erl  7
-rw-r--r--  src/chttpd/test/eunit/chttpd_util_test.erl  85
-rw-r--r--  src/couch/src/couch_db.erl  14
-rw-r--r--  src/couch/src/couch_doc.erl  2
-rw-r--r--  src/couch/src/couch_httpd.erl  9
-rw-r--r--  src/couch/src/couch_httpd_misc_handlers.erl  2
-rw-r--r--  src/couch/src/couch_proc_manager.erl  11
-rw-r--r--  src/couch/src/couch_query_servers.erl  2
-rw-r--r--  src/couch/src/couch_server.erl  32
-rw-r--r--  src/couch/src/couch_uuids.erl  2
-rw-r--r--  src/couch/test/eunit/couch_doc_json_tests.erl  2
-rw-r--r--  src/couch_replicator/src/couch_replicator_docs.erl  77
-rw-r--r--  src/couch_replicator/src/couch_replicator_ids.erl  2
-rw-r--r--  src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl  4
-rw-r--r--  src/custodian/src/custodian_db_checker.erl  2
-rw-r--r--  src/custodian/src/custodian_util.erl  4
-rw-r--r--  src/dreyfus/src/dreyfus_rpc.erl  11
-rw-r--r--  src/fabric/src/fabric_doc_attachments.erl  4
-rw-r--r--  src/fabric/src/fabric_doc_atts.erl  4
-rw-r--r--  src/fabric/src/fabric_rpc.erl  4
-rw-r--r--  src/fabric/src/fabric_util.erl  2
-rw-r--r--  src/ioq/src/ioq.erl  14
-rw-r--r--  src/mem3/src/mem3.erl  2
-rw-r--r--  src/mem3/src/mem3_rpc.erl  2
-rw-r--r--  src/mem3/src/mem3_shards.erl  14
-rw-r--r--  src/mem3/src/mem3_util.erl  64
-rw-r--r--  src/mem3/test/eunit/mem3_shards_test.erl  129
-rw-r--r--  src/mem3/test/eunit/mem3_util_test.erl  2
-rw-r--r--  src/setup/src/setup_httpd.erl  4
-rw-r--r--  test/elixir/test/config_test.exs  2
38 files changed, 522 insertions, 286 deletions
diff --git a/build-aux/Jenkinsfile.full b/build-aux/Jenkinsfile.full
index 1c93472be..fdb2f4c6a 100644
--- a/build-aux/Jenkinsfile.full
+++ b/build-aux/Jenkinsfile.full
@@ -558,54 +558,96 @@ pipeline {
} // post
} // stage
-/*
- - Removed 2020.09.15 - VMs are offline
-*/
+ stage('Debian Buster ppc64le') {
+ agent {
+ docker {
+ image 'apache/couchdbci-debian:ppc64le-buster-erlang-20.3.8.26-1'
+ label 'ppc64le'
+ args "${DOCKER_ARGS}"
+ registryUrl 'https://docker.io/'
+ registryCredentialsId 'dockerhub_creds'
+ }
+ }
+ environment {
+ platform = 'buster'
+ sm_ver = '60'
+ }
+ stages {
+ stage('Build from tarball & test') {
+ steps {
+ unstash 'tarball'
+ sh( script: build_and_test )
+ }
+ post {
+ always {
+ junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml, **/src/mango/nosetests.xml, **/test/javascript/junit.xml'
+ }
+ }
+ }
+ stage('Build CouchDB packages') {
+ steps {
+ sh( script: make_packages )
+ sh( script: cleanup_and_save )
+ }
+ post {
+ success {
+ archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ }
+ }
+ }
+ } // stages
+ post {
+ cleanup {
+ sh 'rm -rf ${WORKSPACE}/*'
+ }
+ } // post
+ } // stage
+
+ // stage('Debian Buster s390x') {
+ // agent {
+ // docker {
+ // image 'apache/couchdbci-debian:s390x-buster-erlang-20.3.8.26-1'
+ // label 's390x'
+ // args "${DOCKER_ARGS}"
+ // registryUrl 'https://docker.io/'
+ // registryCredentialsId 'dockerhub_creds'
+ // }
+ // }
+ // environment {
+ // platform = 'buster'
+ // sm_ver = '60'
+ // }
+ // stages {
+ // stage('Build from tarball & test') {
+ // steps {
+ // unstash 'tarball'
+ // sh( script: build_and_test )
+ // }
+ // post {
+ // always {
+ // junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml, **/src/mango/nosetests.xml, **/test/javascript/junit.xml'
+ // }
+ // }
+ // }
+ // stage('Build CouchDB packages') {
+ // steps {
+ // sh( script: make_packages )
+ // sh( script: cleanup_and_save )
+ // }
+ // post {
+ // success {
+ // archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ // }
+ // }
+ // }
+ // } // stages
+ // post {
+ // cleanup {
+ // sh 'rm -rf ${WORKSPACE}/*'
+ // }
+ // } // post
+ // } // stage
-// stage('Debian Buster ppc64le') {
-// agent {
-// docker {
-// image 'apache/couchdbci-debian:ppc64le-buster-erlang-20.3.8.26-1'
-// label 'ppc64le'
-// args "${DOCKER_ARGS}"
-// registryUrl 'https://docker.io/'
-// registryCredentialsId 'dockerhub_creds'
-// }
-// }
-// environment {
-// platform = 'buster'
-// sm_ver = '60'
-// }
-// stages {
-// stage('Build from tarball & test') {
-// steps {
-// unstash 'tarball'
-// sh( script: build_and_test )
-// }
-// post {
-// always {
-// junit '**/.eunit/*.xml, **/_build/*/lib/couchdbtest/*.xml, **/src/mango/nosetests.xml, **/test/javascript/junit.xml'
-// }
-// }
-// }
-// stage('Build CouchDB packages') {
-// steps {
-// sh( script: make_packages )
-// sh( script: cleanup_and_save )
-// }
-// post {
-// success {
-// archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
-// }
-// }
-// }
-// } // stages
-// post {
-// cleanup {
-// sh 'rm -rf ${WORKSPACE}/*'
-// }
-// } // post
-// } // stage
/*
* Example of how to do a qemu-based run, please leave here
diff --git a/rebar.config.script b/rebar.config.script
index d8e0b6548..8267fc13e 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -146,7 +146,7 @@ SubDirs = [
DepDescs = [
%% Independent Apps
-{config, "config", {tag, "2.1.8"}},
+{config, "config", {tag, "2.1.9"}},
{b64url, "b64url", {tag, "1.0.2"}},
{ets_lru, "ets-lru", {tag, "1.1.0"}},
{khash, "khash", {tag, "1.1.0"}},
@@ -160,9 +160,9 @@ DepDescs = [
%% Third party deps
{folsom, "folsom", {tag, "CouchDB-0.8.4"}},
{hyper, "hyper", {tag, "CouchDB-2.2.0-7"}},
-{ibrowse, "ibrowse", {tag, "CouchDB-4.4.2-3"}},
+{ibrowse, "ibrowse", {tag, "CouchDB-4.4.2-5"}},
{jiffy, "jiffy", {tag, "CouchDB-1.0.5-1"}},
-{mochiweb, "mochiweb", {tag, "v2.20.0"}},
+{mochiweb, "mochiweb", {tag, "v2.21.0"}},
{meck, "meck", {tag, "0.9.2"}},
{recon, "recon", {tag, "2.5.0"}}
].
@@ -197,7 +197,7 @@ ErlOpts = case os:getenv("ERL_OPTS") of
end.
AddConfig = [
- {require_otp_vsn, "19|20|21|22|23|24"},
+ {require_otp_vsn, "20|21|22|23|24"},
{deps_dir, "src"},
{deps, lists:map(MakeDep, DepDescs ++ OptionalDeps)},
{sub_dirs, SubDirs},
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 8f864b216..5f916a0af 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -8,13 +8,13 @@ database_dir = {{data_dir}}
view_index_dir = {{view_index_dir}}
; util_driver_dir =
; plugin_dir =
-os_process_timeout = 5000 ; 5 seconds. for view servers.
+;os_process_timeout = 5000 ; 5 seconds. for view servers.
; Maximum number of .couch files to open at once.
; The actual limit may be slightly lower depending on how
; many schedulers you have as the allowance is divided evenly
; among them.
-max_dbs_open = 500
+;max_dbs_open = 500
; Method used to compress everything that is appended to database and view index files, except
; for attachments (see the attachments section). Available methods are:
@@ -23,17 +23,17 @@ max_dbs_open = 500
; snappy - use google snappy, a very fast compressor/decompressor
; deflate_N - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
; lowest compression ratio) to 9 (slowest, highest compression ratio)
-file_compression = snappy
+;file_compression = snappy
; Higher values may give better read performance due to less read operations
; and/or more OS page cache hits, but they can also increase overall response
; time for writes when there are many attachment write requests in parallel.
-attachment_stream_buffer_size = 4096
+;attachment_stream_buffer_size = 4096
; Default security object for databases if not explicitly set
; everyone - same as couchdb 1.0, everyone can read/write
; admin_only - only admins can read/write
; admin_local - sharded dbs on :5984 are read/write for everyone,
; local dbs on :5986 are read/write for admins only
-default_security = admin_only
+;default_security = admin_only
; btree_chunk_size = 1279
; maintenance_mode = false
; stem_interactive_updates = true
@@ -41,7 +41,7 @@ default_security = admin_only
; The speed of processing the _changes feed with doc_ids filter can be
; influenced directly with this setting - increase for faster processing at the
; expense of more memory usage.
-changes_doc_ids_optimization_threshold = 100
+;changes_doc_ids_optimization_threshold = 100
; Maximum document ID length. Can be set to an integer or 'infinity'.
;max_document_id_length = infinity
;
@@ -50,7 +50,7 @@ changes_doc_ids_optimization_threshold = 100
; requests which update a single document as well as individual documents from
; a _bulk_docs request. The size limit is approximate due to the nature of JSON
; encoding.
-max_document_size = 8000000 ; bytes
+;max_document_size = 8000000 ; bytes
;
; Maximum attachment size.
; max_attachment_size = 1073741824 ; 1 gibibyte
@@ -60,7 +60,7 @@ max_document_size = 8000000 ; bytes
;
; The default storage engine to use when creating databases
; is set as a key into the [couchdb_engines] section.
-default_engine = couch
+;default_engine = couch
;
; Enable this to only "soft-delete" databases when DELETE /{db} requests are
; made. This will place a .recovery directory in your data directory and
@@ -78,7 +78,7 @@ default_engine = couch
;single_node = false
; Allow edits on the _security object in the user db. By default, it's disabled.
-users_db_security_editable = false
+;users_db_security_editable = false
[purge]
; Allowed maximum number of documents in one purge request
@@ -106,8 +106,8 @@ couch = couch_bt_engine
;couch_server = false
[cluster]
-q=2
-n=3
+;q=2
+;n=3
; placement = metro-dc-a:2,metro-dc-b:1
; Supply a comma-delimited list of node names that this node should
@@ -121,18 +121,18 @@ n=3
; These settings affect the main, clustered port (5984 by default).
port = {{cluster_port}}
bind_address = 127.0.0.1
-backlog = 512
-socket_options = [{sndbuf, 262144}, {nodelay, true}]
-server_options = [{recbuf, undefined}]
-require_valid_user = false
+;backlog = 512
+;socket_options = [{sndbuf, 262144}, {nodelay, true}]
+;server_options = [{recbuf, undefined}]
+;require_valid_user = false
; require_valid_user_except_for_up = false
; List of headers that will be kept when the header Prefer: return=minimal is included in a request.
; If Server header is left out, Mochiweb will add its own one in.
-prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETag, Server, Transfer-Encoding, Vary
+;prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETag, Server, Transfer-Encoding, Vary
;
; Limit maximum number of databases when tying to get detailed information using
; _dbs_info in a request
-max_db_number_for_dbs_info_req = 100
+;max_db_number_for_dbs_info_req = 100
; set to true to delay the start of a response until the end has been calculated
;buffer_response = false
@@ -194,28 +194,28 @@ max_db_number_for_dbs_info_req = 100
; exists for each document in _users. These databases are writable only
; by the corresponding user. Databases are in the following form:
; userdb-{hex encoded username}
-enable = false
+;enable = false
; If set to true and a user is deleted, the respective database gets
; deleted as well.
-delete_dbs = false
+;delete_dbs = false
; Set a default q value for peruser-created databases that is different from
; cluster / q
;q = 1
; prefix for user databases. If you change this after user dbs have been
; created, the existing databases won't get deleted if the associated user
; gets deleted because of the then prefix mismatch.
-database_prefix = userdb-
+;database_prefix = userdb-
[httpd]
port = {{backend_port}}
bind_address = 127.0.0.1
-authentication_handlers = {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+;authentication_handlers = {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
; Options for the MochiWeb HTTP server.
;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
; For more socket options, consult Erlang's module 'inet' man page.
;socket_options = [{recbuf, undefined}, {sndbuf, 262144}, {nodelay, true}]
-socket_options = [{sndbuf, 262144}]
+;socket_options = [{sndbuf, 262144}]
; These settings were moved to [chttpd]
; secure_rewrites, allow_jsonp, enable_cors, enable_xframe_options,
@@ -230,7 +230,7 @@ socket_options = [{sndbuf, 262144}]
; ratio = 0.01
[ssl]
-port = 6984
+;port = 6984
[chttpd_auth]
;authentication_db = _users
@@ -313,11 +313,11 @@ authentication_db = _users
; CSP (Content Security Policy) Support for _utils
[csp]
-enable = true
+;enable = true
; header_value = default-src 'self'; img-src 'self'; font-src *; script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';
[cors]
-credentials = false
+;credentials = false
; List of origins separated by a comma, * means accept all
; Origins must include the scheme: http://example.com
; You can't set origins: * and credentials = true at the same time.
@@ -364,8 +364,8 @@ credentials = false
; please let us know on the mailing list so we can fine tune the heuristic.
[query_server_config]
; commit_freq = 5
-reduce_limit = true
-os_process_limit = 100
+;reduce_limit = true
+;os_process_limit = 100
; os_process_idle_limit = 300
; os_process_soft_limit = 100
; Timeout for how long a response from a busy view group server can take.
@@ -407,42 +407,42 @@ partitioned||* = true
; First 14 characters are the time in hex. Last 18 are random.
; utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string
; First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these.
-algorithm = sequential
+;algorithm = sequential
; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm.
; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids.
-utc_id_suffix =
+;utc_id_suffix =
# Maximum number of UUIDs retrievable from /_uuids in a single request
-max_count = 1000
+;max_count = 1000
[attachments]
-compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
-compressible_types = text/*, application/javascript, application/json, application/xml
+;compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
+;compressible_types = text/*, application/javascript, application/json, application/xml
[replicator]
; Random jitter applied on replication job startup (milliseconds)
-startup_jitter = 5000
+;startup_jitter = 5000
; Number of actively running replications
-max_jobs = 500
+;max_jobs = 500
;Scheduling interval in milliseconds. During each reschedule cycle
-interval = 60000
+;interval = 60000
; Maximum number of replications to start and stop during rescheduling.
-max_churn = 20
+;max_churn = 20
; More worker processes can give higher network throughput but can also
; imply more disk and network IO.
-worker_processes = 4
+;worker_processes = 4
; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
; also reduce the total amount of used RAM memory.
-worker_batch_size = 500
+;worker_batch_size = 500
; Maximum number of HTTP connections per replication.
-http_connections = 20
+;http_connections = 20
; HTTP connection timeout per replication.
; Even for very fast/reliable networks it might need to be increased if a remote
; database is too busy.
-connection_timeout = 30000
+;connection_timeout = 30000
; Request timeout
;request_timeout = infinity
; If a request fails, the replicator will retry it up to N times.
-retries_per_request = 5
+;retries_per_request = 5
; Use checkpoints
;use_checkpoints = true
; Checkpoint interval
@@ -453,7 +453,7 @@ retries_per_request = 5
; {recbuf, integer()}
; {priority, integer()}
; See the `inet` Erlang module's man page for the full list of options.
-socket_options = [{keepalive, true}, {nodelay, false}]
+;socket_options = [{keepalive, true}, {nodelay, false}]
; Path to a file containing the user's certificate.
;cert_file = /full/path/to/server_cert.pem
; Path to file containing user's private PEM encoded key.
@@ -461,11 +461,11 @@ socket_options = [{keepalive, true}, {nodelay, false}]
; String containing the user's password. Only used if the private keyfile is password protected.
;password = somepassword
; Set to true to validate peer certificates.
-verify_ssl_certificates = false
+;verify_ssl_certificates = false
; File containing a list of peer trusted certificates (in the PEM format).
;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
; Maximum peer certificate depth (must be set even if certificate validation is off).
-ssl_certificate_max_depth = 3
+;ssl_certificate_max_depth = 3
; Maximum document ID length for replication.
;max_document_id_length = infinity
; How much time to wait before retrying after a missing doc exception. This
@@ -543,7 +543,7 @@ ssl_certificate_max_depth = 3
; emergency, emerg
; none
;
-level = info
+;level = info
;
; Set the maximum log message length in bytes that will be
; passed through the writer
@@ -566,7 +566,7 @@ level = info
; over the network, and a journald writer that's more suitable
; when using systemd journald.
;
-writer = stderr
+;writer = stderr
; Journald Writer notes:
;
; The journald writer doesn't have any options. It still writes
@@ -634,11 +634,11 @@ writer = stderr
[ioq]
; The maximum number of concurrent in-flight IO requests that
-concurrency = 10
+;concurrency = 10
; The fraction of the time that a background IO request will be selected
; over an interactive IO request when both queues are non-empty
-ratio = 0.01
+;ratio = 0.01
[ioq.bypass]
; System administrators can choose to submit specific classes of IO directly
@@ -648,23 +648,23 @@ ratio = 0.01
; classes are recognized with the following defaults:
; Messages on their way to an external process (e.g., couchjs) are bypassed
-os_process = true
+;os_process = true
; Disk IO fulfilling interactive read requests is bypassed
-read = true
+;read = true
; Disk IO required to update a database is bypassed
-write = true
+;write = true
; Disk IO required to update views and other secondary indexes is bypassed
-view_update = true
+;view_update = true
; Disk IO issued by the background replication processes that fix any
; inconsistencies between shard copies is queued
-shard_sync = false
+;shard_sync = false
; Disk IO issued by compaction jobs is queued
-compaction = false
+;compaction = false
[dreyfus]
; The name and location of the Clouseau Java service required to
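
The default.ini hunks above all apply one pattern: a value that used to ship uncommented in default.ini is commented out, and the same default is compiled into the module that reads it (see the Erlang hunks below). A minimal sketch of the resulting lookup, using an illustrative module name; config:get_integer/3 is the typed getter used throughout this diff:

    %% Illustrative module, not part of this commit.
    -module(config_default_example).
    -export([max_dbs_open/0]).

    %% Mirrors the new in-code default for "[couchdb] max_dbs_open".
    -define(MAX_DBS_OPEN, 500).

    %% Returns 500 when the key is commented out in default.ini, or the
    %% operator's value when local.ini overrides it.
    max_dbs_open() ->
        config:get_integer("couchdb", "max_dbs_open", ?MAX_DBS_OPEN).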
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index b11338eef..8fd05597d 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -57,6 +57,9 @@
buffer_response=false
}).
+-define(DEFAULT_SERVER_OPTIONS, "[{recbuf, undefined}]").
+-define(DEFAULT_SOCKET_OPTIONS, "[{sndbuf, 262144}, {nodelay, true}]").
+
start_link() ->
start_link(http).
start_link(http) ->
@@ -141,7 +144,12 @@ start_link(Name, Options) ->
end.
get_server_options(Module) ->
- ServerOptsCfg = config:get(Module, "server_options", "[]"),
+ ServerOptsCfg =
+ case Module of
+ "chttpd" -> config:get(Module,
+ "server_options", ?DEFAULT_SERVER_OPTIONS);
+ _ -> config:get(Module, "server_options", "[]")
+ end,
{ok, ServerOpts} = couch_util:parse_term(ServerOptsCfg),
ServerOpts.
@@ -159,13 +167,10 @@ handle_request(MochiReq0) ->
handle_request_int(MochiReq) ->
Begin = os:timestamp(),
- case config:get("chttpd", "socket_options") of
- undefined ->
- ok;
- SocketOptsCfg ->
- {ok, SocketOpts} = couch_util:parse_term(SocketOptsCfg),
- ok = mochiweb_socket:setopts(MochiReq:get(socket), SocketOpts)
- end,
+ SocketOptsCfg = config:get(
+ "chttpd", "socket_options", ?DEFAULT_SOCKET_OPTIONS),
+ {ok, SocketOpts} = couch_util:parse_term(SocketOptsCfg),
+ ok = mochiweb_socket:setopts(MochiReq:get(socket), SocketOpts),
% for the path, use the raw path with the query string and fragment
% removed, but URL quoting left intact
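
Because server_options and socket_options hold Erlang terms serialized as strings, the compiled-in defaults above are strings too and get parsed with couch_util:parse_term/1. A hedged shell sketch of the flow on a node with no overrides, using the two calls taken from the hunk above:

    1> Cfg = config:get("chttpd", "server_options", "[{recbuf, undefined}]").
    "[{recbuf, undefined}]"
    2> couch_util:parse_term(Cfg).
    {ok,[{recbuf,undefined}]}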
diff --git a/src/chttpd/src/chttpd_auth_cache.erl b/src/chttpd/src/chttpd_auth_cache.erl
index fdae27b79..17a31bf55 100644
--- a/src/chttpd/src/chttpd_auth_cache.erl
+++ b/src/chttpd/src/chttpd_auth_cache.erl
@@ -13,7 +13,7 @@
-module(chttpd_auth_cache).
-behaviour(gen_server).
--export([start_link/0, get_user_creds/2, update_user_creds/3]).
+-export([start_link/0, get_user_creds/2, update_user_creds/3, dbname/0]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
code_change/3]).
-export([listen_for_changes/1, changes_callback/2]).
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 27a564be8..8e528b5f2 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -1713,8 +1713,8 @@ parse_doc_query(Req) ->
parse_shards_opt(Req) ->
[
- {n, parse_shards_opt("n", Req, config:get("cluster", "n", "3"))},
- {q, parse_shards_opt("q", Req, config:get("cluster", "q", "8"))},
+ {n, parse_shards_opt("n", Req, config:get_integer("cluster", "n", 3))},
+ {q, parse_shards_opt("q", Req, config:get_integer("cluster", "q", 2))},
{placement, parse_shards_opt(
"placement", Req, config:get("cluster", "placement"))}
].
@@ -2230,7 +2230,7 @@ t_should_default_on_missing_q() ->
?_test(begin
Req = mock_request("/all-test21"),
Opts = parse_shards_opt(Req),
- ?assertEqual("8", couch_util:get_value(q, Opts))
+ ?assertEqual(2, couch_util:get_value(q, Opts))
end).
t_should_throw_on_invalid_q() ->
@@ -2251,7 +2251,7 @@ t_should_default_on_missing_n() ->
?_test(begin
Req = mock_request("/all-test21"),
Opts = parse_shards_opt(Req),
- ?assertEqual("3", couch_util:get_value(n, Opts))
+ ?assertEqual(3, couch_util:get_value(n, Opts))
end).
t_should_throw_on_invalid_n() ->
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 84dba5893..ffe0344b3 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -93,7 +93,7 @@ handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
{_ActionKey, "/", RelativePath} ->
% GET /_utils/path or GET /_utils/
CachingHeaders = [{"Cache-Control", "private, must-revalidate"}],
- EnableCsp = config:get("csp", "enable", "false"),
+ EnableCsp = config:get("csp", "enable", "true"),
Headers = maybe_add_csp_headers(CachingHeaders, EnableCsp),
chttpd:serve_file(Req, RelativePath, DocumentRoot, Headers);
{_ActionKey, "", _RelativePath} ->
diff --git a/src/chttpd/src/chttpd_prefer_header.erl b/src/chttpd/src/chttpd_prefer_header.erl
index 1ad1443ea..de2660399 100644
--- a/src/chttpd/src/chttpd_prefer_header.erl
+++ b/src/chttpd/src/chttpd_prefer_header.erl
@@ -22,6 +22,11 @@
-include_lib("couch/include/couch_db.hrl").
+-define(DEFAULT_PREFER_MINIMAL,
+ "Cache-Control, Content-Length, Content-Range, "
+ "Content-Type, ETag, Server, Transfer-Encoding, Vary").
+
+
maybe_return_minimal(#httpd{mochi_req = MochiReq}, Headers) ->
case get_prefer_header(MochiReq) of
"return=minimal" ->
@@ -47,7 +52,8 @@ filter_headers(Headers, IncludeList) ->
get_header_list() ->
- SectionStr = config:get("chttpd", "prefer_minimal", ""),
+ SectionStr = config:get("chttpd",
+ "prefer_minimal", ?DEFAULT_PREFER_MINIMAL),
split_list(SectionStr).
diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl
index 5bcd4bd62..78fc66979 100644
--- a/src/chttpd/src/chttpd_sup.erl
+++ b/src/chttpd/src/chttpd_sup.erl
@@ -24,6 +24,8 @@
%% Helper macro for declaring children of supervisor
-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 100, Type, [I]}).
+-define(DEFAULT_BACKLOG, 512).
+-define(DEFAULT_SERVER_OPTIONS, "[{recbuf, undefined}]").
start_link(Args) ->
case supervisor:start_link({local, ?MODULE}, ?MODULE, Args) of
@@ -75,8 +77,9 @@ settings() ->
[
{bind_address, config:get("chttpd", "bind_address")},
{port, config:get("chttpd", "port")},
- {backlog, config:get("chttpd", "backlog")},
- {server_options, config:get("chttpd", "server_options")}
+ {backlog, config:get_integer("chttpd", "backlog", ?DEFAULT_BACKLOG)},
+ {server_options, config:get("chttpd",
+ "server_options", ?DEFAULT_SERVER_OPTIONS)}
].
maybe_replace(Key, Value, Settings) ->
diff --git a/src/chttpd/test/eunit/chttpd_util_test.erl b/src/chttpd/test/eunit/chttpd_util_test.erl
index 24403b5cf..41fe6cb23 100644
--- a/src/chttpd/test/eunit/chttpd_util_test.erl
+++ b/src/chttpd/test/eunit/chttpd_util_test.erl
@@ -18,6 +18,15 @@
setup() ->
+ ok = lists:foreach(fun(Section) ->
+ ok = config_delete_all_keys(Section)
+ end, ["httpd", "chttpd", "couch_httpd_auth", "chttpd_auth"]),
+
+ ok = config:set("httpd", "authentication_handlers",
+ "{couch_httpd_auth, cookie_authentication_handler}, "
+ "{couch_httpd_auth, default_authentication_handler}", _Persist = false),
+ ok = config:set("httpd", "backlog", "512", _Persist = false),
+ ok = config:set("chttpd", "require_valid_user", "false", _Persist = false),
ok = config:set("httpd", "both_exist", "get_in_httpd", _Persist = false),
ok = config:set("chttpd", "both_exist", "get_in_chttpd", _Persist = false),
ok = config:set("httpd", "httpd_only", "true", _Persist = false),
@@ -29,6 +38,9 @@ setup() ->
teardown(_) ->
+ ok = config:delete("httpd", "authentication_handlers", _Persist = false),
+ ok = config:delete("httpd", "backlog", _Persist = false),
+ ok = config:delete("chttpd", "require_valid_user", _Persist = false),
ok = config:delete("httpd", "both_exist", _Persist = false),
ok = config:delete("chttpd", "both_exist", _Persist = false),
ok = config:delete("httpd", "httpd_only", _Persist = false),
@@ -39,6 +51,12 @@ teardown(_) ->
ok = config:delete("chttpd_auth", "ca_only", _Persist = false).
+config_delete_all_keys(Section) ->
+ lists:foreach(fun({Key, _Val}) ->
+ ok = config:delete(Section, Key, _Persist = false)
+ end, config:get(Section)).
+
+
chttpd_util_config_test_() ->
{
"chttpd util config tests",
@@ -51,25 +69,17 @@ chttpd_util_config_test_() ->
fun setup/0,
fun teardown/1,
[
- ?TDEF_FE(test_behavior),
+ ?TDEF_FE(test_chttpd_behavior),
?TDEF_FE(test_with_undefined_option),
- ?TDEF_FE(test_with_httpd_option),
- ?TDEF_FE(test_with_chttpd_option),
- ?TDEF_FE(test_with_chttpd_option_which_moved_from_httpd),
- ?TDEF_FE(test_get_chttpd_config_integer),
- ?TDEF_FE(test_get_chttpd_config_boolean),
?TDEF_FE(test_auth_behavior),
- ?TDEF_FE(test_auth_with_undefined_option),
- ?TDEF_FE(test_auth_with_moved_options),
- ?TDEF_FE(test_get_chttpd_auth_config_integer),
- ?TDEF_FE(test_get_chttpd_auth_config_boolean)
+ ?TDEF_FE(test_auth_with_undefined_option)
]
}
}
}.
-test_behavior(_) ->
+test_chttpd_behavior(_) ->
?assertEqual("get_in_chttpd", chttpd_util:get_chttpd_config("both_exist")),
?assertEqual(1, chttpd_util:get_chttpd_config_integer("chttpd_only", 0)),
?assert(chttpd_util:get_chttpd_config_boolean("httpd_only", false)).
@@ -86,40 +96,6 @@ test_with_undefined_option(_) ->
?assertNot(chttpd_util:get_chttpd_config("undefined_option", false)).
-test_with_httpd_option(_) ->
- ?assertEqual("{couch_httpd_auth, cookie_authentication_handler}, " ++
- "{couch_httpd_auth, default_authentication_handler}",
- chttpd_util:get_chttpd_config("authentication_handlers")).
-
-
-test_with_chttpd_option(_) ->
- ?assertEqual("512", chttpd_util:get_chttpd_config("backlog")),
- ?assertEqual("512", chttpd_util:get_chttpd_config("backlog", 123)),
- ?assertEqual(512, chttpd_util:get_chttpd_config_integer("backlog", 123)),
- ?assertEqual("false",
- chttpd_util:get_chttpd_config("require_valid_user")),
- ?assertEqual("false",
- chttpd_util:get_chttpd_config("require_valid_user", "true")),
- ?assertEqual(false,
- chttpd_util:get_chttpd_config_boolean("require_valid_user", true)).
-
-
-test_with_chttpd_option_which_moved_from_httpd(_) ->
- ?assertEqual(undefined, chttpd_util:get_chttpd_config("max_uri_length")),
- ?assertEqual(8000, chttpd_util:get_chttpd_config("max_uri_length", 8000)),
- ?assertEqual(undefined, chttpd_util:get_chttpd_config("WWW-Authenticate")),
- ?assert(chttpd_util:get_chttpd_config("enable_cors", true)).
-
-
-test_get_chttpd_config_integer(_) ->
- ?assertEqual(123,
- chttpd_util:get_chttpd_config_integer("max_http_request_size", 123)).
-
-
-test_get_chttpd_config_boolean(_) ->
- ?assert(chttpd_util:get_chttpd_config_boolean("allow_jsonp", true)).
-
-
test_auth_behavior(_) ->
?assertEqual("ca", chttpd_util:get_chttpd_auth_config("both_exist")),
?assertEqual(1, chttpd_util:get_chttpd_auth_config_integer("ca_only", 0)),
@@ -135,22 +111,3 @@ test_auth_with_undefined_option(_) ->
?assertEqual("", chttpd_util:get_chttpd_auth_config("undefine", "")),
?assert(chttpd_util:get_chttpd_auth_config("undefine", true)),
?assertNot(chttpd_util:get_chttpd_auth_config("undefine", false)).
-
-
-test_auth_with_moved_options(_) ->
- ?assertEqual("/_utils/session.html", chttpd_util:get_chttpd_auth_config(
- "authentication_redirect", "/_utils/session.html")),
- ?assert(chttpd_util:get_chttpd_auth_config("require_valid_user", true)),
- ?assertEqual(10, chttpd_util:get_chttpd_auth_config("iterations", 10)).
-
-
-test_get_chttpd_auth_config_integer(_) ->
- ?assertEqual(123, chttpd_util:get_chttpd_auth_config_integer(
- "timeout", 123)).
-
-
-test_get_chttpd_auth_config_boolean(_) ->
- ?assertNot(chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false)),
- ?assert(chttpd_util:get_chttpd_auth_config_boolean(
- "allow_persistent_cookies", true)).
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index 390a198df..8837101ec 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -142,6 +142,8 @@
"^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
"(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end
).
+-define(DEFAULT_COMPRESSIBLE_TYPES,
+ "text/*, application/javascript, application/json, application/xml").
start_link(Engine, DbName, Filepath, Options) ->
Arg = {Engine, DbName, Filepath, Options},
@@ -1391,7 +1393,8 @@ compressible_att_type(MimeType) when is_binary(MimeType) ->
compressible_att_type(?b2l(MimeType));
compressible_att_type(MimeType) ->
TypeExpList = re:split(
- config:get("attachments", "compressible_types", ""),
+ config:get("attachments", "compressible_types",
+ ?DEFAULT_COMPRESSIBLE_TYPES),
"\\s*,\\s*",
[{return, list}]
),
@@ -1416,13 +1419,12 @@ compressible_att_type(MimeType) ->
% pretend that no Content-MD5 exists.
with_stream(Db, Att, Fun) ->
[InMd5, Type, Enc] = couch_att:fetch([md5, type, encoding], Att),
- BufferSize = list_to_integer(
- config:get("couchdb", "attachment_stream_buffer_size", "4096")),
+ BufferSize = config:get_integer("couchdb",
+ "attachment_stream_buffer_size", 4096),
Options = case (Enc =:= identity) andalso compressible_att_type(Type) of
true ->
- CompLevel = list_to_integer(
- config:get("attachments", "compression_level", "0")
- ),
+ CompLevel = config:get_integer(
+ "attachments", "compression_level", 8),
[
{buffer_size, BufferSize},
{encoding, gzip},
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
index 33ad14f0b..ec16d21db 100644
--- a/src/couch/src/couch_doc.erl
+++ b/src/couch/src/couch_doc.erl
@@ -131,7 +131,7 @@ from_json_obj_validate(EJson) ->
from_json_obj_validate(EJson, undefined).
from_json_obj_validate(EJson, DbName) ->
- MaxSize = config:get_integer("couchdb", "max_document_size", 4294967296),
+ MaxSize = config:get_integer("couchdb", "max_document_size", 8000000),
Doc = from_json_obj(EJson, DbName),
case couch_ejson_size:encoded_size(Doc#doc.body) =< MaxSize of
true ->
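
This one-line change is a behavioral fix: the in-code fallback for max_document_size was 4294967296 bytes (4 GiB) while default.ini documents 8000000 bytes, so a node with the key unset accepted far larger documents than documented. A minimal sketch of the check from_json_obj_validate/2 performs, wrapped in an illustrative module:

    -module(max_doc_size_example).
    -export([within_limit/1]).

    %% Approximate encoded-size check, now falling back to the documented
    %% 8 MB rather than the old 4 GiB.
    within_limit(Body) ->
        MaxSize = config:get_integer("couchdb", "max_document_size", 8000000),
        couch_ejson_size:encoded_size(Body) =< MaxSize.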
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 5417bdd67..00379bbb4 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -44,6 +44,10 @@
-define(HANDLER_NAME_IN_MODULE_POS, 6).
-define(MAX_DRAIN_BYTES, 1048576).
-define(MAX_DRAIN_TIME_MSEC, 1000).
+-define(DEFAULT_SOCKET_OPTIONS, "[{sndbuf, 262144}]").
+-define(DEFAULT_AUTHENTICATION_HANDLERS,
+ "{couch_httpd_auth, cookie_authentication_handler}, "
+ "{couch_httpd_auth, default_authentication_handler}").
start_link() ->
start_link(http).
@@ -110,7 +114,7 @@ start_link(Name, Options) ->
{ok, ServerOptions} = couch_util:parse_term(
config:get("httpd", "server_options", "[]")),
{ok, SocketOptions} = couch_util:parse_term(
- config:get("httpd", "socket_options", "[]")),
+ config:get("httpd", "socket_options", ?DEFAULT_SOCKET_OPTIONS)),
set_auth_handlers(),
Handlers = get_httpd_handlers(),
@@ -152,7 +156,8 @@ stop() ->
set_auth_handlers() ->
AuthenticationSrcs = make_fun_spec_strs(
- config:get("httpd", "authentication_handlers", "")),
+ config:get("httpd", "authentication_handlers",
+ ?DEFAULT_AUTHENTICATION_HANDLERS)),
AuthHandlers = lists:map(
fun(A) -> {auth_handler_name(A), make_arity_1_fun(A)} end, AuthenticationSrcs),
AuthenticationFuns = AuthHandlers ++ [
diff --git a/src/couch/src/couch_httpd_misc_handlers.erl b/src/couch/src/couch_httpd_misc_handlers.erl
index 9bbede746..ea9c1cb84 100644
--- a/src/couch/src/couch_httpd_misc_handlers.erl
+++ b/src/couch/src/couch_httpd_misc_handlers.erl
@@ -82,7 +82,7 @@ handle_task_status_req(Req) ->
handle_uuids_req(#httpd{method='GET'}=Req) ->
- Max = list_to_integer(config:get("uuids","max_count","1000")),
+ Max = config:get_integer("uuids","max_count", 1000),
Count = try list_to_integer(couch_httpd:qs_value(Req, "count", "1")) of
N when N > Max ->
throw({bad_request, <<"count parameter too large">>});
diff --git a/src/couch/src/couch_proc_manager.erl b/src/couch/src/couch_proc_manager.erl
index 1f6d0167b..e7a25a6d2 100644
--- a/src/couch/src/couch_proc_manager.erl
+++ b/src/couch/src/couch_proc_manager.erl
@@ -584,11 +584,11 @@ can_spawn(#state{hard_limit = HardLimit, counts = Counts}, Lang) ->
get_proc_config() ->
- Limit = config:get("query_server_config", "reduce_limit", "true"),
- Timeout = config:get("couchdb", "os_process_timeout", "5000"),
+ Limit = config:get_boolean("query_server_config", "reduce_limit", true),
+ Timeout = config:get_integer("couchdb", "os_process_timeout", 5000),
{[
- {<<"reduce_limit">>, list_to_atom(Limit)},
- {<<"timeout">>, list_to_integer(Timeout)}
+ {<<"reduce_limit">>, Limit},
+ {<<"timeout">>, Timeout}
]}.
@@ -598,5 +598,4 @@ get_hard_limit() ->
get_soft_limit() ->
- LimStr = config:get("query_server_config", "os_process_soft_limit", "100"),
- list_to_integer(LimStr).
+ config:get_integer("query_server_config", "os_process_soft_limit", 100).
diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
index ebf82ab31..10b8048dd 100644
--- a/src/couch/src/couch_query_servers.erl
+++ b/src/couch/src/couch_query_servers.erl
@@ -623,7 +623,7 @@ proc_set_timeout(Proc, Timeout) ->
apply(Mod, Func, [Proc#proc.pid, Timeout]).
get_os_process_timeout() ->
- list_to_integer(config:get("couchdb", "os_process_timeout", "5000")).
+ config:get_integer("couchdb", "os_process_timeout", 5000).
get_ddoc_process(#doc{} = DDoc, DDocKey) ->
% remove this case statement
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
index feeebdf17..04e8c6cf9 100644
--- a/src/couch/src/couch_server.erl
+++ b/src/couch/src/couch_server.erl
@@ -38,6 +38,7 @@
-define(MAX_DBS_OPEN, 500).
-define(RELISTEN_DELAY, 5000).
+-define(DEFAULT_ENGINE, "couch").
-record(server,{
root_dir = [],
@@ -270,10 +271,9 @@ init([N]) ->
RootDir = config:get("couchdb", "database_dir", "."),
Engines = get_configured_engines(),
- MaxDbsOpen = list_to_integer(
- config:get("couchdb", "max_dbs_open", integer_to_list(?MAX_DBS_OPEN))),
- UpdateLruOnRead =
- config:get("couchdb", "update_lru_on_read", "false") =:= "true",
+ MaxDbsOpen = config:get_integer("couchdb", "max_dbs_open", ?MAX_DBS_OPEN),
+ UpdateLruOnRead = config:get_boolean(
+ "couchdb", "update_lru_on_read", false),
ok = config:listen_for_changes(?MODULE, N),
ok = couch_file:init_delete_dir(RootDir),
hash_admin_passwords(),
@@ -823,20 +823,16 @@ get_default_engine(Server, DbName) ->
engines = Engines
} = Server,
Default = {couch_bt_engine, make_filepath(RootDir, DbName, "couch")},
- case config:get("couchdb", "default_engine") of
- Extension when is_list(Extension) ->
- case lists:keyfind(Extension, 1, Engines) of
- {Extension, Module} ->
- {ok, {Module, make_filepath(RootDir, DbName, Extension)}};
- false ->
- Fmt = "Invalid storage engine extension ~s,"
- " configured engine extensions are: ~s",
- Exts = [E || {E, _} <- Engines],
- Args = [Extension, string:join(Exts, ", ")],
- couch_log:error(Fmt, Args),
- {ok, Default}
- end;
- _ ->
+ Extension = config:get("couchdb", "default_engine", ?DEFAULT_ENGINE),
+ case lists:keyfind(Extension, 1, Engines) of
+ {Extension, Module} ->
+ {ok, {Module, make_filepath(RootDir, DbName, Extension)}};
+ false ->
+ Fmt = "Invalid storage engine extension ~s,"
+ " configured engine extensions are: ~s",
+ Exts = [E || {E, _} <- Engines],
+ Args = [Extension, string:join(Exts, ", ")],
+ couch_log:error(Fmt, Args),
{ok, Default}
end.
diff --git a/src/couch/src/couch_uuids.erl b/src/couch/src/couch_uuids.erl
index b9c03b502..3fffd04b3 100644
--- a/src/couch/src/couch_uuids.erl
+++ b/src/couch/src/couch_uuids.erl
@@ -98,7 +98,7 @@ inc() ->
couch_rand:uniform(16#ffd).
state() ->
- AlgoStr = config:get("uuids", "algorithm", "random"),
+ AlgoStr = config:get("uuids", "algorithm", "sequential"),
case couch_util:to_existing_atom(AlgoStr) of
random ->
random;
diff --git a/src/couch/test/eunit/couch_doc_json_tests.erl b/src/couch/test/eunit/couch_doc_json_tests.erl
index 51f228900..a68932eca 100644
--- a/src/couch/test/eunit/couch_doc_json_tests.erl
+++ b/src/couch/test/eunit/couch_doc_json_tests.erl
@@ -39,7 +39,7 @@ mock(couch_log) ->
mock(config) ->
meck:new(config, [passthrough]),
meck:expect(config, get_integer,
- fun("couchdb", "max_document_size", 4294967296) -> 1024 end),
+ fun("couchdb", "max_document_size", 8000000) -> 1024 end),
meck:expect(config, get, fun(_, _) -> undefined end),
meck:expect(config, get, fun(_, _, Default) -> Default end),
ok.
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index 3087195bd..7c60e8a43 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -456,26 +456,26 @@ maybe_add_trailing_slash(Url) ->
make_options(Props) ->
Options0 = lists:ukeysort(1, convert_options(Props)),
Options = check_options(Options0),
- DefWorkers = config:get("replicator", "worker_processes", "4"),
- DefBatchSize = config:get("replicator", "worker_batch_size", "500"),
- DefConns = config:get("replicator", "http_connections", "20"),
- DefTimeout = config:get("replicator", "connection_timeout", "30000"),
- DefRetries = config:get("replicator", "retries_per_request", "5"),
- UseCheckpoints = config:get("replicator", "use_checkpoints", "true"),
- DefCheckpointInterval = config:get("replicator", "checkpoint_interval",
- "30000"),
+ DefWorkers = config:get_integer("replicator", "worker_processes", 4),
+ DefBatchSize = config:get_integer("replicator", "worker_batch_size", 500),
+ DefConns = config:get_integer("replicator", "http_connections", 20),
+ DefTimeout = config:get_integer("replicator", "connection_timeout", 30000),
+ DefRetries = config:get_integer("replicator", "retries_per_request", 5),
+ UseCheckpoints = config:get_boolean("replicator", "use_checkpoints", true),
+ DefCheckpointInterval = config:get_integer("replicator",
+ "checkpoint_interval", 30000),
{ok, DefSocketOptions} = couch_util:parse_term(
config:get("replicator", "socket_options",
"[{keepalive, true}, {nodelay, false}]")),
lists:ukeymerge(1, Options, lists:keysort(1, [
- {connection_timeout, list_to_integer(DefTimeout)},
- {retries, list_to_integer(DefRetries)},
- {http_connections, list_to_integer(DefConns)},
+ {connection_timeout, DefTimeout},
+ {retries, DefRetries},
+ {http_connections, DefConns},
{socket_options, DefSocketOptions},
- {worker_batch_size, list_to_integer(DefBatchSize)},
- {worker_processes, list_to_integer(DefWorkers)},
- {use_checkpoints, list_to_existing_atom(UseCheckpoints)},
- {checkpoint_interval, list_to_integer(DefCheckpointInterval)}
+ {worker_batch_size, DefBatchSize},
+ {worker_processes, DefWorkers},
+ {use_checkpoints, UseCheckpoints},
+ {checkpoint_interval, DefCheckpointInterval}
])).
@@ -571,8 +571,7 @@ parse_proxy_params(ProxyUrl) ->
password = Passwd,
protocol = Protocol
} = ibrowse_lib:parse_url(ProxyUrl),
- [
- {proxy_protocol, Protocol},
+ Params = [
{proxy_host, Host},
{proxy_port, Port}
] ++ case is_list(User) andalso is_list(Passwd) of
@@ -580,21 +579,38 @@ parse_proxy_params(ProxyUrl) ->
[];
true ->
[{proxy_user, User}, {proxy_password, Passwd}]
- end.
+ end,
+ case Protocol of
+ socks5 ->
+ [proxy_to_socks5(Param) || Param <- Params];
+ _ ->
+ Params
+ end.
+
+
+-spec proxy_to_socks5({atom(), string()}) -> {atom(), string()}.
+proxy_to_socks5({proxy_host, Val}) ->
+ {socks5_host, Val};
+proxy_to_socks5({proxy_port, Val}) ->
+ {socks5_port, Val};
+proxy_to_socks5({proxy_user, Val}) ->
+ {socks5_user, Val};
+proxy_to_socks5({proxy_password, Val}) ->
+ {socks5_password, Val}.
-spec ssl_params([_]) -> [_].
ssl_params(Url) ->
case ibrowse_lib:parse_url(Url) of
#url{protocol = https} ->
- Depth = list_to_integer(
- config:get("replicator", "ssl_certificate_max_depth", "3")
- ),
- VerifyCerts = config:get("replicator", "verify_ssl_certificates"),
+ Depth = config:get_integer("replicator",
+ "ssl_certificate_max_depth", 3),
+ VerifyCerts = config:get_boolean("replicator",
+ "verify_ssl_certificates", false),
CertFile = config:get("replicator", "cert_file", undefined),
KeyFile = config:get("replicator", "key_file", undefined),
Password = config:get("replicator", "password", undefined),
- SslOpts = [{depth, Depth} | ssl_verify_options(VerifyCerts =:= "true")],
+ SslOpts = [{depth, Depth} | ssl_verify_options(VerifyCerts)],
SslOpts1 = case CertFile /= undefined andalso KeyFile /= undefined of
true ->
case Password of
@@ -790,6 +806,21 @@ check_strip_credentials_test() ->
]].
+parse_proxy_params_test() ->
+ ?assertEqual([
+ {proxy_host, "foo.com"},
+ {proxy_port, 443},
+ {proxy_user, "u"},
+ {proxy_password, "p"}
+ ], parse_proxy_params("https://u:p@foo.com")),
+ ?assertEqual([
+ {socks5_host, "foo.com"},
+ {socks5_port, 1080},
+ {socks5_user, "u"},
+ {socks5_password, "p"}
+ ], parse_proxy_params("socks5://u:p@foo.com")).
+
+
setup() ->
DbName = ?tempdb(),
{ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
diff --git a/src/couch_replicator/src/couch_replicator_ids.erl b/src/couch_replicator/src/couch_replicator_ids.erl
index 80ff0016a..316e6a28a 100644
--- a/src/couch_replicator/src/couch_replicator_ids.erl
+++ b/src/couch_replicator/src/couch_replicator_ids.erl
@@ -62,7 +62,7 @@ replication_id(#rep{} = Rep, 2) ->
% TODO: we might be under an SSL socket server only, or both under
% SSL and a non-SSL socket.
% ... mochiweb_socket_server:get(https, port)
- list_to_integer(config:get("httpd", "port", "5984"))
+ config:get_integer("httpd", "port", 5984)
end,
Src = get_rep_endpoint(Rep#rep.source),
Tgt = get_rep_endpoint(Rep#rep.target),
diff --git a/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl
index 63310d39e..dae4cd122 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl
@@ -101,10 +101,10 @@ should_create_target_with_default({_Ctx, {Source, Target}}) ->
{ok, TargetInfo} = fabric:get_db_info(Target),
{ClusterInfo} = couch_util:get_value(cluster, TargetInfo),
- Q = config:get("cluster", "q", "8"),
+ Q = config:get_integer("cluster", "q", 2),
delete_db(Source),
delete_db(Target),
- ?_assertEqual(list_to_integer(Q), couch_util:get_value(q, ClusterInfo)).
+ ?_assertEqual(Q, couch_util:get_value(q, ClusterInfo)).
should_not_create_target_with_q_any({_Ctx, {Source, Target}}) ->
diff --git a/src/custodian/src/custodian_db_checker.erl b/src/custodian/src/custodian_db_checker.erl
index 10502dd76..f9ab8c85e 100644
--- a/src/custodian/src/custodian_db_checker.erl
+++ b/src/custodian/src/custodian_db_checker.erl
@@ -132,7 +132,7 @@ get_dbs() ->
get_users_db() ->
- UsersDb = config:get("couch_httpd_auth", "authentication_db", "users"),
+ UsersDb = chttpd_auth_cache:dbname(),
[list_to_binary(UsersDb)].
diff --git a/src/custodian/src/custodian_util.erl b/src/custodian/src/custodian_util.erl
index 785bbd3da..ee217108f 100644
--- a/src/custodian/src/custodian_util.erl
+++ b/src/custodian/src/custodian_util.erl
@@ -43,7 +43,7 @@ report() ->
fold_dbs([], Fun).
ensure_dbs_exists() ->
- DbName = config:get("mem3", "shards_db", "dbs"),
+ DbName = mem3_sync:shards_db(),
{ok, Db} = mem3_util:ensure_exists(DbName),
ensure_custodian_ddoc_exists(Db),
{ok, Db}.
@@ -149,7 +149,7 @@ get_n_rings(N, Ranges, Rings) ->
cluster_n() ->
- list_to_integer(config:get("cluster", "n", "3")).
+ config:get_integer("cluster", "n", 3).
maintenance_nodes(Nodes) ->
{Modes, _} = rpc:multicall(Nodes, config, get, ["couchdb", "maintenance_mode"]),
diff --git a/src/dreyfus/src/dreyfus_rpc.erl b/src/dreyfus/src/dreyfus_rpc.erl
index 5542bd029..cc50d0999 100644
--- a/src/dreyfus/src/dreyfus_rpc.erl
+++ b/src/dreyfus/src/dreyfus_rpc.erl
@@ -103,13 +103,14 @@ disk_size(DbName, DDoc, IndexName) ->
get_or_create_db(DbName, Options) ->
case couch_db:open_int(DbName, Options) of
- {not_found, no_db_file} ->
- couch_log:warning("~p creating ~s", [?MODULE, DbName]),
- couch_server:create(DbName, Options);
- Else ->
- Else
+ {not_found, no_db_file} ->
+ couch_log:warning("~p creating ~s", [?MODULE, DbName]),
+ mem3_util:get_or_create_db(DbName, Options);
+ Else ->
+ Else
end.
+
calculate_seqs(Db, Stale) ->
LastSeq = couch_db:get_update_seq(Db),
if
diff --git a/src/fabric/src/fabric_doc_attachments.erl b/src/fabric/src/fabric_doc_attachments.erl
index 723b9e804..922a89ea1 100644
--- a/src/fabric/src/fabric_doc_attachments.erl
+++ b/src/fabric/src/fabric_doc_attachments.erl
@@ -106,13 +106,13 @@ middleman(Req, chunked) ->
Receiver = spawn(fun() -> couch_httpd:recv_chunked(Req,4096,RcvFun,ok) end),
% take requests from the DB writers and get data from the receiver
- N = erlang:list_to_integer(config:get("cluster","n")),
+ N = config:get_integer("cluster", "n", 3),
Timeout = fabric_util:attachments_timeout(),
middleman_loop(Receiver, N, [], [], Timeout);
middleman(Req, Length) ->
Receiver = spawn(fun() -> receive_unchunked_attachment(Req, Length) end),
- N = erlang:list_to_integer(config:get("cluster","n")),
+ N = config:get_integer("cluster", "n", 3),
Timeout = fabric_util:attachments_timeout(),
middleman_loop(Receiver, N, [], [], Timeout).
diff --git a/src/fabric/src/fabric_doc_atts.erl b/src/fabric/src/fabric_doc_atts.erl
index a3aae80ec..65ba65f07 100644
--- a/src/fabric/src/fabric_doc_atts.erl
+++ b/src/fabric/src/fabric_doc_atts.erl
@@ -116,13 +116,13 @@ middleman(Req, chunked) ->
Receiver = spawn(fun() -> couch_httpd:recv_chunked(Req,4096,RcvFun,ok) end),
% take requests from the DB writers and get data from the receiver
- N = erlang:list_to_integer(config:get("cluster","n")),
+ N = config:get_integer("cluster", "n", 3),
Timeout = fabric_util:attachments_timeout(),
middleman_loop(Receiver, N, [], [], Timeout);
middleman(Req, Length) ->
Receiver = spawn(fun() -> receive_unchunked_attachment(Req, Length) end),
- N = erlang:list_to_integer(config:get("cluster","n")),
+ N = config:get_integer("cluster", "n", 3),
Timeout = fabric_util:attachments_timeout(),
middleman_loop(Receiver, N, [], [], Timeout).
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
index 7776bd9fe..9ed8efd14 100644
--- a/src/fabric/src/fabric_rpc.erl
+++ b/src/fabric/src/fabric_rpc.erl
@@ -304,7 +304,7 @@ reset_validation_funs(DbName) ->
open_shard(Name, Opts) ->
set_io_priority(Name, Opts),
try
- rexi:reply(couch_db:open(Name, Opts))
+ rexi:reply(mem3_util:get_or_create_db(Name, Opts))
catch exit:{timeout, _} ->
couch_stats:increment_counter([fabric, open_shard, timeouts])
end.
@@ -439,7 +439,7 @@ get_node_seqs(Db, Nodes) ->
get_or_create_db(DbName, Options) ->
- mem3_util:get_or_create_db(DbName, Options).
+ mem3_util:get_or_create_db_int(DbName, Options).
get_view_cb(#mrargs{extra = Options}) ->
diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl
index 8aa14e73a..84ffef122 100644
--- a/src/fabric/src/fabric_util.erl
+++ b/src/fabric/src/fabric_util.erl
@@ -105,7 +105,7 @@ get_db(DbName, Options) ->
Nodes = [node()|erlang:nodes()],
Live = [S || #shard{node = N} = S <- Shards, lists:member(N, Nodes)],
Factor = list_to_integer(config:get("fabric", "shard_timeout_factor", "2")),
- get_shard(Live, [{create_if_missing, true} | Options], 100, Factor).
+ get_shard(Live, Options, 100, Factor).
get_shard([], _Opts, _Timeout, _Factor) ->
erlang:error({internal_server_error, "No DB shards could be opened."});
diff --git a/src/ioq/src/ioq.erl b/src/ioq/src/ioq.erl
index 3da640e47..c3f9365bf 100644
--- a/src/ioq/src/ioq.erl
+++ b/src/ioq/src/ioq.erl
@@ -55,7 +55,15 @@ get_queue_lengths() ->
gen_server:call(?MODULE, get_queue_lengths).
bypass(Priority) ->
- config:get("ioq.bypass", atom_to_list(Priority)) =:= "true".
+ case Priority of
+ os_process -> config:get_boolean("ioq.bypass", "os_process", true);
+ read -> config:get_boolean("ioq.bypass", "read", true);
+ write -> config:get_boolean("ioq.bypass", "write", true);
+ view_update -> config:get_boolean("ioq.bypass", "view_update", true);
+ shard_sync -> config:get_boolean("ioq.bypass", "shard_sync", false);
+ compaction -> config:get_boolean("ioq.bypass", "compaction", false);
+ _ -> config:get("ioq.bypass", atom_to_list(Priority)) =:= "true"
+ end.
io_class({prompt, _}, _) ->
os_process;
@@ -91,8 +99,8 @@ init(_) ->
{ok, read_config(State)}.
read_config(State) ->
- Ratio = list_to_float(config:get("ioq", "ratio", "0.01")),
- Concurrency = list_to_integer(config:get("ioq", "concurrency", "10")),
+ Ratio = config:get_float("ioq", "ratio", 0.01),
+ Concurrency = config:get_integer("ioq", "concurrency", 10),
State#state{concurrency=Concurrency, ratio=Ratio}.
handle_call(get_queue_lengths, _From, State) ->
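
bypass/1 previously returned false for any class whose key was commented out, since config:get/2 yields undefined for a missing key. The rewrite pins a boolean default per known IO class and keeps the string comparison only as a fallback for unrecognized classes. A sketch of what the defaults resolve to on an untouched config, wrapped in an illustrative module:

    -module(ioq_bypass_example).
    -export([defaults/0]).

    %% With an empty [ioq.bypass] section: the first four classes bypass
    %% the queue (true); shard_sync and compaction stay queued (false).
    defaults() ->
        [{Class, config:get_boolean("ioq.bypass", atom_to_list(Class), Def)} ||
            {Class, Def} <- [{os_process, true}, {read, true}, {write, true},
                             {view_update, true}, {shard_sync, false},
                             {compaction, false}]].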
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index 6f3a10df8..86eb3bb45 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -204,7 +204,7 @@ choose_shards(DbName, Nodes, Options) ->
true -> ok
end,
Q = mem3_util:q_val(couch_util:get_value(q, Options,
- config:get("cluster", "q", "8"))),
+ config:get_integer("cluster", "q", 2))),
%% rotate to a random entry in the nodelist for even distribution
RotatedNodes = rotate_rand(Nodes),
mem3_util:create_partition_map(DbName, N, Q, RotatedNodes, Suffix).
diff --git a/src/mem3/src/mem3_rpc.erl b/src/mem3/src/mem3_rpc.erl
index 5d1c62c06..9e0f42a8e 100644
--- a/src/mem3/src/mem3_rpc.erl
+++ b/src/mem3/src/mem3_rpc.erl
@@ -401,7 +401,7 @@ rexi_call(Node, MFA, Timeout) ->
get_or_create_db(DbName, Options) ->
- mem3_util:get_or_create_db(DbName, Options).
+ mem3_util:get_or_create_db_int(DbName, Options).
-ifdef(TEST).
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
index 4f3323740..fd1894abe 100644
--- a/src/mem3/src/mem3_shards.erl
+++ b/src/mem3/src/mem3_shards.erl
@@ -46,7 +46,8 @@
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-opts_for_db(DbName) ->
+opts_for_db(DbName0) ->
+ DbName = mem3:dbname(DbName0),
{ok, Db} = mem3_util:ensure_exists(mem3_sync:shards_db()),
case couch_db:open_doc(Db, DbName, [ejson_body]) of
{ok, #doc{body = {Props}}} ->
@@ -358,7 +359,7 @@ changes_callback({change, {Change}, _}, _) ->
ets:insert(?OPENERS, {DbName, Writer}),
Msg = {cache_insert_change, DbName, Writer, Seq},
gen_server:cast(?MODULE, Msg),
- [create_if_missing(mem3:name(S), mem3:engine(S)) || S
+ [create_if_missing(mem3:name(S)) || S
<- Shards, mem3:node(S) =:= node()]
end
end
@@ -408,17 +409,18 @@ in_range(Shard, HashKey) ->
[B, E] = mem3:range(Shard),
B =< HashKey andalso HashKey =< E.
-create_if_missing(Name, Options) ->
- case couch_server:exists(Name) of
+create_if_missing(ShardName) ->
+ case couch_server:exists(ShardName) of
true ->
ok;
false ->
- case couch_server:create(Name, [?ADMIN_CTX] ++ Options) of
+ Options = opts_for_db(ShardName),
+ case couch_server:create(ShardName, [?ADMIN_CTX] ++ Options) of
{ok, Db} ->
couch_db:close(Db);
Error ->
couch_log:error("~p tried to create ~s, got ~p",
- [?MODULE, Name, Error])
+ [?MODULE, ShardName, Error])
end
end.
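
create_if_missing/2 used to receive only the shard's engine option, so a shard recreated by the changes listener lost db-level properties such as partitioning. The new create_if_missing/1 derives the full option set from the shards db via opts_for_db/1, which per the first hunk now also accepts a shard name. A hedged sketch of that recreation path, with an illustrative module name:

    -module(shard_recreate_example).
    -export([recreate/1]).

    -include_lib("couch/include/couch_db.hrl").  %% for ?ADMIN_CTX

    %% Recreates a missing shard file with the db's stored options, e.g.
    %% [{props, [{partitioned, true}]}] for a partitioned db.
    recreate(ShardName) ->
        Options = mem3_shards:opts_for_db(ShardName),
        {ok, Db} = couch_server:create(ShardName, [?ADMIN_CTX] ++ Options),
        couch_db:close(Db).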
diff --git a/src/mem3/src/mem3_util.erl b/src/mem3/src/mem3_util.erl
index 28cb17778..005a6b1bc 100644
--- a/src/mem3/src/mem3_util.erl
+++ b/src/mem3/src/mem3_util.erl
@@ -14,7 +14,8 @@
-export([name_shard/2, create_partition_map/5, build_shards/2,
n_val/2, q_val/1, to_atom/1, to_integer/1, write_db_doc/1, delete_db_doc/1,
- shard_info/1, ensure_exists/1, open_db_doc/1, get_or_create_db/2]).
+ shard_info/1, ensure_exists/1, open_db_doc/1, update_db_doc/1]).
+-export([get_or_create_db/2, get_or_create_db_int/2]).
-export([is_deleted/1, rotate_list/2]).
-export([get_shard_opts/1, get_engine_opt/1, get_props_opt/1]).
-export([get_shard_props/1, find_dirty_shards/0]).
@@ -116,6 +117,34 @@ write_db_doc(DbName, #doc{id=Id, body=Body} = Doc, ShouldMutate) ->
couch_db:close(Db)
end.
+update_db_doc(Doc) ->
+ update_db_doc(mem3_sync:shards_db(), Doc, true).
+
+update_db_doc(DbName, #doc{id=Id, body=Body} = Doc, ShouldMutate) ->
+ {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
+ try couch_db:open_doc(Db, Id, [ejson_body]) of
+ {ok, #doc{body = Body}} ->
+ % the doc is already in the desired state, we're done here
+ ok;
+ {ok, #doc{body = Body1}} ->
+ % the doc has a new body to be written
+ {ok, _} = couch_db:update_doc(Db, Doc#doc{body=Body1}, []),
+ ok;
+ {not_found, _} when ShouldMutate ->
+ try couch_db:update_doc(Db, Doc, []) of
+ {ok, _} ->
+ ok
+ catch conflict ->
+ % check to see if this was a replication race or a different edit
+ update_db_doc(DbName, Doc, false)
+ end;
+ _ ->
+ % the doc already exists in a different state
+ conflict
+ after
+ couch_db:close(Db)
+ end.
+
delete_db_doc(DocId) ->
gen_server:cast(mem3_shards, {cache_remove, DocId}),
delete_db_doc(mem3_sync:shards_db(), DocId, true).
@@ -240,7 +269,7 @@ db_props_from_json([{K, V} | Rest]) ->
[{K, V} | db_props_from_json(Rest)].
n_val(undefined, NodeCount) ->
- n_val(config:get("cluster", "n", "3"), NodeCount);
+ n_val(config:get_integer("cluster", "n", 3), NodeCount);
n_val(N, NodeCount) when is_list(N) ->
n_val(list_to_integer(N), NodeCount);
n_val(N, NodeCount) when is_integer(NodeCount), N > NodeCount ->
@@ -508,18 +537,39 @@ sort_ranges_fun({B1, _}, {B2, _}) ->
B1 =< B2.
+add_db_config_options(DbName, Options) ->
+ DbOpts = case mem3:dbname(DbName) of
+ DbName -> [];
+ MDbName -> mem3_shards:opts_for_db(MDbName)
+ end,
+ merge_opts(DbOpts, Options).
+
+
get_or_create_db(DbName, Options) ->
+ case couch_db:open(DbName, Options) of
+ {ok, _} = OkDb ->
+ OkDb;
+ {not_found, no_db_file} ->
+ try
+ Options1 = [{create_if_missing, true} | Options],
+ Options2 = add_db_config_options(DbName, Options1),
+ couch_db:open(DbName, Options2)
+ catch error:database_does_not_exist ->
+ throw({error, missing_target})
+ end;
+ Else ->
+ Else
+ end.
+
+
+get_or_create_db_int(DbName, Options) ->
case couch_db:open_int(DbName, Options) of
{ok, _} = OkDb ->
OkDb;
{not_found, no_db_file} ->
try
- DbOpts = case mem3:dbname(DbName) of
- DbName -> [];
- MDbName -> mem3_shards:opts_for_db(MDbName)
- end,
Options1 = [{create_if_missing, true} | Options],
- Options2 = merge_opts(DbOpts, Options1),
+ Options2 = add_db_config_options(DbName, Options1),
couch_db:open_int(DbName, Options2)
catch error:database_does_not_exist ->
throw({error, missing_target})
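
The split above separates two lookups that get_or_create_db/2 previously conflated: the plain variant now resolves a clustered, user-facing name through couch_db:open/2, while get_or_create_db_int/2 keeps the old behavior of opening a physical shard file through couch_db:open_int/2 (and is what fabric_rpc and mem3_rpc call after this change). A sketch of which variant fits which kind of name; the module and names are illustrative:

    -module(get_or_create_example).
    -export([open_clustered/1, open_shard_file/1]).

    -include_lib("couch/include/couch_db.hrl").  %% for ?ADMIN_CTX

    %% Clustered, user-facing name, e.g. <<"mydb">>.
    open_clustered(DbName) ->
        mem3_util:get_or_create_db(DbName, [?ADMIN_CTX]).

    %% Physical shard path, e.g. <<"shards/00000000-ffffffff/mydb.1625860000">>.
    open_shard_file(ShardName) ->
        mem3_util:get_or_create_db_int(ShardName, [?ADMIN_CTX]).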
diff --git a/src/mem3/test/eunit/mem3_shards_test.erl b/src/mem3/test/eunit/mem3_shards_test.erl
new file mode 100644
index 000000000..9c9bbb402
--- /dev/null
+++ b/src/mem3/test/eunit/mem3_shards_test.erl
@@ -0,0 +1,129 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_shards_test).
+
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("mem3/src/mem3_reshard.hrl").
+-include_lib("couch_mrview/include/couch_mrview.hrl"). % for all_docs function
+
+-define(ID, <<"_id">>).
+-define(TIMEOUT, 60).
+
+setup() ->
+ DbName = ?tempdb(),
+ PartProps = [{partitioned, true}, {hash, [couch_partition, hash, []]}],
+ create_db(DbName, [{q, 8}, {n, 1}, {props, PartProps}]),
+ {ok, DbDoc} = mem3_util:open_db_doc(DbName),
+ #{dbname => DbName, dbdoc => DbDoc}.
+
+
+teardown(#{dbname := DbName}) ->
+ delete_db(DbName).
+
+
+start_couch() ->
+ test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]).
+
+
+stop_couch(Ctx) ->
+ test_util:stop_couch(Ctx).
+
+
+mem3_shards_db_create_props_test_() ->
+ {
+ "mem3 shards partition query database properties tests",
+ {
+ setup,
+ fun start_couch/0, fun stop_couch/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun partitioned_shards_recreated_properly/1
+ ]
+ }
+ }
+ }.
+
+
+% This asserts that when the mem3_shards's changes listener on the shards db
+% encounters a db doc update for a db that has a missing shard on the local
+% instance, the shard creation logic will properly propagate the db's config
+% properties.
+% SEE: apache/couchdb#3631
+partitioned_shards_recreated_properly(#{dbname := DbName, dbdoc := DbDoc}) ->
+ {timeout, ?TIMEOUT, ?_test(begin
+ #doc{body = {Body0}} = DbDoc,
+ Body1 = [{<<"foo">>, <<"bar">>} | Body0],
+ Shards = [Shard|_] = lists:sort(mem3:shards(DbName)),
+ ShardName = Shard#shard.name,
+ ?assert(is_partitioned(Shards)),
+ ok = with_proc(fun() -> couch_server:delete(ShardName, []) end),
+ ?assertThrow({not_found, no_db_file}, is_partitioned(Shard)),
+ ok = mem3_util:update_db_doc(DbDoc#doc{body = {Body1}}),
+ Shards = [Shard|_] = test_util:wait_value(fun() ->
+ lists:sort(mem3:shards(DbName))
+ end, Shards),
+ ?assertEqual(true, test_util:wait_value(fun() ->
+ catch is_partitioned(Shard)
+ end, true))
+ end)}.
+
+
+is_partitioned([#shard{}|_]=Shards) ->
+ lists:all(fun is_partitioned/1, Shards);
+is_partitioned(#shard{name=Name}) ->
+ couch_util:with_db(Name, fun couch_db:is_partitioned/1);
+is_partitioned(Db) ->
+ couch_db:is_partitioned(Db).
+
+
+create_db(DbName, Opts) ->
+ GL = erlang:group_leader(),
+ with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
+
+
+delete_db(DbName) ->
+ GL = erlang:group_leader(),
+ with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
+
+
+with_proc(Fun) ->
+ with_proc(Fun, undefined, 30000).
+
+
+with_proc(Fun, GroupLeader) ->
+ with_proc(Fun, GroupLeader, 30000).
+
+
+with_proc(Fun, GroupLeader, Timeout) ->
+ {Pid, Ref} = spawn_monitor(fun() ->
+ case GroupLeader of
+ undefined -> ok;
+ _ -> erlang:group_leader(GroupLeader, self())
+ end,
+ exit({with_proc_res, Fun()})
+ end),
+ receive
+ {'DOWN', Ref, process, Pid, {with_proc_res, Res}} ->
+ Res;
+ {'DOWN', Ref, process, Pid, Error} ->
+ error(Error)
+ after Timeout ->
+ erlang:demonitor(Ref, [flush]),
+ exit(Pid, kill),
+ error({with_proc_timeout, Fun, Timeout})
+ end.
+
diff --git a/src/mem3/test/eunit/mem3_util_test.erl b/src/mem3/test/eunit/mem3_util_test.erl
index 8b74c4b2b..a8103bfd0 100644
--- a/src/mem3/test/eunit/mem3_util_test.erl
+++ b/src/mem3/test/eunit/mem3_util_test.erl
@@ -118,7 +118,7 @@ nval_test_() ->
fun() ->
meck:new([config, couch_log]),
meck:expect(couch_log, error, 2, ok),
- meck:expect(config, get, 3, "5")
+ meck:expect(config, get_integer, 3, 5)
end,
fun(_) -> meck:unload() end,
[
diff --git a/src/setup/src/setup_httpd.erl b/src/setup/src/setup_httpd.erl
index 44ea5d1a7..6d17186fb 100644
--- a/src/setup/src/setup_httpd.erl
+++ b/src/setup/src/setup_httpd.erl
@@ -36,8 +36,8 @@ handle_setup_req(#httpd{method='GET'}=Req) ->
true ->
chttpd:send_json(Req, 200, {[{state, single_node_enabled}]});
_ ->
- case config:get("cluster", "n", undefined) of
- "1" ->
+ case config:get_integer("cluster", "n", 3) of
+ 1 ->
case setup:is_single_node_enabled(Dbs) of
false ->
chttpd:send_json(Req, 200, {[{state, single_node_disabled}]});
diff --git a/test/elixir/test/config_test.exs b/test/elixir/test/config_test.exs
index 7600cecf2..1ad70a8a7 100644
--- a/test/elixir/test/config_test.exs
+++ b/test/elixir/test/config_test.exs
@@ -76,7 +76,7 @@ defmodule ConfigTest do
test "Standard config options are present", context do
assert context[:config]["couchdb"]["database_dir"]
- assert context[:config]["log"]["level"]
+ assert context[:config]["chttpd"]["port"]
end
test "Settings can be altered with undefined whitelist allowing any change", context do