Author:    Joan Touzet <wohali@users.noreply.github.com>  2020-01-08 01:08:35 -0500
Committer: GitHub <noreply@github.com>                    2020-01-08 01:08:35 -0500
Commit:    8949653f95eb2949c9f0d91995569dfb16514217 (patch)
Tree:      2507cd69b5d556fcca19bb68b5d49e0ccf6acea7
Parent:    873b2adb9f4f05dc9e34a0ab767aa9266e74593c (diff)
Parent:    e8508952ce76a5842a51d0c632f12fcbfabb6e41 (diff)
Merge branch 'master' into fix/2143/override-query-server-config
-rw-r--r--  Makefile                                                4
-rw-r--r--  Makefile.win                                            4
-rw-r--r--  build-aux/Jenkinsfile.full                             22
-rw-r--r--  build-aux/Jenkinsfile.pr                                2
-rw-r--r--  rel/files/couchdb.cmd.in                                3
-rwxr-xr-x  rel/overlay/bin/couchup                               613
-rw-r--r--  rel/overlay/etc/default.ini                             2
-rw-r--r--  src/couch/src/couch_httpd_auth.erl                      5
-rw-r--r--  src/couch/src/couch_sup.erl                             2
-rw-r--r--  src/couch_replicator/src/couch_replicator_worker.erl   42
-rw-r--r--  src/rexi/src/rexi.erl                                   5
-rw-r--r--  test/elixir/test/cookie_auth_test.exs                  25

12 files changed, 52 insertions(+), 677 deletions(-)
diff --git a/Makefile b/Makefile
index 66b1714d3..9ab9b1b4d 100644
--- a/Makefile
+++ b/Makefile
@@ -224,7 +224,7 @@ python-black: .venv/bin/black
@python3 -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \
LC_ALL=C.UTF-8 LANG=C.UTF-8 .venv/bin/black --check \
--exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \
- . dev/run rel/overlay/bin/couchup test/javascript/run
+ . dev/run test/javascript/run
python-black-update: .venv/bin/black
@python3 -c "import sys; exit(1 if sys.version_info < (3,6) else 0)" || \
@@ -232,7 +232,7 @@ python-black-update: .venv/bin/black
@python3 -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \
LC_ALL=C.UTF-8 LANG=C.UTF-8 .venv/bin/black \
--exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \
- . dev/run rel/overlay/bin/couchup test/javascript/run
+ . dev/run test/javascript/run
.PHONY: elixir
elixir: export MIX_ENV=integration
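
A note on the version probe in the python-black targets above: the inline
python3 -c "... exit(1 if sys.version_info >= (3,6) else 0)" deliberately
exits non-zero on Python 3.6+, so the "|| black ..." branch runs only on
interpreters black supports; the companion probe with the inverted test
prints a warning on older Pythons instead. A minimal sketch of the same
gate in plain Python (the print calls stand in for invoking black):

    import sys

    def black_supported():
        # black needs Python 3.6+; mirror the Makefile's probe
        return sys.version_info >= (3, 6)

    if black_supported():
        print("would run black here")
    else:
        print("WARNING: black not run, Python 3.6+ unavailable")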
diff --git a/Makefile.win b/Makefile.win
index 7278fec76..ee314059d 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -176,7 +176,7 @@ python-black: .venv/bin/black
@python.exe -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \
.venv\Scripts\black.exe --check \
--exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \
- . dev\run rel\overlay\bin\couchup test\javascript\run
+ . dev\run test\javascript\run
python-black-update: .venv/bin/black
@python.exe -c "import sys; exit(1 if sys.version_info < (3,6) else 0)" || \
@@ -184,7 +184,7 @@ python-black-update: .venv/bin/black
@python.exe -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \
.venv\Scripts\black.exe \
--exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \
- . dev\run rel\overlay\bin\couchup test\javascript\run
+ . dev\run test\javascript\run
.PHONY: elixir
elixir: export COUCHDB_TEST_ADMIN_PARTY_OVERRIDE=1
diff --git a/build-aux/Jenkinsfile.full b/build-aux/Jenkinsfile.full
index 7087bba63..174cbd4cf 100644
--- a/build-aux/Jenkinsfile.full
+++ b/build-aux/Jenkinsfile.full
@@ -80,6 +80,7 @@ pipeline {
stage('Build Release Tarball') {
agent {
docker {
+ label 'docker'
image 'couchdbdev/debian-stretch-erlang-20.3.8.24-1:latest'
args "${DOCKER_ARGS}"
alwaysPull true
@@ -124,7 +125,7 @@ pipeline {
stage('FreeBSD') {
agent {
- label 'couchdb && freebsd'
+ label 'freebsd'
}
steps {
// deleteDir is OK here because we're not inside of a Docker container!
@@ -160,6 +161,7 @@ pipeline {
agent {
docker {
image 'couchdbdev/centos-6-erlang-20.3.8.24-1:latest'
+ label 'docker'
args "${DOCKER_ARGS}"
alwaysPull true
}
@@ -203,6 +205,7 @@ pipeline {
agent {
docker {
image 'couchdbdev/centos-7-erlang-20.3.8.24-1:latest'
+ label 'docker'
args "${DOCKER_ARGS}"
alwaysPull true
}
@@ -247,6 +250,7 @@ pipeline {
agent {
docker {
image 'couchdbdev/centos-8-erlang-20.3.8.24-1:latest'
+ label 'docker'
args "${DOCKER_ARGS}"
alwaysPull true
}
@@ -291,6 +295,7 @@ pipeline {
agent {
docker {
image 'couchdbdev/ubuntu-xenial-erlang-20.3.8.24-1:latest'
+ label 'docker'
args "${DOCKER_ARGS}"
alwaysPull true
}
@@ -334,6 +339,7 @@ pipeline {
agent {
docker {
image 'couchdbdev/ubuntu-bionic-erlang-20.3.8.24-1:latest'
+ label 'docker'
alwaysPull true
args "${DOCKER_ARGS}"
}
@@ -377,6 +383,7 @@ pipeline {
agent {
docker {
image 'couchdbdev/debian-stretch-erlang-20.3.8.24-1:latest'
+ label 'docker'
alwaysPull true
args "${DOCKER_ARGS}"
}
@@ -420,6 +427,7 @@ pipeline {
agent {
docker {
image 'couchdbdev/debian-buster-erlang-20.3.8.24-1:latest'
+ label 'docker'
alwaysPull true
args "${DOCKER_ARGS}"
}
@@ -543,6 +551,7 @@ pipeline {
agent {
docker {
image 'couchdbdev/debian-buster-erlang-20.3.8.24-1:latest'
+ label 'docker'
alwaysPull true
args "${DOCKER_ARGS}"
}
@@ -561,18 +570,18 @@ pipeline {
echo 'Retrieving & cleaning current couchdb-vm2 tree...'
sh '''
rsync -avz -e "ssh -o StrictHostKeyChecking=no -i $KEY" jenkins@couchdb-vm2.apache.org:/var/www/html/$BRANCH_NAME . || mkdir -p $BRANCH_NAME
- rm -rf $BRANCH_NAME/debian/* $BRANCH_NAME/el6/* $BRANCH_NAME/el7/*
- mkdir -p $BRANCH_NAME/debian $BRANCH_NAME/el6 $BRANCH_NAME/el7 $BRANCH_NAME/source
+ rm -rf $BRANCH_NAME/debian/* $BRANCH_NAME/el6/* $BRANCH_NAME/el7/* $BRANCH_NAME/el8/*
+ mkdir -p $BRANCH_NAME/debian $BRANCH_NAME/el6 $BRANCH_NAME/el7 $BRANCH_NAME/el8 $BRANCH_NAME/source
rsync -avz -e "ssh -o StrictHostKeyChecking=no -i $KEY" jenkins@couchdb-vm2.apache.org:/var/www/html/js .
'''
echo 'Building Debian repo...'
sh '''
git clone https://github.com/apache/couchdb-pkg
- cp js/debian-jessie/*.deb pkgs/jessie
- reprepro -b couchdb-pkg/repo includedeb jessie pkgs/jessie/*.deb
cp js/debian-stretch/*.deb pkgs/stretch
reprepro -b couchdb-pkg/repo includedeb stretch pkgs/stretch/*.deb
+ cp js/debian-buster/*.deb pkgs/buster
+ reprepro -b couchdb-pkg/repo includedeb buster pkgs/buster/*.deb
cp js/ubuntu-xenial/*.deb pkgs/xenial
reprepro -b couchdb-pkg/repo includedeb xenial pkgs/xenial/*.deb
cp js/ubuntu-bionic/*.deb pkgs/bionic
@@ -583,8 +592,10 @@ pipeline {
sh '''
cp js/centos-6/*rpm pkgs/centos6
cp js/centos-7/*rpm pkgs/centos7
+ cp js/centos-8/*rpm pkgs/centos8
cd pkgs/centos6 && createrepo --database .
cd ../centos7 && createrepo --database .
+ cd ../centos8 && createrepo --database .
'''
echo 'Building tree to upload...'
@@ -593,6 +604,7 @@ pipeline {
mv couchdb-pkg/repo/dists $BRANCH_NAME/debian
mv pkgs/centos6/* $BRANCH_NAME/el6
mv pkgs/centos7/* $BRANCH_NAME/el7
+ mv pkgs/centos8/* $BRANCH_NAME/el8
mv apache-couchdb-*.tar.gz $BRANCH_NAME/source
cd $BRANCH_NAME/source
ls -1tr | head -n -10 | xargs -d '\n' rm -f --
diff --git a/build-aux/Jenkinsfile.pr b/build-aux/Jenkinsfile.pr
index c7cb68f71..c45c04ac9 100644
--- a/build-aux/Jenkinsfile.pr
+++ b/build-aux/Jenkinsfile.pr
@@ -67,6 +67,7 @@ pipeline {
agent {
docker {
image "${DOCKER_IMAGE}"
+ label 'docker'
args "${DOCKER_ARGS}"
alwaysPull true
}
@@ -114,6 +115,7 @@ pipeline {
agent {
docker {
image "${DOCKER_IMAGE}"
+ label 'docker'
args "${DOCKER_ARGS}"
alwaysPull true
}
diff --git a/rel/files/couchdb.cmd.in b/rel/files/couchdb.cmd.in
index 4abb6cdf5..4e63e3659 100644
--- a/rel/files/couchdb.cmd.in
+++ b/rel/files/couchdb.cmd.in
@@ -27,8 +27,7 @@ set PATH=%PATH%;%COUCHDB_BIN_DIR%
IF NOT DEFINED COUCHDB_QUERY_SERVER_JAVASCRIPT SET COUCHDB_QUERY_SERVER_JAVASCRIPT="{{prefix}}/bin/couchjs {{prefix}}/share/server/main.js"
IF NOT DEFINED COUCHDB_QUERY_SERVER_COFFEESCRIPT SET COUCHDB_QUERY_SERVER_COFFEESCRIPT="{{prefix}}/bin/couchjs {{prefix}}/share/server/main-coffee.js"
-
-REM set COUCHDB_FAUXTON_DOCROOT="{{fauxton_root}}"
+set COUCHDB_FAUXTON_DOCROOT="{{fauxton_root}}"
"%BINDIR%\erl" -boot "%ROOTDIR%\releases\%APP_VSN%\couchdb" ^
-args_file "%ROOTDIR%\etc\vm.args" ^
diff --git a/rel/overlay/bin/couchup b/rel/overlay/bin/couchup
deleted file mode 100755
index 52d746c2d..000000000
--- a/rel/overlay/bin/couchup
+++ /dev/null
@@ -1,613 +0,0 @@
-#!/usr/bin/env python3
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-import argparse
-import base64
-import json
-import textwrap
-import threading
-import time
-import sys
-
-try:
- from urllib.parse import quote
-except ImportError:
- from urllib import quote
-import requests
-
-try:
- import progressbar
-
- HAVE_BAR = True
-except ImportError:
- HAVE_BAR = False
-
-
-def _tojson(req):
- """Support requests v0.x as well as 1.x+"""
- if requests.__version__[0] == "0":
- return json.loads(req.content)
- return req.json()
-
-
-def _args(args):
- args = vars(args)
- if args["password"]:
- args["creds"] = (args["login"], args["password"])
- else:
- args["creds"] = None
- return args
-
-
-def _do_list(args):
- port = str(args["local_port"])
- req = requests.get("http://127.0.0.1:" + port + "/_all_dbs", auth=args["creds"])
- req.raise_for_status()
- dbs = _tojson(req)
- local_dbs = [x for x in dbs if "shards" not in x and x not in ["_dbs", "_nodes"]]
- clustered_dbs = list(
- set([x.split("/")[2].split(".")[0] for x in dbs if "shards" in x])
- )
- if not args["include_system_dbs"]:
- # list comprehension to eliminate dbs starting with underscore
- local_dbs = [x for x in local_dbs if x[0] != "_"]
- clustered_dbs = [x for x in clustered_dbs if x[0] != "_"]
- local_dbs.sort()
- clustered_dbs.sort()
- if args.get("clustered"):
- return clustered_dbs
- return local_dbs
-
-
-def _list(args):
- args = _args(args)
- ret = _do_list(args)
- print(", ".join(ret))
-
-
-def _watch_replication(
- db,
- local_port=5986,
- clustered_port=5984,
- creds=None,
- hide_progress_bar=False,
- quiet=False,
- timeout=30,
-):
- """Watches replication, optionally with a progressbar."""
- time.sleep(1)
- if not quiet:
- print("Replication started.")
- url = "http://127.0.0.1:{}/{}".format(local_port, db)
- try:
- req = requests.get(url, auth=creds)
- req.raise_for_status()
- req = _tojson(req)
- # here, local means node-local, i.e. source (1.x) database
- local_docs = req["doc_count"]
- local_size = req["data_size"]
- except requests.exceptions.HTTPError:
- raise Exception("Cannot retrieve {} doc_count!".format(db))
- if local_size == 0:
- return
- if HAVE_BAR and not hide_progress_bar and not quiet:
- widgets = [
- db,
- " ",
- progressbar.Percentage(),
- " ",
- progressbar.Bar(marker=progressbar.RotatingMarker()),
- " ",
- progressbar.ETA(),
- " ",
- progressbar.FileTransferSpeed(),
- ]
- progbar = progressbar.ProgressBar(widgets=widgets, maxval=local_size).start()
- count = 0
- stall_count = 0
- url = "http://127.0.0.1:{}/{}".format(clustered_port, db)
- while count < local_docs:
- try:
- req = requests.get(url, auth=creds)
- req.raise_for_status()
- req = _tojson(req)
- # here, cluster means clustered port, i.e. port 5984
- clus_count = req["doc_count"]
- clus_size = req["data_size"]
- except requests.exceptions.HTTPError as exc:
- if exc.response.status_code == 404:
- clus_count = 0
- clus_size = 0
- else:
- raise Exception("Cannot retrieve {} doc_count!".format(db))
- if count == clus_count:
- stall_count += 1
- else:
- stall_count = 0
- if stall_count == timeout:
- if not quiet:
- print("Replication is stalled. Increase timeout or reduce load.")
- exit(1)
- if HAVE_BAR and not hide_progress_bar and not quiet:
- if clus_size > local_size:
- clus_size = local_size
- progbar.update(clus_size)
- count = clus_count
- time.sleep(1)
- if HAVE_BAR and not hide_progress_bar and not quiet:
- progbar.finish()
- return 0
-
-
-def _put_filter(args, db=None):
- """Adds _design/repl_filters tombstone replication filter to DB."""
- ddoc = {
- "_id": "_design/repl_filters",
- "filters": {"no_deleted": "function(doc,req){return !doc._deleted;};"},
- }
- try:
- req = requests.get(
- "http://127.0.0.1:{}/{}/_design/repl_filters".format(
- args["local_port"], db
- ),
- auth=args["creds"],
- )
- req.raise_for_status()
- doc = _tojson(req)
- del doc["_rev"]
- if doc != ddoc:
- if not args["quiet"]:
- print("Source replication filter does not match! Aborting.")
- exit(1)
- except requests.exceptions.HTTPError as exc:
- if exc.response.status_code == 404:
- if not args["quiet"]:
- print("Adding replication filter to source database...")
- req = requests.put(
- "http://127.0.0.1:{}/{}/_design/repl_filters".format(
- args["local_port"], db
- ),
- data=json.dumps(ddoc),
- auth=args["creds"],
- )
- req.raise_for_status()
- elif not args["quiet"]:
- print(exc.response.text)
- exit(1)
-
-
-def _do_security(args, db=None):
- """Copies the _security object from source to target DB."""
- try:
- req = requests.get(
- "http://127.0.0.1:{}/{}/_security".format(args["local_port"], db),
- auth=args["creds"],
- )
- req.raise_for_status()
- security_doc = _tojson(req)
- req = requests.put(
- "http://127.0.0.1:{}/{}/_security".format(args["clustered_port"], db),
- data=json.dumps(security_doc),
- auth=args["creds"],
- )
- req.raise_for_status()
- except requests.exceptions.HTTPError as exc:
- print(exc.response.text)
- exit(1)
-
-
-def _replicate(args):
- args = _args(args)
- if args["all_dbs"]:
- dbs = _do_list(args)
- else:
- dbs = args["dbs"]
-
- for db in dbs:
- if args["filter_deleted"]:
- _put_filter(args, db)
-
- if not args["quiet"]:
- print("Starting replication for " + db + "...")
- db = quote(db, safe="")
- doc = {
- "continuous": False,
- "create_target": True,
- "source": {"url": "http://127.0.0.1:{}/{}".format(args["local_port"], db)},
- "target": {
- "url": "http://127.0.0.1:{}/{}".format(args["clustered_port"], db)
- },
- }
- if args["filter_deleted"]:
- doc["filter"] = "repl_filters/no_deleted"
- if args["creds"]:
- auth = (
- "Basic " + base64.b64encode(":".join(args["creds"]).encode()).decode()
- )
- headers = {"authorization": auth}
- doc["source"]["headers"] = headers
- doc["target"]["headers"] = headers
- watch_args = {
- y: args[y]
- for y in [
- "local_port",
- "clustered_port",
- "creds",
- "hide_progress_bar",
- "timeout",
- "quiet",
- ]
- }
- watch_args["db"] = db
- watch = threading.Thread(target=_watch_replication, kwargs=watch_args)
- watch.start()
- try:
- req = requests.post(
- "http://127.0.0.1:{}/_replicate".format(args["clustered_port"]),
- auth=args["creds"],
- data=json.dumps(doc),
- headers={"Content-type": "application/json"},
- )
- req.raise_for_status()
- req = _tojson(req)
- except requests.exceptions.HTTPError as exc:
- if not args["quiet"]:
- print(exc.response.text)
- exit(1)
- watch.join()
- if req.get("no_changes"):
- if not args["quiet"]:
- print("No changes, replication is caught up.")
-
- if not args["quiet"]:
- print("Copying _security object for " + db + "...")
- _do_security(args, db)
-
- if not args["quiet"]:
- print("Replication complete.")
-
-
-def _rebuild(args):
- args = _args(args)
- if args["all_dbs"]:
- if args["views"]:
- if not args["quiet"]:
- print("Cannot take list of views for more than 1 database.")
- exit(1)
- args["clustered"] = True
- dbs = _do_list(args)
- else:
- dbs = [args["db"]]
- for db in dbs:
- if args["views"]:
- views = args["views"]
- else:
- try:
- req = requests.get(
- "http://127.0.0.1:{}/{}/_all_docs".format(
- args["clustered_port"], db
- ),
- params={"start_key": '"_design/"', "end_key": '"_design0"'},
- auth=args["creds"],
- )
- req.raise_for_status()
- req = _tojson(req)
- except requests.exceptions.HTTPError as exc:
- if not args["quiet"]:
- print(exc.response.text)
- exit(1)
- req = req["rows"]
- ddocs = [x["id"].split("/")[1] for x in req]
- for ddoc in ddocs:
- try:
- req = requests.get(
- "http://127.0.0.1:{}/{}/_design/{}".format(
- args["clustered_port"], db, ddoc
- ),
- auth=args["creds"],
- )
- req.raise_for_status()
- doc = _tojson(req)
- except requests.exceptions.HTTPError as exc:
- if not args["quiet"]:
- print(exc.response.text)
- exit(1)
- if "views" not in doc:
- if not args["quiet"]:
- print("Skipping {}/{}, no views found".format(db, ddoc))
- continue
- # only need to refresh a single view per ddoc
- if not args["quiet"]:
- print("Refreshing views in {}/{}...".format(db, ddoc))
- view = list(doc["views"].keys())[0]
- try:
- req = requests.get(
- "http://127.0.0.1:{}/{}/_design/{}/_view/{}".format(
- args["clustered_port"], db, ddoc, view
- ),
- params={"limit": 1},
- auth=args["creds"],
- timeout=float(args["timeout"]),
- )
- except requests.exceptions.Timeout:
- if not args["quiet"]:
- print("Timeout, view is processing. Moving on.")
- except requests.exceptions.HTTPError as exc:
- if not args["quiet"]:
- print(exc.response.text)
- exit(1)
-
-
-def _delete(args):
- args = _args(args)
- if args["all_dbs"]:
- args["include_system_dbs"] = False
- dbs = _do_list(args)
- else:
- dbs = args["dbs"]
- for db in dbs:
- db = quote(db, safe="")
- local_url = "http://127.0.0.1:{}/{}".format(args["local_port"], db)
- clus_url = "http://127.0.0.1:{}/{}".format(args["clustered_port"], db)
- try:
- req = requests.get(local_url, auth=args["creds"])
- req.raise_for_status()
- req = _tojson(req)
- local_docs = req["doc_count"]
- req = requests.get(clus_url, auth=args["creds"])
- req.raise_for_status()
- req = _tojson(req)
- clus_docs = req["doc_count"]
- if clus_docs < local_docs and not args["force"]:
- if not args["quiet"]:
- print(
- "Clustered DB has less docs than local version!"
- + " Skipping..."
- )
- continue
- if not args["quiet"]:
- print("Deleting " + db + "...")
- req = requests.delete(
- "http://127.0.0.1:{}/{}".format(args["local_port"], db),
- auth=args["creds"],
- )
- req.raise_for_status()
- except requests.exceptions.HTTPError as exc:
- if not args["quiet"]:
- print(exc.response.text)
- exit(1)
-
-
-def main(argv):
- """Kindly do the needful."""
- parser = argparse.ArgumentParser(
- prog="couchup",
- formatter_class=argparse.RawDescriptionHelpFormatter,
- description=textwrap.dedent(
- """\
- Migrate CouchDB 1.x databases to CouchDB 2.x.
-
- Specify a subcommand and -h or --help for more help.
- """
- ),
- )
-
- subparsers = parser.add_subparsers()
-
- parser_list = subparsers.add_parser(
- "list",
- help="lists all CouchDB 1.x databases",
- formatter_class=argparse.RawTextHelpFormatter,
- description=textwrap.dedent(
- """\
- Examples:
- couchup list
- couchup list -c -i -p mysecretpassword
- """
- ),
- )
- parser_list.add_argument(
- "-c",
- "--clustered",
- action="store_true",
- help="show clustered (2.x) databases instead",
- )
- parser_list.add_argument(
- "-i",
- "--include-system-dbs",
- action="store_true",
- help="include system databases (_users, _replicator, etc.)",
- )
- parser_list.add_argument(
- "-l", "--login", default="admin", help="specify login (default admin)"
- )
- parser_list.add_argument("-p", "--password", help="specify password")
- parser_list.add_argument(
- "--local-port", default=5986, help="override local port (default 5986)"
- )
- parser_list.add_argument(
- "--clustered-port", default=5984, help="override clustered port (default 5984)"
- )
- parser_list.set_defaults(func=_list)
-
- parser_replicate = subparsers.add_parser(
- "replicate",
- help="replicates one or more 1.x databases to CouchDB 2.x",
- formatter_class=argparse.RawTextHelpFormatter,
- description=textwrap.dedent(
- """\
- Examples:
- couchup replicate movies
- couchup replicate -f lots_of_deleted_docs_db
- couchup replicate -i -q -n _users
-
- Note:
- The -f/--filter-deleted option adds a replication filter
- to the source database, _design/repl_filters, that
- is used during replication to filter out deleted
- documents. This can greatly reduce the size of your
- 2.x database if there are many deleted documents.
-
- It is IMPORTANT that no documents be deleted from the 1.x
- database during this process, or those deletions may not
- successfully replicate to the 2.x database.
- """
- ),
- )
- parser_replicate.add_argument(
- "-a", "--all-dbs", action="store_true", help="act on all databases available"
- )
- parser_replicate.add_argument(
- "-i",
- "--include-system-dbs",
- action="store_true",
- help="include system databases (_users, _replicator, etc.)",
- )
- parser_replicate.add_argument(
- "-q", "--quiet", action="store_true", help="suppress all output"
- )
- parser_replicate.add_argument(
- "-n",
- "--hide-progress-bar",
- action="store_true",
- help="suppress progress bar display",
- )
- parser_replicate.add_argument(
- "-f",
- "--filter-deleted",
- action="store_true",
- help="filter deleted document tombstones during replication",
- )
- parser_replicate.add_argument(
- "-t",
- "--timeout",
- default=30,
- help="stalled replication timeout threshold in s (def: 30)",
- )
- parser_replicate.add_argument(
- "-l", "--login", default="admin", help="specify login (default admin)"
- )
- parser_replicate.add_argument("-p", "--password", help="specify password")
- parser_replicate.add_argument(
- "--local-port", default=5986, help="override local port (default 5986)"
- )
- parser_replicate.add_argument(
- "--clustered-port", default=5984, help="override clustered port (default 5984)"
- )
- parser_replicate.add_argument(
- "dbs", metavar="db", type=str, nargs="*", help="database(s) to be processed"
- )
- parser_replicate.set_defaults(func=_replicate)
-
- parser_rebuild = subparsers.add_parser(
- "rebuild",
- help="rebuilds one or more CouchDB 2.x views",
- formatter_class=argparse.RawTextHelpFormatter,
- description=textwrap.dedent(
- """\
- Examples:
- couchup rebuild movies
- couchup rebuild movies by_name
- couchup rebuild -a -q -p mysecretpassword
- """
- ),
- )
- parser_rebuild.add_argument(
- "-a", "--all-dbs", action="store_true", help="act on all databases available"
- )
- parser_rebuild.add_argument(
- "-q", "--quiet", action="store_true", help="suppress all output"
- )
- parser_rebuild.add_argument(
- "-t",
- "--timeout",
- default=5,
- help="timeout for waiting for view rebuild in s (default: 5)",
- )
- parser_rebuild.add_argument(
- "-i",
- "--include-system-dbs",
- action="store_true",
- help="include system databases (_users, _replicator, etc.)",
- )
- parser_rebuild.add_argument(
- "-l", "--login", default="admin", help="specify login (default admin)"
- )
- parser_rebuild.add_argument("-p", "--password", help="specify password")
- parser_rebuild.add_argument(
- "--local-port", default=5986, help="override local port (default 5986)"
- )
- parser_rebuild.add_argument(
- "--clustered-port", default=5984, help="override clustered port (default 5984)"
- )
- parser_rebuild.add_argument(
- "db", metavar="db", type=str, nargs="?", help="database to be processed"
- )
- parser_rebuild.add_argument(
- "views",
- metavar="view",
- type=str,
- nargs="*",
- help="view(s) to be processed (all by default)",
- )
- parser_rebuild.set_defaults(func=_rebuild)
-
- parser_delete = subparsers.add_parser(
- "delete",
- help="deletes one or more CouchDB 1.x databases",
- formatter_class=argparse.RawTextHelpFormatter,
- description=textwrap.dedent(
- """\
- Examples:
- couchup delete movies
- couchup delete -q -p mysecretpassword movies
- """
- ),
- )
- parser_delete.add_argument(
- "-a", "--all-dbs", action="store_true", help="act on all databases available"
- )
- parser_delete.add_argument(
- "-f",
- "--force",
- action="store_true",
- help="force deletion even if 1.x and 2.x databases are not identical",
- )
- parser_delete.add_argument(
- "-q", "--quiet", action="store_true", help="suppress all output"
- )
- parser_delete.add_argument(
- "-l", "--login", default="admin", help="specify login (default admin)"
- )
- parser_delete.add_argument("-p", "--password", help="specify password")
- parser_delete.add_argument(
- "--local-port", default=5986, help="override local port (default 5986)"
- )
- parser_delete.add_argument(
- "--clustered-port", default=5984, help="override clustered port (default 5984)"
- )
- parser_delete.add_argument(
- "dbs", metavar="db", type=str, nargs="*", help="database(s) to be processed"
- )
- parser_delete.set_defaults(func=_delete)
-
- args = parser.parse_args(argv[1:])
- try:
- args.func(args)
- except AttributeError:
- parser.print_help()
- sys.exit(0)
-
-
-if __name__ == "__main__":
- main(sys.argv)
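
The deleted couchup helper drove CouchDB's own /_replicate endpoint to move
node-local (1.x-style, port 5986) databases onto the clustered interface
(port 5984). For reference, a minimal sketch of its core call, reconstructed
from the deleted code above (the ports are the tool's old defaults; the
credentials here are an assumption):

    import json
    import requests

    def replicate_one(db, creds=("admin", "secret"),
                      local_port=5986, clustered_port=5984):
        # One-shot replication from the node-local DB to a clustered
        # target, creating it if needed -- the same document couchup posted.
        doc = {
            "continuous": False,
            "create_target": True,
            "source": {"url": "http://127.0.0.1:{}/{}".format(local_port, db)},
            "target": {"url": "http://127.0.0.1:{}/{}".format(clustered_port, db)},
        }
        resp = requests.post(
            "http://127.0.0.1:{}/_replicate".format(clustered_port),
            auth=creds,
            data=json.dumps(doc),
            headers={"Content-type": "application/json"},
        )
        resp.raise_for_status()
        return resp.json()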
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index f5f057859..5fc8e0761 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -118,6 +118,7 @@ backlog = 512
socket_options = [{sndbuf, 262144}, {nodelay, true}]
server_options = [{recbuf, undefined}]
require_valid_user = false
+; require_valid_user_except_for_up = false
; List of headers that will be kept when the header Prefer: return=minimal is included in a request.
; If Server header is left out, Mochiweb will add its own one in.
prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETag, Server, Transfer-Encoding, Vary
@@ -214,6 +215,7 @@ port = 6984
; [rexi]
; buffer_count = 2000
; server_per_node = true
+; stream_limit = 5
; [global_changes]
; max_event_delay = 25
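
Both options added to default.ini here ship commented out. Assuming an admin
account exists, they can also be set at runtime through the node-local config
API (the "_local" node name resolves to the receiving node); a hedged sketch:

    import requests

    base = "http://127.0.0.1:5984/_node/_local/_config"
    auth = ("admin", "secret")  # assumption: local admin credentials

    # Let unauthenticated health checks reach /_up even when
    # require_valid_user = true (see the couch_httpd_auth change below).
    r = requests.put(base + "/chttpd/require_valid_user_except_for_up",
                     json="true", auth=auth)
    r.raise_for_status()

    # Override the rexi streaming limit (the new built-in default is 5).
    r = requests.put(base + "/rexi/stream_limit", json="8", auth=auth)
    r.raise_for_status()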
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl
index b5195349b..515ce6132 100644
--- a/src/couch/src/couch_httpd_auth.erl
+++ b/src/couch/src/couch_httpd_auth.erl
@@ -88,6 +88,11 @@ basic_name_pw(Req) ->
default_authentication_handler(Req) ->
default_authentication_handler(Req, couch_auth_cache).
+default_authentication_handler(#httpd{path_parts=[<<"_up">>]}=Req, AuthModule) ->
+ case config:get_boolean("chttpd", "require_valid_user_except_for_up", false) of
+ true -> Req#httpd{user_ctx=?ADMIN_USER};
+ _False -> default_authentication_handler(Req, AuthModule)
+ end;
default_authentication_handler(Req, AuthModule) ->
case basic_name_pw(Req) of
{User, Pass} ->
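
The new handler clause above matches only when the request path is exactly
/_up and, when the option is enabled, short-circuits authentication by
injecting an admin user context. A hedged sketch of the resulting behavior
under require_valid_user = true (the status codes are the expected outcomes,
not guaranteed by this diff alone):

    import requests

    # Health endpoint: expected 200 without credentials once
    # require_valid_user_except_for_up = true.
    print(requests.get("http://127.0.0.1:5984/_up").status_code)

    # Any other endpoint: expected 401, require_valid_user still applies.
    print(requests.get("http://127.0.0.1:5984/").status_code)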
diff --git a/src/couch/src/couch_sup.erl b/src/couch/src/couch_sup.erl
index ac117ea43..c4a2e6303 100644
--- a/src/couch/src/couch_sup.erl
+++ b/src/couch/src/couch_sup.erl
@@ -99,7 +99,7 @@ assert_admins() ->
++ "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n", []),
% Wait a second so the log message can make it to the log
timer:sleep(500),
- throw(admin_account_required);
+ erlang:halt(1);
_ -> ok
end.
diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl
index 986c32c0a..23a4ea107 100644
--- a/src/couch_replicator/src/couch_replicator_worker.erl
+++ b/src/couch_replicator/src/couch_replicator_worker.erl
@@ -28,8 +28,6 @@
% TODO: maybe make both buffer max sizes configurable
-define(DOC_BUFFER_BYTE_SIZE, 512 * 1024). % for remote targets
--define(MAX_BULK_ATT_SIZE, 64 * 1024).
--define(MAX_BULK_ATTS_PER_DOC, 8).
-define(STATS_DELAY, 10000000). % 10 seconds (in microseconds)
-define(MISSING_DOC_RETRY_MSEC, 2000).
@@ -334,40 +332,18 @@ maybe_flush_docs(Doc,State) ->
maybe_flush_docs(#httpdb{} = Target, Batch, Doc) ->
#batch{docs = DocAcc, size = SizeAcc} = Batch,
- case batch_doc(Doc) of
- false ->
- couch_log:debug("Worker flushing doc with attachments", []),
- case flush_doc(Target, Doc) of
- ok ->
- {Batch, couch_replicator_stats:new([{docs_written, 1}])};
- _ ->
- {Batch, couch_replicator_stats:new([{doc_write_failures, 1}])}
- end;
- true ->
- JsonDoc = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
- case SizeAcc + iolist_size(JsonDoc) of
- SizeAcc2 when SizeAcc2 > ?DOC_BUFFER_BYTE_SIZE ->
- couch_log:debug("Worker flushing doc batch of size ~p bytes", [SizeAcc2]),
- Stats = flush_docs(Target, [JsonDoc | DocAcc]),
- {#batch{}, Stats};
- SizeAcc2 ->
- Stats = couch_replicator_stats:new(),
- {#batch{docs = [JsonDoc | DocAcc], size = SizeAcc2}, Stats}
- end
+ JsonDoc = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
+ case SizeAcc + iolist_size(JsonDoc) of
+ SizeAcc2 when SizeAcc2 > ?DOC_BUFFER_BYTE_SIZE ->
+ couch_log:debug("Worker flushing doc batch of size ~p bytes", [SizeAcc2]),
+ Stats = flush_docs(Target, [JsonDoc | DocAcc]),
+ {#batch{}, Stats};
+ SizeAcc2 ->
+ Stats = couch_replicator_stats:new(),
+ {#batch{docs = [JsonDoc | DocAcc], size = SizeAcc2}, Stats}
end.
-batch_doc(#doc{atts = []}) ->
- true;
-batch_doc(#doc{atts = Atts}) ->
- (length(Atts) =< ?MAX_BULK_ATTS_PER_DOC) andalso
- lists:all(
- fun(Att) ->
- [L, Data] = couch_att:fetch([disk_len, data], Att),
- (L =< ?MAX_BULK_ATT_SIZE) andalso (Data =/= stub)
- end, Atts).
-
-
flush_docs(_Target, []) ->
couch_replicator_stats:new();
flush_docs(Target, DocList) ->
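
With the attachment special-case removed, every replicated document now takes
the same path: it is JSON-encoded with attachments inlined and accumulated
until the batch would exceed ?DOC_BUFFER_BYTE_SIZE (512 KiB), at which point
the whole batch is flushed to the target. A small Python sketch of that
accumulation rule (flush stands in for the real bulk-docs POST):

    import json

    DOC_BUFFER_BYTE_SIZE = 512 * 1024  # mirrors the Erlang define above

    def maybe_flush_docs(doc_acc, size_acc, doc, flush):
        json_doc = json.dumps(doc)
        new_size = size_acc + len(json_doc)
        if new_size > DOC_BUFFER_BYTE_SIZE:
            flush([json_doc] + doc_acc)  # flush the oversized batch
            return [], 0                 # and start a fresh one
        return [json_doc] + doc_acc, new_size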
diff --git a/src/rexi/src/rexi.erl b/src/rexi/src/rexi.erl
index ead5bee0c..170503b7c 100644
--- a/src/rexi/src/rexi.erl
+++ b/src/rexi/src/rexi.erl
@@ -211,9 +211,10 @@ stream(Msg, Limit, Timeout) ->
exit(timeout)
end.
-%% @equiv stream2(Msg, 10, 300000)
+%% @equiv stream2(Msg, 5, 300000)
stream2(Msg) ->
- stream2(Msg, 10, 300000).
+ Limit = config:get_integer("rexi", "stream_limit", 5),
+ stream2(Msg, Limit).
%% @equiv stream2(Msg, Limit, 300000)
stream2(Msg, Limit) ->
diff --git a/test/elixir/test/cookie_auth_test.exs b/test/elixir/test/cookie_auth_test.exs
index ac1110be2..b10ee84f1 100644
--- a/test/elixir/test/cookie_auth_test.exs
+++ b/test/elixir/test/cookie_auth_test.exs
@@ -95,12 +95,9 @@ defmodule CookieAuthTest do
session = use_session || login_as(user)
resp =
- Couch.get(
- "/#{db_name}/#{URI.encode(doc_id)}",
- headers: [
- Cookie: session.cookie,
- "X-CouchDB-www-Authenticate": "Cookie"
- ]
+ Couch.Session.get(
+ session,
+ "/#{db_name}/#{URI.encode(doc_id)}"
)
if use_session == nil do
@@ -125,12 +122,9 @@ defmodule CookieAuthTest do
session = use_session || login_as(user)
resp =
- Couch.put(
+ Couch.Session.put(
+ session,
"/#{db_name}/#{URI.encode(doc["_id"])}",
- headers: [
- Cookie: session.cookie,
- "X-CouchDB-www-Authenticate": "Cookie"
- ],
body: doc
)
@@ -160,12 +154,9 @@ defmodule CookieAuthTest do
session = use_session || login_as(user)
resp =
- Couch.delete(
- "/#{db_name}/#{URI.encode(doc["_id"])}",
- headers: [
- Cookie: session.cookie,
- "X-CouchDB-www-Authenticate": "Cookie"
- ]
+ Couch.Session.delete(
+ session,
+ "/#{db_name}/#{URI.encode(doc["_id"])}"
)
if use_session == nil do