From 5d92986ad177e902368c3e97261f09c9867ea784 Mon Sep 17 00:00:00 2001 From: sts Date: Wed, 8 Nov 2017 09:11:00 +0200 Subject: 404 status for non using uri in _scheduler (#953) --- src/couch_replicator/src/couch_replicator_httpd.erl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/couch_replicator/src/couch_replicator_httpd.erl b/src/couch_replicator/src/couch_replicator_httpd.erl index 0f78ce1d5..ebab1a614 100644 --- a/src/couch_replicator/src/couch_replicator_httpd.erl +++ b/src/couch_replicator/src/couch_replicator_httpd.erl @@ -82,6 +82,8 @@ handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>|Unquoted]} {error, invalid} -> throw(bad_request) end; +handle_scheduler_req(#httpd{method='GET'} = Req) -> + send_json(Req, 404, {[{error, <<"not found">>}]}); handle_scheduler_req(Req) -> send_method_not_allowed(Req, "GET,HEAD"). -- cgit v1.2.1 From 687ff881d60eccc8308f0467cc7e68ee5342a033 Mon Sep 17 00:00:00 2001 From: Joan Touzet Date: Sat, 11 Nov 2017 23:06:27 -0500 Subject: Revert to ubuntu Jenkins label, uids now standardised --- Jenkinsfile | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index fed976afc..221e96f45 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -12,6 +12,7 @@ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +// jenkins user == uid 910 for reference pipeline { // no top-level agent; agents must be declared for each stage agent none @@ -33,7 +34,7 @@ pipeline { // each time. Instead, manually insert docker pull then run with the // the docker image. node { - label 'couchdbtest' + label 'ubuntu' } } steps { @@ -78,7 +79,7 @@ pipeline { stage('Test') { steps { parallel(centos6erlang183: { - node(label: 'couchdbtest') { + node(label: 'ubuntu') { timeout(time: 60, unit: "MINUTES") { sh 'docker pull couchdbdev/centos-6-erlang-18.3' withDockerContainer(image: 'couchdbdev/centos-6-erlang-18.3', args: '-e LD_LIBRARY_PATH=/usr/local/bin') { @@ -118,7 +119,7 @@ pipeline { } // node }, centos7erlangdefault: { - node(label: 'couchdbtest') { + node(label: 'ubuntu') { timeout(time: 45, unit: "MINUTES") { sh 'docker pull couchdbdev/centos-7-erlang-default' withDockerContainer(image: 'couchdbdev/centos-7-erlang-default', args: '-e LD_LIBRARY_PATH=/usr/local/bin') { @@ -143,7 +144,7 @@ pipeline { } // node }, centos7erlang183: { - node(label: 'couchdbtest') { + node(label: 'ubuntu') { timeout(time: 60, unit: "MINUTES") { sh 'docker pull couchdbdev/centos-7-erlang-18.3' withDockerContainer(image: 'couchdbdev/centos-7-erlang-18.3', args: '-e LD_LIBRARY_PATH=/usr/local/bin') { @@ -183,7 +184,7 @@ pipeline { } // node }, ubuntu1404erlangdefault: { - node(label: 'couchdbtest') { + node(label: 'ubuntu') { timeout(time: 45, unit: "MINUTES") { sh 'docker pull couchdbdev/ubuntu-14.04-erlang-default' withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-default') { @@ -206,7 +207,7 @@ pipeline { } // node }, ubuntu1404erlang183: { - node(label: 'couchdbtest') { + node(label: 'ubuntu') { timeout(time: 60, unit: "MINUTES") { sh 'docker pull couchdbdev/ubuntu-14.04-erlang-18.3' withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-18.3') { @@ -246,7 +247,7 @@ pipeline { } // node }, ubuntu1604erlangdefault: { - node(label: 'couchdbtest') { + node(label: 'ubuntu') { timeout(time: 45, unit: "MINUTES") { sh 'docker pull couchdbdev/ubuntu-16.04-erlang-default' withDockerContainer(image: 
'couchdbdev/ubuntu-16.04-erlang-default') { @@ -269,7 +270,7 @@ pipeline { } // node }, ubuntu1604erlang183: { - node(label: 'couchdbtest') { + node(label: 'ubuntu') { timeout(time: 60, unit: "MINUTES") { sh 'docker pull couchdbdev/ubuntu-16.04-erlang-18.3' withDockerContainer(image: 'couchdbdev/ubuntu-16.04-erlang-18.3') { @@ -309,7 +310,7 @@ pipeline { } // node }, debian8erlangdefault: { - node(label: 'couchdbtest') { + node(label: 'ubuntu') { timeout(time: 45, unit: "MINUTES") { sh 'docker pull couchdbdev/debian-8-erlang-default' withDockerContainer(image: 'couchdbdev/debian-8-erlang-default') { @@ -332,7 +333,7 @@ pipeline { } // node }, debian8erlang183: { - node(label: 'couchdbtest') { + node(label: 'ubuntu') { timeout(time: 60, unit: "MINUTES") { sh 'docker pull couchdbdev/debian-8-erlang-18.3' withDockerContainer(image: 'couchdbdev/debian-8-erlang-18.3') { @@ -372,7 +373,7 @@ pipeline { } // node }, debian9erlangdefault: { - node(label: 'couchdbtest') { + node(label: 'ubuntu') { timeout(time: 45, unit: "MINUTES") { sh 'docker pull couchdbdev/debian-9-erlang-default' withDockerContainer(image: 'couchdbdev/debian-9-erlang-default') { @@ -395,7 +396,7 @@ pipeline { } // node }, debian9erlang183: { - node(label: 'couchdbtest') { + node(label: 'ubuntu') { timeout(time: 60, unit: "MINUTES") { sh 'docker pull couchdbdev/debian-9-erlang-18.3' withDockerContainer(image: 'couchdbdev/debian-9-erlang-18.3') { @@ -447,7 +448,7 @@ pipeline { // each time. Instead, manually insert docker pull then run with the // the docker image. node { - label 'couchdbtest' + label 'ubuntu' } } steps { -- cgit v1.2.1 From 563b9049ab0cc989ece4a26f931933db926027f0 Mon Sep 17 00:00:00 2001 From: Will Holley Date: Tue, 14 Nov 2017 13:20:15 +0000 Subject: Fix Mango text index tests (#971) The text index tests are not routinely run by the Couch CI (due to an external dependency that isn't shipped with Couch). This fixes a number of tests that were broken as a result of recent feature changes. 
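For orientation, these suites drive Mango's text indexes through the standard _find endpoint. A minimal standalone sketch of the kind of $elemMatch query the fixed tests issue; the URL, database name, and documents are illustrative assumptions, and a text/search service must be attached for text indexes to work:

    import requests

    # Illustrative sketch: assumes CouchDB at localhost:5984 with a
    # "friend_docs" database carrying a Mango text index.
    resp = requests.post(
        "http://localhost:5984/friend_docs/_find",
        json={"selector": {"results": {"$elemMatch": {"$gte": 80, "$lt": 85}}}},
    )
    resp.raise_for_status()
    for doc in resp.json()["docs"]:
        print(doc["user_id"], doc["results"])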
--- src/mango/test/05-index-selection-test.py | 8 ++---- src/mango/test/06-basic-text-test.py | 33 +++++++++++----------- .../test/10-disable-array-length-field-test.py | 2 +- src/mango/test/16-index-selectors-test.py | 2 +- 4 files changed, 22 insertions(+), 23 deletions(-) diff --git a/src/mango/test/05-index-selection-test.py b/src/mango/test/05-index-selection-test.py index 05571a7e8..49946171e 100644 --- a/src/mango/test/05-index-selection-test.py +++ b/src/mango/test/05-index-selection-test.py @@ -181,15 +181,13 @@ class JSONIndexSelectionTests(mango.UserDocsTests, IndexSelectionTests): @unittest.skipUnless(mango.has_text_service(), "requires text service") -class TextIndexSelectionTests(mango.UserDocsTests, IndexSelectionTests): +class TextIndexSelectionTests(mango.UserDocsTests): @classmethod def setUpClass(klass): super(TextIndexSelectionTests, klass).setUpClass() - - def setUp(self): - self.db.recreate() - user_docs.add_text_indexes(self.db, {}) + if mango.has_text_service(): + user_docs.add_text_indexes(klass.db, {}) def test_with_text(self): resp = self.db.find({ diff --git a/src/mango/test/06-basic-text-test.py b/src/mango/test/06-basic-text-test.py index c02950c46..3783006ab 100644 --- a/src/mango/test/06-basic-text-test.py +++ b/src/mango/test/06-basic-text-test.py @@ -450,14 +450,14 @@ class ElemMatchTests(mango.FriendDocsTextTests): } } docs = self.db.find(q) - assert len(docs) == 1 - assert docs[0]["bestfriends"] == ["Wolverine", "Cyclops"] + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["bestfriends"], ["Wolverine", "Cyclops"]) q = {"results": {"$elemMatch": {"$gte": 80, "$lt": 85}}} docs = self.db.find(q) - assert len(docs) == 1 - assert docs[0]["results"] == [82, 85, 88] + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["results"], [82, 85, 88]) def test_elem_match(self): q = {"friends": { @@ -466,9 +466,9 @@ class ElemMatchTests(mango.FriendDocsTextTests): } } docs = self.db.find(q) - assert len(docs) == 2 + self.assertEqual(len(docs), 2) for d in docs: - assert d["user_id"] in (0, 1) + self.assertIn(d["user_id"], (0, 1)) q = { "friends": { @@ -479,8 +479,8 @@ class ElemMatchTests(mango.FriendDocsTextTests): } } docs = self.db.find(q) - assert len(docs) == 1 - assert docs[0]["user_id"] == 4 + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["user_id"], 4) # Check that we can do logic in elemMatch @@ -490,8 +490,9 @@ class ElemMatchTests(mango.FriendDocsTextTests): }} } docs = self.db.find(q) - assert len(docs) == 1 - assert docs[0]["user_id"] == 1 + self.assertEqual(len(docs), 2) + for d in docs: + self.assertIn(d["user_id"], (1, 15)) q = { "friends": { @@ -505,9 +506,9 @@ class ElemMatchTests(mango.FriendDocsTextTests): } } docs = self.db.find(q) - assert len(docs) == 2 + self.assertEqual(len(docs), 3) for d in docs: - assert d["user_id"] in (1, 4) + self.assertIn(d["user_id"], (1, 4, 15)) # Same as last, but using $in q = { @@ -519,9 +520,9 @@ class ElemMatchTests(mango.FriendDocsTextTests): } } docs = self.db.find(q) - assert len(docs) == 2 + self.assertEqual(len(docs), 3) for d in docs: - assert d["user_id"] in (1, 4) + self.assertIn(d["user_id"], (1, 4, 15)) q = { "$and": [{ @@ -564,9 +565,9 @@ class ElemMatchTests(mango.FriendDocsTextTests): ] } docs = self.db.find(q) - assert len(docs) == 3 + self.assertEqual(len(docs), 3) for d in docs: - assert d["user_id"] in (10, 11,12) + self.assertIn(d["user_id"], (10, 11,12)) @unittest.skipUnless(mango.has_text_service(), "requires text service") class 
AllMatchTests(mango.FriendDocsTextTests): diff --git a/src/mango/test/10-disable-array-length-field-test.py b/src/mango/test/10-disable-array-length-field-test.py index ce7713b63..6b6d41926 100644 --- a/src/mango/test/10-disable-array-length-field-test.py +++ b/src/mango/test/10-disable-array-length-field-test.py @@ -16,7 +16,7 @@ import unittest @unittest.skipUnless(mango.has_text_service(), "requires text service") class DisableIndexArrayLengthsTest(mango.UserDocsTextTests): - def setUp(klass): + def setUp(self): self.db.recreate() self.db.create_text_index(ddoc="disable_index_array_lengths", analyzer="keyword", diff --git a/src/mango/test/16-index-selectors-test.py b/src/mango/test/16-index-selectors-test.py index 6d771cc4b..389f5f41e 100644 --- a/src/mango/test/16-index-selectors-test.py +++ b/src/mango/test/16-index-selectors-test.py @@ -273,6 +273,6 @@ class IndexSelectorJson(mango.DbPerClass): @unittest.skipUnless(mango.has_text_service(), "requires text service") def test_text_partial_filter_only_in_return_if_not_default(self): - self.db.create_text_index(fields=[{"name":"location"}]) + self.db.create_text_index(fields=[{"name":"location", "type":"string"}]) index = self.db.list_indexes()[1] self.assertEqual('partial_filter_selector' in index['def'], False) -- cgit v1.2.1 From 16370843adbf5e9df1e6496398db4b9bac5ef8f3 Mon Sep 17 00:00:00 2001 From: Tony Sun Date: Tue, 14 Nov 2017 10:35:42 -0800 Subject: fix exists false when field is explicitly defined (#863) When users explicitly defined a text index's fields, and used $exists with false, we tag false when constructing the query. This led to a function clause for indexable_fields since we did not account for it. This fix patches that up, but note that we don't care about the false value itself since we only care about fields. 
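To see what this fix enables end to end, here is a sketch of the query shape that used to crash (database and field names are assumptions, mirroring the new test below): a selector using "$exists": false against an explicitly listed text index field should now fall back to _all_docs instead of failing with a function_clause error:

    import requests

    # Sketch: assumes a database "user_docs" with a text index that
    # explicitly lists "twitter" among its fields.
    resp = requests.post(
        "http://localhost:5984/user_docs/_find",
        json={"selector": {"twitter": {"$exists": False}}},
    )
    resp.raise_for_status()
    # Served via _all_docs, since $exists:false cannot be answered by
    # the text index; results match the equivalent view-based query.
    print(len(resp.json()["docs"]))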
--- src/mango/src/mango_idx_text.erl | 3 ++ src/mango/test/07-text-custom-field-list-test.py | 50 ++++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/src/mango/src/mango_idx_text.erl b/src/mango/src/mango_idx_text.erl index 1d5ae9bad..369e2cd08 100644 --- a/src/mango/src/mango_idx_text.erl +++ b/src/mango/src/mango_idx_text.erl @@ -329,6 +329,9 @@ indexable_fields(Fields, {op_or, Args}) when is_list(Args) -> indexable_fields(Fields, {op_not, {ExistsQuery, Arg}}) when is_tuple(Arg) -> Fields0 = indexable_fields(Fields, ExistsQuery), indexable_fields(Fields0, Arg); +% forces "$exists" : false to use _all_docs +indexable_fields(Fields, {op_not, {ExistsQuery, false}}) -> + []; indexable_fields(Fields, {op_insert, Arg}) when is_binary(Arg) -> Fields; diff --git a/src/mango/test/07-text-custom-field-list-test.py b/src/mango/test/07-text-custom-field-list-test.py index a43e33003..9bfe07598 100644 --- a/src/mango/test/07-text-custom-field-list-test.py +++ b/src/mango/test/07-text-custom-field-list-test.py @@ -12,6 +12,7 @@ import mango import unittest +import user_docs @unittest.skipUnless(mango.has_text_service(), "requires text service") @@ -160,3 +161,52 @@ class CustomFieldsTest(mango.UserDocsTextTests): }) assert len(docs) == 1 assert docs[0]["user_id"] == 10 + +@unittest.skipUnless(mango.has_text_service(), "requires text service") +class CustomFieldsExistsTest(mango.UserDocsTextTests): + + FIELDS = [ + {"name": "exists_field", "type": "string"}, + {"name": "exists_array.[]", "type": "string"}, + {"name": "exists_object.should", "type": "string"}, + {"name": "twitter", "type": "string"} + ] + + def test_exists_field(self): + docs = self.db.find({"exists_field": {"$exists": True}}) + self.assertEqual(len(docs), 2) + for d in docs: + self.assertIn(d["user_id"], (7, 8)) + + docs = self.db.find({"exists_field": {"$exists": False}}) + self.assertEqual(len(docs), len(user_docs.DOCS) - 2) + for d in docs: + self.assertNotIn(d["user_id"], (7, 8)) + + def test_exists_array(self): + docs = self.db.find({"exists_array": {"$exists": True}}) + self.assertEqual(len(docs), 2) + for d in docs: + self.assertIn(d["user_id"], (9, 10)) + + docs = self.db.find({"exists_array": {"$exists": False}}) + self.assertEqual(len(docs), len(user_docs.DOCS) - 2) + for d in docs: + self.assertNotIn(d["user_id"], (9, 10)) + + def test_exists_object_member(self): + docs = self.db.find({"exists_object.should": {"$exists": True}}) + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["user_id"], 11) + + docs = self.db.find({"exists_object.should": {"$exists": False}}) + self.assertEqual(len(docs), len(user_docs.DOCS) - 1) + for d in docs: + self.assertNotEqual(d["user_id"], 11) + + def test_exists_false_same_as_views(self): + docs = self.db.find({ + "twitter": {"$exists": False} + }) + for d in docs: + self.assertNotIn(d["user_id"], (0, 1, 4, 13)) -- cgit v1.2.1 From ead77b26d559415e4bdd302ffae50027112567f4 Mon Sep 17 00:00:00 2001 From: Will Holley Date: Wed, 15 Nov 2017 14:43:01 +0000 Subject: Throw error on invalid use_index value (#989) Throw an error when a user specifies a value in "use_index", of the form [<ddoc>, <name>], that cannot be used for the current query selector. 
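A sketch of the new behaviour over HTTP (the ddoc/index name pair is taken from the test fixtures below; the URL is an assumption): asking for an index on ["company", "manager"] when the selector only constrains "company" now yields a 400 rather than an arbitrary fallback:

    import requests

    ddoc = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198"
    name = "a0c425a60cf3c3c09e3c537c9ef20059dcef9198"
    resp = requests.post(
        "http://localhost:5984/user_docs/_find",
        json={"selector": {"company": "Pharmex"}, "use_index": [ddoc, name]},
    )
    print(resp.status_code)  # expected: 400 (no_usable_index)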
Fixes #988 --- src/mango/src/mango_cursor.erl | 4 ++-- src/mango/test/05-index-selection-test.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl index 98b2d52bd..7997d9ada 100644 --- a/src/mango/src/mango_cursor.erl +++ b/src/mango/src/mango_cursor.erl @@ -50,11 +50,11 @@ create(Db, Selector0, Opts) -> {use_index, IndexSpecified} = proplists:lookup(use_index, Opts), case {length(UsableIndexes), length(IndexSpecified)} of - {0, 1} -> - ?MANGO_ERROR({no_usable_index, selector_unsupported}); {0, 0} -> AllDocs = mango_idx:special(Db), create_cursor(Db, AllDocs, Selector, Opts); + {0, _} -> + ?MANGO_ERROR({no_usable_index, selector_unsupported}); _ -> create_cursor(Db, UsableIndexes, Selector, Opts) end. diff --git a/src/mango/test/05-index-selection-test.py b/src/mango/test/05-index-selection-test.py index 49946171e..fe36257e3 100644 --- a/src/mango/test/05-index-selection-test.py +++ b/src/mango/test/05-index-selection-test.py @@ -91,6 +91,20 @@ class IndexSelectionTests: else: raise AssertionError("did not reject bad use_index") + def test_reject_use_index_ddoc_and_name_invalid_fields(self): + # index on ["company","manager"] which should not be valid + ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198" + name = "a0c425a60cf3c3c09e3c537c9ef20059dcef9198" + selector = { + "company": "Pharmex" + } + try: + self.db.find(selector, use_index=[ddocid,name]) + except Exception as e: + self.assertEqual(e.response.status_code, 400) + else: + raise AssertionError("did not reject bad use_index") + def test_reject_use_index_sort_order(self): # index on ["company","manager"] which should not be valid ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198" @@ -105,6 +119,21 @@ class IndexSelectionTests: else: raise AssertionError("did not reject bad use_index") + def test_reject_use_index_ddoc_and_name_sort_order(self): + # index on ["company","manager"] which should not be valid + ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198" + name = "a0c425a60cf3c3c09e3c537c9ef20059dcef9198" + selector = { + "company": {"$gt": None}, + "manager": {"$gt": None} + } + try: + self.db.find(selector, use_index=[ddocid,name], sort=[{"manager":"desc"}]) + except Exception as e: + self.assertEqual(e.response.status_code, 400) + else: + raise AssertionError("did not reject bad use_index") + # This doc will not be saved given the new ddoc validation code # in couch_mrview def test_manual_bad_view_idx01(self): -- cgit v1.2.1 From 8b4e92ae240d82837a0dede78fe5c1f664486f1c Mon Sep 17 00:00:00 2001 From: Will Holley Date: Thu, 16 Nov 2017 08:21:56 +0000 Subject: Improve Mango test suite performance (#995) * Remove artificial delays from database and index create/delete. * Wait for indexes to report as created/deleted during test setup. * Skip unnecessary database delete/create cycles. * Default to n=1 when creating test databases. We don't have tests that explicitly test n=3 scenarios and the tests generally run on a single-node harness. Defaulting to n=1 allows the test behaviour to be consistent when run on multi-node clusters. * Add delay on cluster setup for Mango tests to mitigate tests running before async cluster setup completes. 
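The recurring pattern in this change is polling for a condition instead of sleeping a fixed interval. A generic sketch of that idea (the timeout guard is an addition not present in the patch itself):

    import time

    def wait_for(predicate, timeout=30.0, interval=0.1):
        # Poll until predicate() is truthy, as the create_index and
        # delete_index changes below do, rather than sleeping blindly.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return
            time.sleep(interval)
        raise AssertionError("condition not met within %.1fs" % timeout)

    # e.g. wait_for(lambda: len(db.get_index(ddocid, name)) == 1)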
--- src/mango/test/mango.py | 39 +++++++++++++++++++++++++++------- test/build/test-run-couch-for-mango.sh | 3 +++ 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py index 03cc67c52..560914b8c 100644 --- a/src/mango/test/mango.py +++ b/src/mango/test/mango.py @@ -81,10 +81,14 @@ class Database(object): r = self.sess.delete(self.url) def recreate(self): + r = self.sess.get(self.url) + db_info = r.json() + docs = db_info["doc_count"] + db_info["doc_del_count"] + if docs == 0: + # db never used - create unnecessary + return self.delete() - delay() self.create() - delay() def save_doc(self, doc): self.save_docs([doc]) @@ -126,11 +130,17 @@ class Database(object): body["index"]["partial_filter_selector"] = partial_filter_selector body = json.dumps(body) r = self.sess.post(self.path("_index"), data=body) - delay() r.raise_for_status() assert r.json()["id"] is not None assert r.json()["name"] is not None - return r.json()["result"] == "created" + + created = r.json()["result"] == "created" + if created: + # wait until the database reports the index as available + while len(self.get_index(r.json()["id"], r.json()["name"])) < 1: + delay(t=0.1) + + return created def create_text_index(self, analyzer=None, idx_type="text", partial_filter_selector=None, default_field=None, fields=None, @@ -157,7 +167,6 @@ class Database(object): body["ddoc"] = ddoc body = json.dumps(body) r = self.sess.post(self.path("_index"), data=body) - delay() r.raise_for_status() return r.json()["result"] == "created" @@ -169,13 +178,28 @@ class Database(object): r = self.sess.get(self.path("_index?"+limit+";"+skip)) r.raise_for_status() return r.json()["indexes"] + + def get_index(self, ddocid, name): + if ddocid is None: + return [i for i in self.list_indexes() if i["name"] == name] + + ddocid = ddocid.replace("%2F", "/") + if not ddocid.startswith("_design/"): + ddocid = "_design/" + ddocid + + if name is None: + return [i for i in self.list_indexes() if i["ddoc"] == ddocid] + else: + return [i for i in self.list_indexes() if i["ddoc"] == ddocid and i["name"] == name] def delete_index(self, ddocid, name, idx_type="json"): path = ["_index", ddocid, idx_type, name] r = self.sess.delete(self.path(path), params={"w": "3"}) - delay() r.raise_for_status() + while len(self.get_index(ddocid, name)) == 1: + delay(t=0.1) + def bulk_delete(self, docs): body = { "docids" : docs, @@ -183,7 +207,6 @@ class Database(object): } body = json.dumps(body) r = self.sess.post(self.path("_index/_bulk_delete"), data=body) - delay(n=10) return r.json() def find(self, selector, limit=25, skip=0, sort=None, fields=None, @@ -245,7 +268,7 @@ class DbPerClass(unittest.TestCase): @classmethod def setUpClass(klass): klass.db = Database(random_db_name()) - klass.db.create(q=1, n=3) + klass.db.create(q=1, n=1) def setUp(self): self.db = self.__class__.db diff --git a/test/build/test-run-couch-for-mango.sh b/test/build/test-run-couch-for-mango.sh index 0597a8fca..472b19bd0 100755 --- a/test/build/test-run-couch-for-mango.sh +++ b/test/build/test-run-couch-for-mango.sh @@ -24,6 +24,9 @@ while ( [ $COUCH_STARTED -ne 0 ] ); do fi done +# wait for cluster setup to complete +sleep 5 + cd src/mango/ nosetests -- cgit v1.2.1 From 9b717814124f7209000eed451d7322dc6a73cf6a Mon Sep 17 00:00:00 2001 From: Diana Thayer Date: Fri, 17 Nov 2017 12:23:32 -0800 Subject: (typo) fetchig -> fetching --- src/couch_replicator/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/couch_replicator/README.md b/src/couch_replicator/README.md index f08ff357e..fe975c1d0 100644 --- a/src/couch_replicator/README.md +++ b/src/couch_replicator/README.md @@ -262,7 +262,7 @@ A description of each child: `error_backoff/1` is where the backoff period is calculated. Consecutive errors are held in the `errcnt` field in the ETS table. - 2. Fetchig filter code succeeds, replication ID is calculated and job is + 2. Fetching filter code succeeds, replication ID is calculated and job is added to the scheduler. However, because this is a filtered replication the source database could get an updated filter. Which means replication ID could change again. So the worker is spawned to -- cgit v1.2.1 From 44cca52393d6a0be7e6e9d8a5d6eb74717becabc Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Fri, 17 Nov 2017 17:25:59 -0500 Subject: Move cluster_start_period and cluster_quiet_period to replicator section Somehow entries ended up under [couch_per_user] but should be in the [replicator] section. --- rel/overlay/etc/default.ini | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index 745e5a8e4..0b0ab75ae 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -88,10 +88,6 @@ enable = false ; If set to true and a user is deleted, the respective database gets ; deleted as well. delete_dbs = false -; Wait this many seconds after startup before attaching changes listeners -; cluster_start_period = 5 -; Re-check cluster state at least every cluster_quiet_period seconds -; cluster_quiet_period = 60 [httpd] port = {{backend_port}} @@ -408,6 +404,10 @@ ssl_certificate_max_depth = 3 ; avoid crashing the whole replication job, which would consume more resources ; and add log noise. ;missing_doc_retry_msec = 2000 +; Wait this many seconds after startup before attaching changes listeners +; cluster_start_period = 5 +; Re-check cluster state at least every cluster_quiet_period seconds +; cluster_quiet_period = 60 [compaction_daemon] ; The delay, in seconds, between each check for which database and view indexes -- cgit v1.2.1 From 7c789228bebfab7ea07f4181541fa57e1e744208 Mon Sep 17 00:00:00 2001 From: Diana Thayer Date: Mon, 20 Nov 2017 16:55:37 -0800 Subject: Update COMMITTERS.md --- COMMITTERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/COMMITTERS.md b/COMMITTERS.md index 25866b857..7412efcb5 100644 --- a/COMMITTERS.md +++ b/COMMITTERS.md @@ -32,7 +32,7 @@ mean this in the sense of being loyal to the project and its interests. * Klaus Trainer * Benjamin Young * Robert Kowalski - * Max Thayer + * Diana Thayer * Gianugo Rabellino * Jenn Schiffer * Lena Reinhard -- cgit v1.2.1 From bdaeaff948b90d30686104349b8f01c19f0c482b Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Mon, 20 Nov 2017 12:29:43 -0500 Subject: Fix replicator cluster stability race condition Replicator clustering module is in charge of keeping track of when the cluster is stable or unstable. A cluster is said to be "stable" if there aren't any new nodes added or removed for some period of time. Replicator document processor uses cluster state to determine what to do with document updates as they come in. If a document update comes during a time when cluster is unstable, the update is skipped. The idea is that when cluster becomes stable again, all the documents will be rescanned anyway. 
In order for document updates to not be dropped, there is an implicit constraint when the cluster becomes stable -- after the `couch_replicator_notifier` gen_event broadcasts the `{cluster, stable}` event, any subsequent calls to `couch_replicator_clustering:is_stable()` would return `true`. If that's not the case, then this sequence of events is possible: 1. `mem3_cluster` process calls the `cluster_stable` callback 2. `couch_replicator_notifier` broadcasts `{cluster, stable}` event 3. `couch_replicator_doc_processor` starts processing documents 4. On first document update `couch_replicator_clustering:is_stable()` is `false`, because that gen_server wasn't notified yet. 5. Document update is dropped. 6. There won't be any rescans until cluster membership is changed again. To fix this, switch to setting the stable state first via a `gen_server` call. This way, once the `{cluster, stable}` event has been broadcast, `is_stable()` is guaranteed to return `true`. Note: This issue is mostly theoretical. It was noticed when examining the code related to another bug. The chance of the clustering process going to sleep immediately after the gen_event broadcast, then not handling the cast long enough for a document to be processed by the doc processor, is pretty low in practice. --- .../src/couch_replicator_clustering.erl | 73 +++++++++++++++++++--- 1 file changed, 63 insertions(+), 10 deletions(-) diff --git a/src/couch_replicator/src/couch_replicator_clustering.erl b/src/couch_replicator/src/couch_replicator_clustering.erl index 3d5229b9f..a7f7573b6 100644 --- a/src/couch_replicator/src/couch_replicator_clustering.erl +++ b/src/couch_replicator/src/couch_replicator_clustering.erl @@ -114,17 +114,17 @@ link_cluster_event_listener(Mod, Fun, Args) % Mem3 cluster callbacks cluster_unstable(Server) -> + ok = gen_server:call(Server, set_unstable), couch_replicator_notifier:notify({cluster, unstable}), couch_stats:update_gauge([couch_replicator, cluster_is_stable], 0), couch_log:notice("~s : cluster unstable", [?MODULE]), - gen_server:cast(Server, cluster_unstable), Server. cluster_stable(Server) -> + ok = gen_server:call(Server, set_stable), couch_replicator_notifier:notify({cluster, stable}), couch_stats:update_gauge([couch_replicator, cluster_is_stable], 1), couch_log:notice("~s : cluster stable", [?MODULE]), - gen_server:cast(Server, cluster_stable), Server. @@ -147,18 +147,18 @@ terminate(_Reason, _State) -> handle_call(is_stable, _From, #state{cluster_stable = IsStable} = State) -> - {reply, IsStable, State}. + {reply, IsStable, State}; +handle_call(set_stable, _From, State) -> + {reply, ok, State#state{cluster_stable = true}}; -handle_cast({set_period, Period}, #state{mem3_cluster_pid = Pid} = State) -> - ok = mem3_cluster:set_period(Pid, Period), - {noreply, State}; +handle_call(set_unstable, _From, State) -> + {reply, ok, State#state{cluster_stable = false}}. -handle_cast(cluster_stable, State) -> - {noreply, State#state{cluster_stable = true}}; -handle_cast(cluster_unstable, State) -> - {noreply, State#state{cluster_stable = false}}. +handle_cast({set_period, Period}, #state{mem3_cluster_pid = Pid} = State) -> + ok = mem3_cluster:set_period(Pid, Period), + {noreply, State}. handle_info(restart_config_listener, State) -> @@ -193,3 +193,56 @@ owner_int(ShardName, DocId) -> Shards = mem3:shards(DbName, DocId), Nodes = [N || #shard{node=N} <- Shards, lists:member(N, Live)], mem3:owner(DbName, DocId, Nodes). + + + +-ifdef(TEST). + +-include_lib("eunit/include/eunit.hrl"). 
+ + +replicator_clustering_test_() -> + { + foreach, + fun setup/0, + fun teardown/1, + [ + t_stable_callback(), + t_unstable_callback() + ] + }. + + +t_stable_callback() -> + ?_test(begin + ?assertEqual(false, is_stable()), + cluster_stable(whereis(?MODULE)), + ?assertEqual(true, is_stable()) + end). + + +t_unstable_callback() -> + ?_test(begin + cluster_stable(whereis(?MODULE)), + ?assertEqual(true, is_stable()), + cluster_unstable(whereis(?MODULE)), + ?assertEqual(false, is_stable()) + end). + + +setup() -> + meck:expect(couch_log, notice, 2, ok), + meck:expect(config, get, fun(_, _, Default) -> Default end), + meck:expect(config, listen_for_changes, 2, ok), + meck:expect(couch_stats, update_gauge, 2, ok), + meck:expect(couch_replicator_notifier, notify, 1, ok), + {ok, Pid} = start_link(), + Pid. + + +teardown(Pid) -> + unlink(Pid), + exit(Pid, kill), + meck:unload(). + +-endif. -- cgit v1.2.1 From ede5dd9675285157410311aa8e2ed01c7f5e597e Mon Sep 17 00:00:00 2001 From: Will Holley Date: Thu, 23 Nov 2017 09:04:17 +0100 Subject: Fix index validation for nested $and (#1014) mango_selector:has_required_fields checks that a list of indexed fields is covered by a given selector. The implementation recurses through the selector, tracking the fields it encounters. Unfortunately, this skipped peers of combination operators. For example, "selector": {"$and": [{"$and": [{"A": "foo"}]}, {"$and": [{"B": "bar"}]}]} would skip the first nested "$and" operator and only return "B" as a covered field. This commit explicitly handles this situation (the only combination operator we care about is $and), so for the above selector we would correctly identify "A" and "B" as covered fields. --- src/mango/src/mango_selector.erl | 57 ++++++++++++++++++++++------- src/mango/test/05-index-selection-test.py | 10 ++++++ 2 files changed, 54 insertions(+), 13 deletions(-) diff --git a/src/mango/src/mango_selector.erl b/src/mango/src/mango_selector.erl index 4ff36945a..2a546c9ba 100644 --- a/src/mango/src/mango_selector.erl +++ b/src/mango/src/mango_selector.erl @@ -578,36 +578,46 @@ match({[_, _ | _] = _Props} = Sel, _Value, _Cmp) -> % until we match then all or run out of selector to % match against. +has_required_fields(Selector, RequiredFields) -> + Remainder = has_required_fields_int(Selector, RequiredFields), + Remainder == []. + % Empty selector -has_required_fields({[]}, _) -> - false; +has_required_fields_int({[]}, Remainder) -> + Remainder; % No more required fields -has_required_fields(_, []) -> - true; +has_required_fields_int(_, []) -> + []; % No more selector -has_required_fields([], _) -> - false; +has_required_fields_int([], Remainder) -> + Remainder; -has_required_fields(Selector, RequiredFields) when not is_list(Selector) -> - has_required_fields([Selector], RequiredFields); +has_required_fields_int(Selector, RequiredFields) when not is_list(Selector) -> + has_required_fields_int([Selector], RequiredFields); % We can "see" through $and operator. We ignore other % combination operators because they can't be used to restrict % an index. 
-has_required_fields_int([{[{<<"$and">>, Args}]}], RequiredFields) +has_required_fields_int([{[{<<"$and">>, Args}]}], RequiredFields) when is_list(Args) -> - has_required_fields(Args, RequiredFields); + has_required_fields_int(Args, RequiredFields); + +% Handle $and operator where it has peers +has_required_fields_int([{[{<<"$and">>, Args}]} | Rest], RequiredFields) when is_list(Args) -> + Remainder = has_required_fields_int(Args, RequiredFields), + has_required_fields_int(Rest, Remainder); -has_required_fields([{[{Field, Cond}]} | Rest], RequiredFields) -> +has_required_fields_int([{[{Field, Cond}]} | Rest], RequiredFields) -> case Cond of % $exists:false is a special case - this is the only operator % that explicitly does not require a field to exist {[{<<"$exists">>, false}]} -> - has_required_fields(Rest, RequiredFields); + has_required_fields_int(Rest, RequiredFields); _ -> - has_required_fields(Rest, lists:delete(Field, RequiredFields)) + has_required_fields_int(Rest, lists:delete(Field, RequiredFields)) end. @@ -651,6 +661,27 @@ has_required_fields_and_true_test() -> Normalized = normalize(Selector), ?assertEqual(true, has_required_fields(Normalized, RequiredFields)). +has_required_fields_nested_and_true_test() -> + RequiredFields = [<<"A">>, <<"B">>], + Selector1 = {[{<<"$and">>, + [ + {[{<<"A">>, <<"foo">>}]} + ] + }]}, + Selector2 = {[{<<"$and">>, + [ + {[{<<"B">>, <<"foo">>}]} + ] + }]}, + Selector = {[{<<"$and">>, + [ + Selector1, + Selector2 + ] + }]}, + + ?assertEqual(true, has_required_fields(Selector, RequiredFields)). + has_required_fields_and_false_test() -> RequiredFields = [<<"A">>, <<"C">>], Selector = {[{<<"$and">>, diff --git a/src/mango/test/05-index-selection-test.py b/src/mango/test/05-index-selection-test.py index fe36257e3..f8cc82576 100644 --- a/src/mango/test/05-index-selection-test.py +++ b/src/mango/test/05-index-selection-test.py @@ -28,6 +28,16 @@ class IndexSelectionTests: }, explain=True) self.assertEqual(resp["index"]["type"], "json") + def test_with_nested_and(self): + resp = self.db.find({ + "name.first": { + "$gt": "a", + "$lt": "z" + }, + "name.last": "Foo" + }, explain=True) + self.assertEqual(resp["index"]["type"], "json") + def test_use_most_columns(self): # ddoc id for the age index ddocid = "_design/ad3d537c03cd7c6a43cf8dff66ef70ea54c2b40f" -- cgit v1.2.1 From a406cc0b6f9ebd1e7aecd258b2b015daab9393e0 Mon Sep 17 00:00:00 2001 From: Will Holley Date: Thu, 23 Nov 2017 10:26:07 +0100 Subject: Test duplicate fields in Mango selector (#998) Adds a test to verify the behaviour of duplicate fields in a Mango selector. The fix for CVE-2017-12635 resulted in CouchDB's JSON parser only recognising the last instance of duplicated fields in a JSON object. This represents a breaking change to Mango (_find) because, previously, all instances would have been considered when evaluating a selector. This test verifies that Mango now only considers the last instance of a field, silently ignoring those that appear before it. TBD whether we can or should show an error when this occurs, since this leads to predicates silently being ignored. 
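The client-side wrinkle is that Python dict literals deduplicate keys before the request is even sent, which is why the new test posts a raw JSON string. A sketch of the difference (URL assumed):

    import json
    import requests

    # A dict keeps only the last duplicate key, so the $regex clause is
    # gone before CouchDB ever sees the request:
    sel = {"location.city": {"$regex": "^L+"}, "location.city": {"$exists": True}}
    print(json.dumps(sel))  # only the $exists predicate survives

    # To exercise the server-side behaviour, the duplicate has to be
    # written into the JSON text by hand, as in the test below:
    body = ('{"selector":{"location.city":{"$regex":"^L+"},'
            '"location.city":{"$exists":true}}}')
    resp = requests.post(
        "http://localhost:5984/user_docs/_find",
        data=body,
        headers={"Content-Type": "application/json"},
    )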
--- src/mango/test/02-basic-find-test.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/mango/test/02-basic-find-test.py b/src/mango/test/02-basic-find-test.py index 82554a112..f7e151ad8 100644 --- a/src/mango/test/02-basic-find-test.py +++ b/src/mango/test/02-basic-find-test.py @@ -159,6 +159,18 @@ class BasicFindTests(mango.UserDocsTests): assert len(docs) == 1 assert docs[0]["user_id"] == 7 + def test_multi_cond_duplicate_field(self): + # need to explicitly define JSON as dict won't allow duplicate keys + body = ("{\"selector\":{\"location.city\":{\"$regex\": \"^L+\"}," + "\"location.city\":{\"$exists\":true}}}") + r = self.db.sess.post(self.db.path("_find"), data=body) + r.raise_for_status() + docs = r.json()["docs"] + + # expectation is that only the second instance + # of the "location.city" field is used + self.assertEqual(len(docs), 15) + def test_multi_cond_or(self): docs = self.db.find({ "$and":[ -- cgit v1.2.1 From 27dcd6b3eb85017d4103c89182fd11f82d1a7752 Mon Sep 17 00:00:00 2001 From: Will Holley Date: Tue, 28 Nov 2017 11:30:14 +0000 Subject: Fix _explain for sort descending (#1025) Handle the case when startkey and endkey are reversed. To create valid JSON we need to replace the internal representation of the startkey with a string. Fixes #1023 --- src/mango/src/mango_cursor_view.erl | 2 +- src/mango/test/05-index-selection-test.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl index 3fcec07be..7c57b1414 100644 --- a/src/mango/src/mango_cursor_view.erl +++ b/src/mango/src/mango_cursor_view.erl @@ -66,7 +66,7 @@ explain(Cursor) -> {include_docs, Args#mrargs.include_docs}, {view_type, Args#mrargs.view_type}, {reduce, Args#mrargs.reduce}, - {start_key, Args#mrargs.start_key}, + {start_key, maybe_replace_max_json(Args#mrargs.start_key)}, {end_key, maybe_replace_max_json(Args#mrargs.end_key)}, {direction, Args#mrargs.direction}, {stable, Args#mrargs.stable}, diff --git a/src/mango/test/05-index-selection-test.py b/src/mango/test/05-index-selection-test.py index f8cc82576..ef662a918 100644 --- a/src/mango/test/05-index-selection-test.py +++ b/src/mango/test/05-index-selection-test.py @@ -181,6 +181,14 @@ class IndexSelectionTests: self.db.save_doc(design_doc) + def test_explain_sort_reverse(self): + selector = { + "manager": {"$gt": None} + } + resp_explain = self.db.find(selector, fields=["manager"], sort=[{"manager":"desc"}], explain=True) + self.assertEqual(resp_explain["index"]["type"], "json") + + class JSONIndexSelectionTests(mango.UserDocsTests, IndexSelectionTests): @classmethod -- cgit v1.2.1 From 3e511b37bde8238918edc18c1bed9ab7ca1cbc5f Mon Sep 17 00:00:00 2001 From: jiangphcn Date: Fri, 24 Nov 2017 22:41:37 +0800 Subject: Allow replicator documents to include params for db creation - specify q in "create_target_params": {"q": "1", ...} issue-887 --- .../src/couch_replicator_api_wrap.erl | 23 ++- src/couch_replicator/src/couch_replicator_docs.erl | 5 + .../src/couch_replicator_scheduler_job.erl | 3 +- ...replicator_create_target_with_options_tests.erl | 154 +++++++++++++++++++++ 4 files changed, 177 insertions(+), 8 deletions(-) create mode 100644 src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl index ab8eb7f29..b5ea57c3c 100644 --- a/src/couch_replicator/src/couch_replicator_api_wrap.erl +++ 
b/src/couch_replicator/src/couch_replicator_api_wrap.erl @@ -24,7 +24,7 @@ -export([ db_open/2, - db_open/3, + db_open/4, db_close/1, get_db_info/1, get_pending_count/2, @@ -68,20 +68,21 @@ db_uri(Db) -> db_open(Db, Options) -> - db_open(Db, Options, false). + db_open(Db, Options, false, []). -db_open(#httpdb{} = Db1, _Options, Create) -> +db_open(#httpdb{} = Db1, _Options, Create, CreateParams) -> {ok, Db} = couch_replicator_httpc:setup(Db1), try case Create of false -> ok; true -> - send_req(Db, [{method, put}], + Db2 = maybe_append_create_query_params(Db, CreateParams), + send_req(Db2, [{method, put}], fun(401, _, _) -> - throw({unauthorized, ?l2b(db_uri(Db))}); + throw({unauthorized, ?l2b(db_uri(Db2))}); (403, _, _) -> - throw({forbidden, ?l2b(db_uri(Db))}); + throw({forbidden, ?l2b(db_uri(Db2))}); (_, _, _) -> ok end) @@ -118,7 +119,7 @@ db_open(#httpdb{} = Db1, _Options, Create) -> db_close(Db), erlang:exit(Error) end; -db_open(DbName, Options, Create) -> +db_open(DbName, Options, Create, _CreateParams) -> try case Create of false -> @@ -1020,6 +1021,14 @@ normalize_db(<<DbName/binary>>) -> DbName. +maybe_append_create_query_params(Db, []) -> + Db; + +maybe_append_create_query_params(Db, CreateParams) -> + NewUrl = Db#httpdb.url ++ "?" ++ mochiweb_util:urlencode(CreateParams), + Db#httpdb{url = NewUrl}. + + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl index d22b85f89..6666cba53 100644 --- a/src/couch_replicator/src/couch_replicator_docs.erl +++ b/src/couch_replicator/src/couch_replicator_docs.erl @@ -499,6 +499,11 @@ convert_options([{<<"create_target">>, V} | _R]) when not is_boolean(V)-> throw({bad_request, <<"parameter `create_target` must be a boolean">>}); convert_options([{<<"create_target">>, V} | R]) -> [{create_target, V} | convert_options(R)]; +convert_options([{<<"create_target_params">>, V} | _R]) when not is_tuple(V) -> + throw({bad_request, + <<"parameter `create_target_params` must be a JSON object">>}); +convert_options([{<<"create_target_params">>, V} | R]) -> + [{create_target_params, V} | convert_options(R)]; convert_options([{<<"continuous">>, V} | _R]) when not is_boolean(V)-> throw({bad_request, <<"parameter `continuous` must be a boolean">>}); convert_options([{<<"continuous">>, V} | R]) -> diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl index e2d8fb6d6..0438249be 100644 --- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl +++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl @@ -587,8 +587,9 @@ init_state(Rep) -> % Adjust minimum number of http source connections to 2 to avoid deadlock Src = adjust_maxconn(Src0, BaseId), {ok, Source} = couch_replicator_api_wrap:db_open(Src, [{user_ctx, UserCtx}]), + {CreateTargetParams} = get_value(create_target_params, Options, {[]}), {ok, Target} = couch_replicator_api_wrap:db_open(Tgt, [{user_ctx, UserCtx}], - get_value(create_target, Options, false)), + get_value(create_target, Options, false), CreateTargetParams), {ok, SourceInfo} = couch_replicator_api_wrap:get_db_info(Source), {ok, TargetInfo} = couch_replicator_api_wrap:get_db_info(Target), diff --git a/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl b/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl new file mode 100644 index 000000000..31bfd48c7 --- /dev/null +++ 
b/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl @@ -0,0 +1,154 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_replicator_create_target_with_options_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-include_lib("couch_replicator/src/couch_replicator.hrl"). + + +setup(_) -> + Ctx1 = test_util:start_couch([fabric, mem3, couch_replicator]), + Ctx2 = chttpd_test_util:start_couch(), + Source = ?tempdb(), + Target = ?tempdb(), + {Ctx1, Ctx2, {Source, Target}}. + + +teardown(_, {Ctx1, Ctx2, {_Source, _Target}}) -> + ok = test_util:stop_couch(Ctx1), + ok = chttpd_test_util:stop_couch(Ctx2). + + +create_target_with_options_replication_test_() -> + Ps = [{local, remote}, {remote, remote}], + { + "Create target with range partitions tests", + { + foreachx, + fun setup/1, fun teardown/2, + [{P, fun should_create_target_with_q_4/2} || P <- Ps] ++ + [{P, fun should_create_target_with_q_2_n_1/2} || P <- Ps] ++ + [{P, fun should_create_target_with_default/2} || P <- Ps] ++ + [{P, fun should_not_create_target_with_q_any/2} || P <- Ps] + } + }. + + +should_create_target_with_q_4({From, To}, {_Ctx1, _Ctx2, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(From, Source)}, + {<<"target">>, db_url(To, Target)}, + {<<"create_target">>, true}, + {<<"create_target_params">>, {[{<<"q">>, <<"4">>}]}} + ]}, + create_db(From, Source), + create_doc(From, Source), + {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + + {ok, TargetInfo} = fabric:get_db_info(Target), + {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), + delete_db(From, Source), + delete_db(To, Target), + ?_assertEqual(4, couch_util:get_value(q, ClusterInfo)). + + +should_create_target_with_q_2_n_1( + {From, To}, {_Ctx1, _Ctx2, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(From, Source)}, + {<<"target">>, db_url(To, Target)}, + {<<"create_target">>, true}, + {<<"create_target_params">>, + {[{<<"q">>, <<"2">>}, {<<"n">>, <<"1">>}]}} + ]}, + create_db(From, Source), + create_doc(From, Source), + {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + + {ok, TargetInfo} = fabric:get_db_info(Target), + {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), + delete_db(From, Source), + delete_db(To, Target), + [ + ?_assertEqual(2, couch_util:get_value(q, ClusterInfo)), + ?_assertEqual(1, couch_util:get_value(n, ClusterInfo)) + ]. 
+ + +should_create_target_with_default( + {From, To}, {_Ctx1, _Ctx2, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(From, Source)}, + {<<"target">>, db_url(To, Target)}, + {<<"create_target">>, true} + ]}, + create_db(From, Source), + create_doc(From, Source), + {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + + {ok, TargetInfo} = fabric:get_db_info(Target), + {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), + Q = config:get("cluster", "q", "8"), + delete_db(From, Source), + delete_db(To, Target), + ?_assertEqual(list_to_integer(Q), couch_util:get_value(q, ClusterInfo)). + + +should_not_create_target_with_q_any( + {From, To}, {_Ctx1, _Ctx2, {Source, Target}}) -> + RepObject = {[ + {<<"source">>, db_url(From, Source)}, + {<<"target">>, db_url(To, Target)}, + {<<"create_target">>, false}, + {<<"create_target_params">>, {[{<<"q">>, <<"1">>}]}} + ]}, + create_db(From, Source), + create_doc(From, Source), + {error, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), + DbExist = is_list(catch mem3:shards(Target)), + delete_db(From, Source), + ?_assertEqual(false, DbExist). + + +create_doc(local, DbName) -> + {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), + Body = {[{<<"foo">>, <<"bar">>}]}, + NewDoc = #doc{body = Body}, + {ok, _} = couch_db:update_doc(Db, NewDoc, []), + couch_db:close(Db); +create_doc(remote, DbName) -> + Body = {[{<<"foo">>, <<"bar">>}]}, + NewDoc = #doc{body = Body}, + {ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]). + + +create_db(local, DbName) -> + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + ok = couch_db:close(Db); +create_db(remote, DbName) -> + ok = fabric:create_db(DbName, [?ADMIN_CTX]). + + +delete_db(local, DbName) -> + ok = couch_server:delete(DbName, [?ADMIN_CTX]); +delete_db(remote, DbName) -> + ok = fabric:delete_db(DbName, [?ADMIN_CTX]). + + +db_url(local, DbName) -> + DbName; +db_url(remote, DbName) -> + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])). -- cgit v1.2.1 From b66e52ab9992654e1cb5ac626f0427270f0fd122 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Thu, 30 Nov 2017 13:17:35 -0400 Subject: Add missing methods to fake index Mocked index module missing a couple of methods called on late compaction stages. This leads to a crash, but since it's happening after the test's assertions, it bring the test to fail. Also small refactoring to encapsulate all mocking in a single function and move unrelated parts back to test's setup. --- src/couch_index/test/couch_index_compaction_tests.erl | 12 +++++++----- src/couch_index/test/couch_index_ddoc_updated_tests.erl | 3 ++- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/couch_index/test/couch_index_compaction_tests.erl b/src/couch_index/test/couch_index_compaction_tests.erl index 062be872a..164e9836a 100644 --- a/src/couch_index/test/couch_index_compaction_tests.erl +++ b/src/couch_index/test/couch_index_compaction_tests.erl @@ -21,12 +21,12 @@ setup() -> DbName = ?tempdb(), {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), couch_db:close(Db), - {ok, IndexerPid} = fake_index(Db), + fake_index(DbName), + {ok, IndexerPid} = couch_index_server:get_index(test_index, Db, undefined), ?assertNot(is_opened(Db)), {Db, IndexerPid}. 
-fake_index(Db) -> - DbName = couch_db:name(Db), +fake_index(DbName) -> ok = meck:new([test_index], [non_strict]), ok = meck:expect(test_index, init, ['_', '_'], {ok, 10}), ok = meck:expect(test_index, open, fun(_Db, State) -> @@ -45,8 +45,10 @@ fake_index(Db) -> (update_seq, Seq) -> Seq end), - - couch_index_server:get_index(test_index, Db, undefined). + ok = meck:expect(test_index, close, ['_'], ok), + ok = meck:expect(test_index, swap_compacted, fun(_, NewState) -> + {ok, NewState} + end). teardown(_) -> (catch meck:unload(test_index)), diff --git a/src/couch_index/test/couch_index_ddoc_updated_tests.erl b/src/couch_index/test/couch_index_ddoc_updated_tests.erl index d1bbc43d2..aaf36c71f 100644 --- a/src/couch_index/test/couch_index_ddoc_updated_tests.erl +++ b/src/couch_index/test/couch_index_ddoc_updated_tests.erl @@ -121,7 +121,8 @@ fake_index() -> crypto:hash(md5, term_to_binary(DDoc)); (update_seq, Seq) -> Seq - end). + end), + ok = meck:expect(test_index, shutdown, ['_'], ok). get_indexes_by_ddoc(DDocID, N) -> -- cgit v1.2.1 From ccb657ea736d21c35ff967cabad149ee5bda4431 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Thu, 30 Nov 2017 13:25:01 -0400 Subject: Remove invalid meck unload Remove invalid meck unload and catch around unload that was hiding this issue. --- src/couch_index/test/couch_index_compaction_tests.erl | 4 +--- src/couch_index/test/couch_index_ddoc_updated_tests.erl | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/couch_index/test/couch_index_compaction_tests.erl b/src/couch_index/test/couch_index_compaction_tests.erl index 164e9836a..53316d944 100644 --- a/src/couch_index/test/couch_index_compaction_tests.erl +++ b/src/couch_index/test/couch_index_compaction_tests.erl @@ -51,9 +51,7 @@ fake_index(DbName) -> end). teardown(_) -> - (catch meck:unload(test_index)), - (catch meck:unload(couch_util)), - ok. + meck:unload(test_index). compaction_test_() -> { diff --git a/src/couch_index/test/couch_index_ddoc_updated_tests.erl b/src/couch_index/test/couch_index_ddoc_updated_tests.erl index aaf36c71f..40dadcc62 100644 --- a/src/couch_index/test/couch_index_ddoc_updated_tests.erl +++ b/src/couch_index/test/couch_index_ddoc_updated_tests.erl @@ -25,7 +25,7 @@ start() -> stop({Ctx, DbName}) -> - (catch meck:unload(test_index)), + meck:unload(test_index), ok = fabric:delete_db(DbName, [?ADMIN_CTX]), DbDir = config:get("couchdb", "database_dir", "."), WaitFun = fun() -> -- cgit v1.2.1 From 743bd8820cf612e830c1ff6dd0b9b07c12aab8fb Mon Sep 17 00:00:00 2001 From: Will Holley Date: Thu, 30 Nov 2017 19:53:19 +0000 Subject: warn instead of error when use_index not valid (#962) If a user specifies a value for use_index that is not valid for the selector - i.e. it does not meet the coverage requirements of the selector or sort fields - attempt to fall back to a valid index (or database scan) rather than returning a 400 error. When a fallback occurs, populate the "warning" field in the response (as we already do when a full database scan takes place) with details of the fallback. This change is partially as mitigation for #816, which may lead to some previously valid indexes being deemed invalid, and also to make use_index less brittle in general. If an index that is used explicitly by active queries is removed, Couch will now generate warnings and there may be a performance impact, but the client will still get correct results. 
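From an HTTP client's point of view, the change looks like this (URL and ddoc name are assumptions):

    import requests

    # An unusable use_index no longer fails the request: Mango falls
    # back to a usable index (or a full scan) and says so in "warning".
    resp = requests.post(
        "http://localhost:5984/user_docs/_find",
        json={"selector": {"company": "Pharmex"},
              "use_index": "_design/not-a-valid-index"},
    )
    resp.raise_for_status()     # 200 where this used to be a 400
    body = resp.json()
    print(body.get("warning"))  # explains why use_index was ignored
    print(len(body["docs"]))    # results are still correct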
--- src/mango/src/mango_cursor.erl | 76 ++++++++++++++++++++++------- src/mango/src/mango_cursor_text.erl | 2 +- src/mango/src/mango_cursor_view.erl | 2 +- src/mango/src/mango_error.erl | 19 -------- src/mango/src/mango_idx.erl | 14 ++---- src/mango/test/05-index-selection-test.py | 80 ++++++++++++++++++------------- 6 files changed, 112 insertions(+), 81 deletions(-) diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl index 7997d9ada..5108d36b2 100644 --- a/src/mango/src/mango_cursor.erl +++ b/src/mango/src/mango_cursor.erl @@ -18,6 +18,7 @@ explain/1, execute/3, maybe_filter_indexes_by_ddoc/2, + remove_indexes_with_partial_filter_selector/1, maybe_add_warning/3 ]). @@ -47,16 +48,18 @@ create(Db, Selector0, Opts) -> Selector = mango_selector:normalize(Selector0), UsableIndexes = mango_idx:get_usable_indexes(Db, Selector, Opts), - - {use_index, IndexSpecified} = proplists:lookup(use_index, Opts), - case {length(UsableIndexes), length(IndexSpecified)} of - {0, 0} -> + case length(UsableIndexes) of + 0 -> AllDocs = mango_idx:special(Db), create_cursor(Db, AllDocs, Selector, Opts); - {0, _} -> - ?MANGO_ERROR({no_usable_index, selector_unsupported}); _ -> - create_cursor(Db, UsableIndexes, Selector, Opts) + case mango_cursor:maybe_filter_indexes_by_ddoc(UsableIndexes, Opts) of + [] -> + % use_index doesn't match a valid index - fall back to a valid one + create_cursor(Db, UsableIndexes, Selector, Opts); + UserSpecifiedIndex -> + create_cursor(Db, UserSpecifiedIndex, Selector, Opts) + end end. @@ -90,9 +93,7 @@ execute(#cursor{index=Idx}=Cursor, UserFun, UserAcc) -> maybe_filter_indexes_by_ddoc(Indexes, Opts) -> case lists:keyfind(use_index, 1, Opts) of {use_index, []} -> - % We remove any indexes that have a selector - % since they are only used when specified via use_index - remove_indexes_with_partial_filter_selector(Indexes); + []; {use_index, [DesignId]} -> filter_indexes(Indexes, DesignId); {use_index, [DesignId, ViewName]} -> @@ -150,12 +151,53 @@ group_indexes_by_type(Indexes) -> end, ?CURSOR_MODULES). -maybe_add_warning(UserFun, #idx{type = IndexType}, UserAcc) -> - case IndexType of +maybe_add_warning(UserFun, #cursor{index = Index, opts = Opts}, UserAcc) -> + NoIndexWarning = case Index#idx.type of <<"special">> -> - Arg = {add_key, warning, <<"no matching index found, create an index to optimize query time">>}, - {_Go, UserAcc0} = UserFun(Arg, UserAcc), - UserAcc0; + <<"no matching index found, create an index to optimize query time">>; _ -> - UserAcc - end. \ No newline at end of file + ok + end, + + UseIndexInvalidWarning = case lists:keyfind(use_index, 1, Opts) of + {use_index, []} -> + NoIndexWarning; + {use_index, [DesignId]} -> + case filter_indexes([Index], DesignId) of + [] -> + fmt("_design/~s was not used because it does not contain a valid index for this query.", + [ddoc_name(DesignId)]); + _ -> + NoIndexWarning + end; + {use_index, [DesignId, ViewName]} -> + case filter_indexes([Index], DesignId, ViewName) of + [] -> + fmt("_design/~s, ~s was not used because it is not a valid index for this query.", + [ddoc_name(DesignId), ViewName]); + _ -> + NoIndexWarning + end + end, + + maybe_add_warning_int(UseIndexInvalidWarning, UserFun, UserAcc). + + +maybe_add_warning_int(ok, _, UserAcc) -> + UserAcc; + +maybe_add_warning_int(Warning, UserFun, UserAcc) -> + Arg = {add_key, warning, Warning}, + {_Go, UserAcc0} = UserFun(Arg, UserAcc), + UserAcc0. + + +fmt(Format, Args) -> + iolist_to_binary(io_lib:format(Format, Args)). 
+ + +ddoc_name(<<"_design/", Name/binary>>) -> + Name; + +ddoc_name(Name) -> + Name. diff --git a/src/mango/src/mango_cursor_text.erl b/src/mango/src/mango_cursor_text.erl index 88abfc00a..3883bc8f2 100644 --- a/src/mango/src/mango_cursor_text.erl +++ b/src/mango/src/mango_cursor_text.erl @@ -124,7 +124,7 @@ execute(Cursor, UserFun, UserAcc) -> Arg = {add_key, bookmark, JsonBM}, {_Go, FinalUserAcc} = UserFun(Arg, LastUserAcc), FinalUserAcc0 = mango_execution_stats:maybe_add_stats(Opts, UserFun, Stats0, FinalUserAcc), - FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Idx, FinalUserAcc0), + FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, FinalUserAcc0), {ok, FinalUserAcc1} end. diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl index 7c57b1414..1e2108b7d 100644 --- a/src/mango/src/mango_cursor_view.erl +++ b/src/mango/src/mango_cursor_view.erl @@ -137,7 +137,7 @@ execute(#cursor{db = Db, index = Idx, execution_stats = Stats} = Cursor0, UserFu {_Go, FinalUserAcc} = UserFun(Arg, LastCursor#cursor.user_acc), Stats0 = LastCursor#cursor.execution_stats, FinalUserAcc0 = mango_execution_stats:maybe_add_stats(Opts, UserFun, Stats0, FinalUserAcc), - FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Idx, FinalUserAcc0), + FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, FinalUserAcc0), {ok, FinalUserAcc1}; {error, Reason} -> {error, Reason} diff --git a/src/mango/src/mango_error.erl b/src/mango/src/mango_error.erl index 4c55ef3f6..ad665e2f3 100644 --- a/src/mango/src/mango_error.erl +++ b/src/mango/src/mango_error.erl @@ -21,31 +21,12 @@ ]). -info(mango_idx, {no_usable_index, no_indexes_defined}) -> - { - 400, - <<"no_usable_index">>, - <<"There are no indexes defined in this database.">> - }; -info(mango_idx, {no_usable_index, no_index_matching_name}) -> - { - 400, - <<"no_usable_index">>, - <<"No index matches the index specified with \"use_index\"">> - }; info(mango_idx, {no_usable_index, missing_sort_index}) -> { 400, <<"no_usable_index">>, <<"No index exists for this sort, try indexing by the sort fields.">> }; -info(mango_cursor, {no_usable_index, selector_unsupported}) -> - { - 400, - <<"no_usable_index">>, - <<"The index specified with \"use_index\" is not usable for the query.">> - }; - info(mango_json_bookmark, {invalid_bookmark, BadBookmark}) -> { 400, diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl index 8e19ebff8..ea5949c02 100644 --- a/src/mango/src/mango_idx.erl +++ b/src/mango/src/mango_idx.erl @@ -59,20 +59,16 @@ list(Db) -> get_usable_indexes(Db, Selector, Opts) -> ExistingIndexes = mango_idx:list(Db), - if ExistingIndexes /= [] -> ok; true -> - ?MANGO_ERROR({no_usable_index, no_indexes_defined}) - end, - FilteredIndexes = mango_cursor:maybe_filter_indexes_by_ddoc(ExistingIndexes, Opts), - if FilteredIndexes /= [] -> ok; true -> - ?MANGO_ERROR({no_usable_index, no_index_matching_name}) - end, + GlobalIndexes = mango_cursor:remove_indexes_with_partial_filter_selector(ExistingIndexes), + UserSpecifiedIndex = mango_cursor:maybe_filter_indexes_by_ddoc(ExistingIndexes, Opts), + UsableIndexes0 = lists:usort(GlobalIndexes ++ UserSpecifiedIndex), SortFields = get_sort_fields(Opts), UsableFilter = fun(I) -> is_usable(I, Selector, SortFields) end, - UsableIndexes0 = lists:filter(UsableFilter, FilteredIndexes), + UsableIndexes1 = lists:filter(UsableFilter, UsableIndexes0), - case maybe_filter_by_sort_fields(UsableIndexes0, SortFields) of + case maybe_filter_by_sort_fields(UsableIndexes1, 
SortFields) of {ok, SortIndexes} -> SortIndexes; {error, no_usable_index} -> diff --git a/src/mango/test/05-index-selection-test.py b/src/mango/test/05-index-selection-test.py index ef662a918..eec0bd9a6 100644 --- a/src/mango/test/05-index-selection-test.py +++ b/src/mango/test/05-index-selection-test.py @@ -66,12 +66,8 @@ class IndexSelectionTests: def test_invalid_use_index(self): # ddoc id for the age index ddocid = "_design/ad3d537c03cd7c6a43cf8dff66ef70ea54c2b40f" - try: - self.db.find({}, use_index=ddocid) - except Exception as e: - self.assertEqual(e.response.status_code, 400) - else: - raise AssertionError("bad find") + r = self.db.find({}, use_index=ddocid, return_raw=True) + self.assertEqual(r["warning"], '{0} was not used because it does not contain a valid index for this query.'.format(ddocid)) def test_uses_index_when_no_range_or_equals(self): # index on ["manager"] should be valid because @@ -87,19 +83,18 @@ class IndexSelectionTests: resp_explain = self.db.find(selector, explain=True) self.assertEqual(resp_explain["index"]["type"], "json") - def test_reject_use_index_invalid_fields(self): # index on ["company","manager"] which should not be valid ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198" selector = { "company": "Pharmex" } - try: - self.db.find(selector, use_index=ddocid) - except Exception as e: - self.assertEqual(e.response.status_code, 400) - else: - raise AssertionError("did not reject bad use_index") + r = self.db.find(selector, use_index=ddocid, return_raw=True) + self.assertEqual(r["warning"], '{0} was not used because it does not contain a valid index for this query.'.format(ddocid)) + + # should still return a correct result + for d in r["docs"]: + self.assertEqual(d["company"], "Pharmex") def test_reject_use_index_ddoc_and_name_invalid_fields(self): # index on ["company","manager"] which should not be valid @@ -108,41 +103,58 @@ class IndexSelectionTests: selector = { "company": "Pharmex" } - try: - self.db.find(selector, use_index=[ddocid,name]) - except Exception as e: - self.assertEqual(e.response.status_code, 400) - else: - raise AssertionError("did not reject bad use_index") + + resp = self.db.find(selector, use_index=[ddocid,name], return_raw=True) + self.assertEqual(resp["warning"], "{0}, {1} was not used because it is not a valid index for this query.".format(ddocid, name)) + + # should still return a correct result + for d in resp["docs"]: + self.assertEqual(d["company"], "Pharmex") def test_reject_use_index_sort_order(self): # index on ["company","manager"] which should not be valid + # and there is no valid fallback (i.e. 
an index on ["company"]) ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198" selector = { - "company": {"$gt": None}, - "manager": {"$gt": None} + "company": {"$gt": None} } try: - self.db.find(selector, use_index=ddocid, sort=[{"manager":"desc"}]) + self.db.find(selector, use_index=ddocid, sort=[{"company":"desc"}]) except Exception as e: self.assertEqual(e.response.status_code, 400) else: raise AssertionError("did not reject bad use_index") - def test_reject_use_index_ddoc_and_name_sort_order(self): - # index on ["company","manager"] which should not be valid - ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198" - name = "a0c425a60cf3c3c09e3c537c9ef20059dcef9198" + def test_use_index_fallback_if_valid_sort(self): + ddocid_valid = "_design/fallbackfoo" + ddocid_invalid = "_design/fallbackfoobar" + self.db.create_index(fields=["foo"], ddoc=ddocid_invalid) + self.db.create_index(fields=["foo", "bar"], ddoc=ddocid_valid) selector = { - "company": {"$gt": None}, - "manager": {"$gt": None} + "foo": {"$gt": None} } - try: - self.db.find(selector, use_index=[ddocid,name], sort=[{"manager":"desc"}]) - except Exception as e: - self.assertEqual(e.response.status_code, 400) - else: - raise AssertionError("did not reject bad use_index") + + resp_explain = self.db.find(selector, sort=["foo", "bar"], use_index=ddocid_invalid, explain=True) + self.assertEqual(resp_explain["index"]["ddoc"], ddocid_valid) + + resp = self.db.find(selector, sort=["foo", "bar"], use_index=ddocid_invalid, return_raw=True) + self.assertEqual(resp["warning"], '{0} was not used because it does not contain a valid index for this query.'.format(ddocid_invalid)) + self.assertEqual(len(resp["docs"]), 0) + + def test_prefer_use_index_over_optimal_index(self): + # index on ["company"] even though index on ["company", "manager"] is better + ddocid_preferred = "_design/testsuboptimal" + self.db.create_index(fields=["baz"], ddoc=ddocid_preferred) + self.db.create_index(fields=["baz", "bar"]) + selector = { + "baz": {"$gt": None}, + "bar": {"$gt": None} + } + resp = self.db.find(selector, use_index=ddocid_preferred, return_raw=True) + self.assertTrue("warning" not in resp) + + resp_explain = self.db.find(selector, use_index=ddocid_preferred, explain=True) + self.assertEqual(resp_explain["index"]["ddoc"], ddocid_preferred) # This doc will not be saved given the new ddoc validation code # in couch_mrview -- cgit v1.2.1 From 9eb845b75ed53a59ef7b70833599296ae80e0297 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 28 Nov 2017 17:36:18 -0500 Subject: Fix eunit "suites" example As far as I can tell rebar will run all tests in an application if the specified suites are not found. Also, rebar appends the _tests suffix to the name of the suite automatically when it executes the search. 
--- README-DEV.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README-DEV.rst b/README-DEV.rst index f8d80ac41..3cff476c2 100644 --- a/README-DEV.rst +++ b/README-DEV.rst @@ -150,7 +150,7 @@ to make targets:: make eunit apps=couch,chttpd # Run only tests from couch_btree_tests suite - make eunit suites=couch_btree_tests + make eunit apps=couch suites=couch_btree # Run only specific tests make eunit tests=btree_open_test,reductions_test -- cgit v1.2.1 From fda4c67782edb535ac784aa889fa09fa80c0b153 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 28 Nov 2017 17:44:49 -0500 Subject: Remove Spidermonkey as an "optional" dependency It's required, and thus would already have been installed via the instructions in the INSTALL file. Also, we don't mention it for any other operating system here. --- README-DEV.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README-DEV.rst b/README-DEV.rst index 3cff476c2..3587e8586 100644 --- a/README-DEV.rst +++ b/README-DEV.rst @@ -89,7 +89,7 @@ Unless you want to install the optional dependencies, skip to the next section. Install what else we can with Homebrew:: - brew install help2man gnupg md5sha1sum node spidermonkey + brew install help2man gnupg md5sha1sum node If you don't already have pip installed, install it:: -- cgit v1.2.1 From c5e48a8429b3a76fdb63db80085875dea5e9b2b8 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 28 Nov 2017 17:58:06 -0500 Subject: Remove Bob's 2.0 TODO list :) --- TODO | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 TODO diff --git a/TODO b/TODO deleted file mode 100644 index d9d1929b1..000000000 --- a/TODO +++ /dev/null @@ -1,10 +0,0 @@ -CouchDB 2.0 TODO - -The remaining work after the merge of 1843-feature-bigcouch for the -bigcouch side of things; - -1) Restore documentation (couchdb-documentation and build scripts) -2) Restore couch-plugins -3) Restore my-first-couchdb-plugin (to couchdb-examples) -4) Restore _db_updates -5) Sundries (AUTHORS, INSTALL.*, LICENSE, NOTICE, etc) -- cgit v1.2.1 From 57b615d762c645317fad299ef7a329ab03feb62f Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Thu, 30 Nov 2017 19:32:16 -0500 Subject: Remove references to etap --- LICENSE | 26 -------------------------- NOTICE | 4 ---- license.skip | 12 ------------ src/couch/src/couch_key_tree.erl | 4 ---- 4 files changed, 46 deletions(-) diff --git a/LICENSE b/LICENSE index 1f0b270b4..e9a9c81e8 100644 --- a/LICENSE +++ b/LICENSE @@ -272,32 +272,6 @@ For the src/ibrowse component: (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -For the test/etap/etap.erl component: - - Copyright (c) 2008-2009 Nick Gerakines - - Permission is hereby granted, free of charge, to any person - obtaining a copy of this software and associated documentation - files (the "Software"), to deal in the Software without - restriction, including without limitation the rights to use, - copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following - conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software.
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - OTHER DEALINGS IN THE SOFTWARE. - - For the src/couch_log/src/couch_log_trunc_io.erl and the src/couch_log/src/couch_log_trunc_io_fmt.erl components diff --git a/NOTICE b/NOTICE index 5fddffb3e..a1f06ae7b 100644 --- a/NOTICE +++ b/NOTICE @@ -22,10 +22,6 @@ This product also includes the following third-party components: Copyright 2005-2012, Chandrashekhar Mullaparthi - * ETap (http://github.com/ngerakines/etap/) - - Copyright 2009, Nick Gerakines - * mimeparse.js (http://code.google.com/p/mimeparse/) Copyright 2009, Chris Anderson diff --git a/license.skip b/license.skip index 143639251..35f91e13c 100644 --- a/license.skip +++ b/license.skip @@ -135,7 +135,6 @@ ^src/couchjs-node/Makefile.in ^src/couch_dbupdates ^src/ejson/.* -^src/etap/.* ^src/fauxton/app/addons/config/assets/less/config.less ^src/fauxton/assets/css/codemirror.css ^src/fauxton/assets/css/nv.d3.css @@ -171,9 +170,7 @@ ^src/ddoc_cache/README.md ^src/ets_lru/ebin/.*.beam ^src/ets_lru/ebin/ets_lru.app -^src/ets_lru/test/etap.erl ^src/ejson/.* -^src/etap/.* ^src/fabric/ebin/.*.beam ^src/fabric/ebin/fabric.app ^src/ibrowse/.* @@ -190,15 +187,6 @@ ^stamp-h1 ^test/Makefile ^test/Makefile.in -^test/etap/.*.beam -^test/etap/.*.o -^test/etap/etap.erl -^test/etap/.deps/.* -^test/etap/test_cfg_register -^test/etap/Makefile -^test/etap/Makefile.in -^test/etap/temp..* -^test/etap/fixtures/* ^test/javascript/Makefile ^test/javascript/Makefile.in ^test/local.ini diff --git a/src/couch/src/couch_key_tree.erl b/src/couch/src/couch_key_tree.erl index bc4076abc..e2e187eb1 100644 --- a/src/couch/src/couch_key_tree.erl +++ b/src/couch/src/couch_key_tree.erl @@ -498,7 +498,3 @@ value_pref(Other, ?REV_MISSING) -> Other; value_pref(Last, _) -> Last. - - -% Tests moved to test/etap/06?-*.t - -- cgit v1.2.1 From 6fce0fe102f9b033aa1b1e606b66da71ef51d0f4 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Tue, 5 Dec 2017 15:42:47 -0500 Subject: Fix replicator create target options test Don't need to use local as source since it doesn't test anything specific to the code in question. Use credentials with http requests as it's a more realistic scenario. Avoid spawning couch server twice - once to start chttpd and then to start other couch servers. --- ...replicator_create_target_with_options_tests.erl | 113 ++++++++++----------- 1 file changed, 51 insertions(+), 62 deletions(-) diff --git a/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl b/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl index 31bfd48c7..63310d39e 100644 --- a/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl +++ b/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl @@ -16,139 +16,128 @@ -include_lib("couch/include/couch_db.hrl"). -include_lib("couch_replicator/src/couch_replicator.hrl"). +-define(USERNAME, "rep_admin"). +-define(PASSWORD, "secret"). 
-setup(_) -> - Ctx1 = test_util:start_couch([fabric, mem3, couch_replicator]), - Ctx2 = chttpd_test_util:start_couch(), +setup() -> + Ctx = test_util:start_couch([fabric, mem3, couch_replicator, chttpd]), + Hashed = couch_passwords:hash_admin_password(?PASSWORD), + ok = config:set("admins", ?USERNAME, ?b2l(Hashed), _Persist=false), Source = ?tempdb(), Target = ?tempdb(), - {Ctx1, Ctx2, {Source, Target}}. + {Ctx, {Source, Target}}. -teardown(_, {Ctx1, Ctx2, {_Source, _Target}}) -> - ok = test_util:stop_couch(Ctx1), - ok = chttpd_test_util:stop_couch(Ctx2). +teardown({Ctx, {_Source, _Target}}) -> + config:delete("admins", ?USERNAME), + ok = test_util:stop_couch(Ctx). create_target_with_options_replication_test_() -> - Ps = [{local, remote}, {remote, remote}], { "Create target with range partitions tests", { - foreachx, - fun setup/1, fun teardown/2, - [{P, fun should_create_target_with_q_4/2} || P <- Ps] ++ - [{P, fun should_create_target_with_q_2_n_1/2} || P <- Ps] ++ - [{P, fun should_create_target_with_default/2} || P <- Ps] ++ - [{P, fun should_not_create_target_with_q_any/2} || P <- Ps] + foreach, + fun setup/0, fun teardown/1, + [ + fun should_create_target_with_q_4/1, + fun should_create_target_with_q_2_n_1/1, + fun should_create_target_with_default/1, + fun should_not_create_target_with_q_any/1 + ] } }. -should_create_target_with_q_4({From, To}, {_Ctx1, _Ctx2, {Source, Target}}) -> +should_create_target_with_q_4({_Ctx, {Source, Target}}) -> RepObject = {[ - {<<"source">>, db_url(From, Source)}, - {<<"target">>, db_url(To, Target)}, + {<<"source">>, db_url(Source)}, + {<<"target">>, db_url(Target)}, {<<"create_target">>, true}, {<<"create_target_params">>, {[{<<"q">>, <<"4">>}]}} ]}, - create_db(From, Source), - create_doc(From, Source), + create_db(Source), + create_doc(Source), {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), {ok, TargetInfo} = fabric:get_db_info(Target), {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), - delete_db(From, Source), - delete_db(To, Target), + delete_db(Source), + delete_db(Target), ?_assertEqual(4, couch_util:get_value(q, ClusterInfo)). -should_create_target_with_q_2_n_1( - {From, To}, {_Ctx1, _Ctx2, {Source, Target}}) -> +should_create_target_with_q_2_n_1({_Ctx, {Source, Target}}) -> RepObject = {[ - {<<"source">>, db_url(From, Source)}, - {<<"target">>, db_url(To, Target)}, + {<<"source">>, db_url(Source)}, + {<<"target">>, db_url(Target)}, {<<"create_target">>, true}, {<<"create_target_params">>, {[{<<"q">>, <<"2">>}, {<<"n">>, <<"1">>}]}} ]}, - create_db(From, Source), - create_doc(From, Source), + create_db(Source), + create_doc(Source), {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), {ok, TargetInfo} = fabric:get_db_info(Target), {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), - delete_db(From, Source), - delete_db(To, Target), + delete_db(Source), + delete_db(Target), [ ?_assertEqual(2, couch_util:get_value(q, ClusterInfo)), ?_assertEqual(1, couch_util:get_value(n, ClusterInfo)) ]. 
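[Editor's note — not part of the patch] For readers following these tests, a hedged sketch of the create_target_params replication option they exercise, driven over HTTP (host, credentials, and database names are assumptions):

    import requests

    base = "http://rep_admin:secret@127.0.0.1:5984"
    body = {
        "source": base + "/source_db",
        "target": base + "/target_db",
        "create_target": True,
        # ask the replicator to create the target with q=4 shards
        "create_target_params": {"q": "4"},
    }
    r = requests.post(base + "/_replicate", json=body)
    print(r.status_code, r.json())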
-should_create_target_with_default( - {From, To}, {_Ctx1, _Ctx2, {Source, Target}}) -> +should_create_target_with_default({_Ctx, {Source, Target}}) -> RepObject = {[ - {<<"source">>, db_url(From, Source)}, - {<<"target">>, db_url(To, Target)}, + {<<"source">>, db_url(Source)}, + {<<"target">>, db_url(Target)}, {<<"create_target">>, true} ]}, - create_db(From, Source), - create_doc(From, Source), + create_db(Source), + create_doc(Source), {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), {ok, TargetInfo} = fabric:get_db_info(Target), {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), Q = config:get("cluster", "q", "8"), - delete_db(From, Source), - delete_db(To, Target), + delete_db(Source), + delete_db(Target), ?_assertEqual(list_to_integer(Q), couch_util:get_value(q, ClusterInfo)). -should_not_create_target_with_q_any( - {From, To}, {_Ctx1, _Ctx2, {Source, Target}}) -> +should_not_create_target_with_q_any({_Ctx, {Source, Target}}) -> RepObject = {[ - {<<"source">>, db_url(From, Source)}, - {<<"target">>, db_url(To, Target)}, + {<<"source">>, db_url(Source)}, + {<<"target">>, db_url(Target)}, {<<"create_target">>, false}, {<<"create_target_params">>, {[{<<"q">>, <<"1">>}]}} ]}, - create_db(From, Source), - create_doc(From, Source), + create_db(Source), + create_doc(Source), {error, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER), DbExist = is_list(catch mem3:shards(Target)), - delete_db(From, Source), + delete_db(Source), ?_assertEqual(false, DbExist). -create_doc(local, DbName) -> - {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), - Body = {[{<<"foo">>, <<"bar">>}]}, - NewDoc = #doc{body = Body}, - {ok, _} = couch_db:update_doc(Db, NewDoc, []), - couch_db:close(Db); -create_doc(remote, DbName) -> +create_doc(DbName) -> Body = {[{<<"foo">>, <<"bar">>}]}, NewDoc = #doc{body = Body}, {ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]). -create_db(local, DbName) -> - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), - ok = couch_db:close(Db); -create_db(remote, DbName) -> +create_db(DbName) -> ok = fabric:create_db(DbName, [?ADMIN_CTX]). -delete_db(local, DbName) -> - ok = couch_server:delete(DbName, [?ADMIN_CTX]); -delete_db(remote, DbName) -> +delete_db(DbName) -> ok = fabric:delete_db(DbName, [?ADMIN_CTX]). -db_url(local, DbName) -> - DbName; -db_url(remote, DbName) -> +db_url(DbName) -> Addr = config:get("chttpd", "bind_address", "127.0.0.1"), Port = mochiweb_socket_server:get(chttpd, port), - ?l2b(io_lib:format("http://~s:~b/~s", [Addr, Port, DbName])). + ?l2b(io_lib:format("http://~s:~s@~s:~b/~s", [?USERNAME, ?PASSWORD, Addr, + Port, DbName])). -- cgit v1.2.1 From ac7a00cd9f654825e4a3bb39b7a07871598ecef6 Mon Sep 17 00:00:00 2001 From: jiangphcn Date: Thu, 30 Nov 2017 11:58:08 +0800 Subject: Make q configurable for peruser dbs issue 875 --- rel/overlay/etc/default.ini | 3 ++ rel/overlay/etc/local.ini | 3 ++ src/couch_peruser/src/couch_peruser.erl | 26 ++++++++----- src/couch_peruser/test/couch_peruser_test.erl | 54 ++++++++++++++++++++++++--- 4 files changed, 70 insertions(+), 16 deletions(-) diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index 0b0ab75ae..c473495fe 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -88,6 +88,9 @@ enable = false ; If set to true and a user is deleted, the respective database gets ; deleted as well. 
delete_dbs = false +; Set a default q value for peruser-created databases that is different from +; cluster / q +;q = 1 [httpd] port = {{backend_port}} diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini index cd3080ecf..6b46f0fa1 100644 --- a/rel/overlay/etc/local.ini +++ b/rel/overlay/etc/local.ini @@ -17,6 +17,9 @@ ; If set to true and a user is deleted, the respective database gets ; deleted as well. ;delete_dbs = true +; Set a default q value for peruser-created databases that is different from +; cluster / q +;q = 1 [chttpd] ;port = 5984 diff --git a/src/couch_peruser/src/couch_peruser.erl b/src/couch_peruser/src/couch_peruser.erl index 0c769324a..bbf40126c 100644 --- a/src/couch_peruser/src/couch_peruser.erl +++ b/src/couch_peruser/src/couch_peruser.erl @@ -34,7 +34,8 @@ db_name :: binary(), delete_dbs :: boolean(), changes_pid :: pid(), - changes_ref :: reference() + changes_ref :: reference(), + q_for_peruser_db :: integer() }). -record(state, { @@ -43,7 +44,8 @@ delete_dbs :: boolean(), states :: list(), mem3_cluster_pid :: pid(), - cluster_stable :: boolean() + cluster_stable :: boolean(), + q_for_peruser_db :: integer() }). -define(USERDB_PREFIX, "userdb-"). @@ -70,6 +72,8 @@ init_state() -> DbName = ?l2b(config:get( "couch_httpd_auth", "authentication_db", "_users")), DeleteDbs = config:get_boolean("couch_peruser", "delete_dbs", false), + Q = config:get_integer("couch_peruser", "q", 1), + % set up cluster-stable listener Period = abs(config:get_integer("couch_peruser", "cluster_quiet_period", @@ -85,7 +89,8 @@ init_state() -> db_name = DbName, delete_dbs = DeleteDbs, mem3_cluster_pid = Mem3Cluster, - cluster_stable = false + cluster_stable = false, + q_for_peruser_db = Q } end. @@ -95,14 +100,15 @@ start_listening(#state{states=ChangesStates}=State) when length(ChangesStates) > 0 -> % couch_log:debug("peruser: start_listening() already run on node ~p in pid ~p", [node(), self()]), State; -start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs} = State) -> +start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs, q_for_peruser_db = Q} = State) -> % couch_log:debug("peruser: start_listening() on node ~p", [node()]), try States = lists:map(fun (A) -> S = #changes_state{ parent = State#state.parent, db_name = A#shard.name, - delete_dbs = DeleteDbs + delete_dbs = DeleteDbs, + q_for_peruser_db = Q }, {Pid, Ref} = spawn_opt( ?MODULE, init_changes_handler, [S], [link, monitor]), @@ -138,7 +144,7 @@ init_changes_handler(#changes_state{db_name=DbName} = ChangesState) -> changes_handler( {change, {Doc}, _Prepend}, _ResType, - ChangesState=#changes_state{db_name=DbName}) -> + ChangesState=#changes_state{db_name=DbName, q_for_peruser_db = Q}) -> % couch_log:debug("peruser: changes_handler() on DbName/Doc ~p/~p", [DbName, Doc]), case couch_util:get_value(<<"id">>, Doc) of @@ -147,7 +153,7 @@ changes_handler( true -> case couch_util:get_value(<<"deleted">>, Doc, false) of false -> - UserDb = ensure_user_db(User), + UserDb = ensure_user_db(User, Q), ok = ensure_security(User, UserDb, fun add_user/3), ChangesState; true -> @@ -214,13 +220,13 @@ delete_user_db(User) -> end, UserDb. --spec ensure_user_db(User :: binary()) -> binary(). -ensure_user_db(User) -> +-spec ensure_user_db(User :: binary(), Q :: integer()) -> binary(). 
+ensure_user_db(User, Q) -> UserDb = user_db_name(User), try {ok, _DbInfo} = fabric:get_db_info(UserDb) catch error:database_does_not_exist -> - case fabric:create_db(UserDb, [?ADMIN_CTX]) of + case fabric:create_db(UserDb, [?ADMIN_CTX, {q, integer_to_list(Q)}]) of {error, file_exists} -> ok; ok -> ok; accepted -> ok diff --git a/src/couch_peruser/test/couch_peruser_test.erl b/src/couch_peruser/test/couch_peruser_test.erl index 2bc98af66..04ef2ea90 100644 --- a/src/couch_peruser/test/couch_peruser_test.erl +++ b/src/couch_peruser/test/couch_peruser_test.erl @@ -66,6 +66,11 @@ set_config(Section, Key, Value) -> get_base_url(), "/_config/", Section, "/", Key]), do_request(put, Url, "\"" ++ Value ++ "\""). +delete_config(Section, Key, Value) -> + Url = lists:concat([ + get_base_url(), "/_config/", Section, "/", Key]), + do_request(delete, Url, "\"" ++ Value ++ "\""). + do_request(Method, Url) -> Headers = [{basic_auth, {?ADMIN_USERNAME, ?ADMIN_PASSWORD}}], {ok, _, _, _} = test_request:request(Method, Url, Headers). @@ -141,15 +146,50 @@ get_cluster_base_url() -> "http://" ++ Addr ++ ":" ++ Port. -should_create_user_db(TestAuthDb) -> +should_create_user_db_with_default(TestAuthDb) -> + create_user(TestAuthDb, "foo"), + wait_for_db_create(<<"userdb-666f6f">>), + {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>), + {ClusterInfo} = couch_util:get_value(cluster, DbInfo), + [ + ?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())), + ?_assertEqual(1, couch_util:get_value(q, ClusterInfo)) + ]. + +should_create_anon_user_db_with_default(TestAuthDb) -> + create_anon_user(TestAuthDb, "fooo"), + wait_for_db_create(<<"userdb-666f6f6f">>), + {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>), + {ClusterInfo} = couch_util:get_value(cluster, DbInfo), + [ + ?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())), + ?_assertEqual(1, couch_util:get_value(q, ClusterInfo)) + ]. + +should_create_user_db_with_q4(TestAuthDb) -> + set_config("couch_peruser", "q", "4"), create_user(TestAuthDb, "foo"), wait_for_db_create(<<"userdb-666f6f">>), - ?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())). + {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>), + {ClusterInfo} = couch_util:get_value(cluster, DbInfo), + delete_config("couch_peruser", "q", "4"), -should_create_anon_user_db(TestAuthDb) -> + [ + ?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())), + ?_assertEqual(4, couch_util:get_value(q, ClusterInfo)) + ]. + +should_create_anon_user_db_with_q4(TestAuthDb) -> + set_config("couch_peruser", "q", "4"), create_anon_user(TestAuthDb, "fooo"), wait_for_db_create(<<"userdb-666f6f6f">>), - ?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())). + {ok, TargetInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>), + {ClusterInfo} = couch_util:get_value(cluster, TargetInfo), + delete_config("couch_peruser", "q", "4"), + [ + ?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())), + ?_assertEqual(4, couch_util:get_value(q, ClusterInfo)) + ]. 
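[Editor's note — not part of the patch] A hypothetical illustration of the new couch_peruser q setting, set through the config API much like the set_config/3 helper above (address, port, and credentials are assumptions):

    import requests

    # node-local config endpoint assumed; adjust for your deployment
    config = "http://adm:pass@127.0.0.1:15986/_config/couch_peruser/q"
    # per-user databases created from now on get q=4 shards
    requests.put(config, json="4")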
should_not_delete_user_db(TestAuthDb) -> User = "foo", @@ -381,8 +421,10 @@ couch_peruser_test_() -> foreach, fun setup/0, fun teardown/1, [ - fun should_create_anon_user_db/1, - fun should_create_user_db/1, + fun should_create_anon_user_db_with_default/1, + fun should_create_user_db_with_default/1, + fun should_create_user_db_with_q4/1, + fun should_create_anon_user_db_with_q4/1, fun should_not_delete_user_db/1, fun should_delete_user_db/1, fun should_reflect_config_changes/1, -- cgit v1.2.1 From 0493988a7509a4059429ff48dc93bf5ce287da1f Mon Sep 17 00:00:00 2001 From: Will Holley Date: Mon, 11 Dec 2017 13:25:53 +0000 Subject: Allow to use index with or (#1038) Since #816, a mango JSON index on compound fields can be selected only if the selector makes sure that all the fields listed in the index are always present. This adds a special case where all clauses of an `$or` can ensure that a field is present. For instance, given an index on [A, B], is_usable would now return true for the selector: { "A": "foo", "$or": [ { "B": "bar" }, { "B": "baz" } ] } but false for: { "A": "foo", "$or": [ { "B": "bar" }, { "C": "bar" } ] } --- src/mango/src/mango_selector.erl | 172 ++++++++++++++++++++++-- src/mango/test/05-index-selection-test.py | 16 +++ 2 files changed, 181 insertions(+), 7 deletions(-) diff --git a/src/mango/src/mango_selector.erl b/src/mango/src/mango_selector.erl index 2a546c9ba..968dc3c74 100644 --- a/src/mango/src/mango_selector.erl +++ b/src/mango/src/mango_selector.erl @@ -597,14 +597,31 @@ has_required_fields_int([], Remainder) -> has_required_fields_int(Selector, RequiredFields) when not is_list(Selector) -> has_required_fields_int([Selector], RequiredFields); -% We can "see" through $and operator. We ignore other -% combination operators because they can't be used to restrict -% an index. +% We can "see" through $and operator. Iterate +% through the list of child operators. has_required_fields_int([{[{<<"$and">>, Args}]}], RequiredFields) when is_list(Args) -> has_required_fields_int(Args, RequiredFields); -% Handle $and operator where it has peers +% We can "see" through $or operator. Required fields +% must be covered by all children. +has_required_fields_int([{[{<<"$or">>, Args}]} | Rest], RequiredFields) + when is_list(Args) -> + Remainder0 = lists:foldl(fun(Arg, Acc) -> + % for each child test coverage against the full + % set of required fields + Remainder = has_required_fields_int(Arg, RequiredFields), + + % collect the remaining fields across all children + Acc ++ Remainder + end, [], Args), + + % remove duplicate fields + Remainder1 = lists:usort(Remainder0), + has_required_fields_int(Rest, Remainder1); + +% Handle $and operator where it has peers. Required fields +% can be covered by any child. has_required_fields_int([{[{<<"$and">>, Args}]} | Rest], RequiredFields) when is_list(Args) -> Remainder = has_required_fields_int(Args, RequiredFields), @@ -680,7 +697,8 @@ has_required_fields_nested_and_true_test() -> ] }]}, - ?assertEqual(true, has_required_fields(Selector, RequiredFields)). + Normalized = normalize(Selector), + ?assertEqual(true, has_required_fields(Normalized, RequiredFields)). has_required_fields_and_false_test() -> RequiredFields = [<<"A">>, <<"C">>], @@ -693,7 +711,7 @@ has_required_fields_and_false_test() -> Normalized = normalize(Selector), ?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
-has_required_fields_or_test() -> +has_required_fields_or_false_test() -> RequiredFields = [<<"A">>], Selector = {[{<<"$or">>, [ @@ -704,4 +722,144 @@ has_required_fields_or_test() -> Normalized = normalize(Selector), ?assertEqual(false, has_required_fields(Normalized, RequiredFields)). --endif. \ No newline at end of file +has_required_fields_or_true_test() -> + RequiredFields = [<<"A">>, <<"B">>, <<"C">>], + Selector = {[{<<"A">>, "foo"}, + {<<"$or">>, + [ + {[{<<"B">>, <<"bar">>}]}, + {[{<<"B">>, <<"baz">>}]} + ] + }, + {<<"C">>, "qux"} + ]}, + Normalized = normalize(Selector), + ?assertEqual(true, has_required_fields(Normalized, RequiredFields)). + +has_required_fields_and_nested_or_true_test() -> + RequiredFields = [<<"A">>, <<"B">>], + Selector1 = {[{<<"$and">>, + [ + {[{<<"A">>, <<"foo">>}]} + ] + }]}, + Selector2 = {[{<<"$or">>, + [ + {[{<<"B">>, <<"foo">>}]}, + {[{<<"B">>, <<"foo">>}]} + ] + }]}, + Selector = {[{<<"$and">>, + [ + Selector1, + Selector2 + ] + }]}, + Normalized = normalize(Selector), + ?assertEqual(true, has_required_fields(Normalized, RequiredFields)), + + SelectorReverse = {[{<<"$and">>, + [ + Selector2, + Selector1 + ] + }]}, + NormalizedReverse = normalize(SelectorReverse), + ?assertEqual(true, has_required_fields(NormalizedReverse, RequiredFields)). + +has_required_fields_and_nested_or_false_test() -> + RequiredFields = [<<"A">>, <<"B">>], + Selector1 = {[{<<"$and">>, + [ + {[{<<"A">>, <<"foo">>}]} + ] + }]}, + Selector2 = {[{<<"$or">>, + [ + {[{<<"A">>, <<"foo">>}]}, + {[{<<"B">>, <<"foo">>}]} + ] + }]}, + Selector = {[{<<"$and">>, + [ + Selector1, + Selector2 + ] + }]}, + Normalized = normalize(Selector), + ?assertEqual(false, has_required_fields(Normalized, RequiredFields)), + + SelectorReverse = {[{<<"$and">>, + [ + Selector2, + Selector1 + ] + }]}, + + NormalizedReverse = normalize(SelectorReverse), + ?assertEqual(false, has_required_fields(NormalizedReverse, RequiredFields)). + +has_required_fields_or_nested_and_true_test() -> + RequiredFields = [<<"A">>], + Selector1 = {[{<<"$and">>, + [ + {[{<<"A">>, <<"foo">>}]} + ] + }]}, + Selector2 = {[{<<"$and">>, + [ + {[{<<"A">>, <<"foo">>}]} + ] + }]}, + Selector = {[{<<"$or">>, + [ + Selector1, + Selector2 + ] + }]}, + Normalized = normalize(Selector), + ?assertEqual(true, has_required_fields(Normalized, RequiredFields)). + +has_required_fields_or_nested_or_true_test() -> + RequiredFields = [<<"A">>], + Selector1 = {[{<<"$or">>, + [ + {[{<<"A">>, <<"foo">>}]} + ] + }]}, + Selector2 = {[{<<"$or">>, + [ + {[{<<"A">>, <<"bar">>}]} + ] + }]}, + Selector = {[{<<"$or">>, + [ + Selector1, + Selector2 + ] + }]}, + Normalized = normalize(Selector), + ?assertEqual(true, has_required_fields(Normalized, RequiredFields)). + +has_required_fields_or_nested_or_false_test() -> + RequiredFields = [<<"A">>], + Selector1 = {[{<<"$or">>, + [ + {[{<<"A">>, <<"foo">>}]} + ] + }]}, + Selector2 = {[{<<"$or">>, + [ + {[{<<"B">>, <<"bar">>}]} + ] + }]}, + Selector = {[{<<"$or">>, + [ + Selector1, + Selector2 + ] + }]}, + Normalized = normalize(Selector), + ?assertEqual(false, has_required_fields(Normalized, RequiredFields)). + +-endif. 
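[Editor's note — not part of the patch] A sketch of the new $or coverage rule from the find API side (server, credentials, database name, and an index on ["A", "B"] are assumptions):

    import requests

    db = "http://adm:pass@127.0.0.1:5984/demo"
    selector = {
        "A": "foo",
        # every $or branch constrains "B", so an index on ["A", "B"] is
        # usable; swap one branch for {"C": "bar"} and it no longer is
        "$or": [{"B": "bar"}, {"B": "baz"}],
    }
    r = requests.post(db + "/_explain", json={"selector": selector})
    print(r.json()["index"]["type"])  # "json" when the index is chosen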
diff --git a/src/mango/test/05-index-selection-test.py b/src/mango/test/05-index-selection-test.py index eec0bd9a6..2a40fda38 100644 --- a/src/mango/test/05-index-selection-test.py +++ b/src/mango/test/05-index-selection-test.py @@ -38,6 +38,22 @@ class IndexSelectionTests: }, explain=True) self.assertEqual(resp["index"]["type"], "json") + def test_with_or(self): + # index on ["company","manager"] + ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198" + + resp = self.db.find({ + "company": { + "$gt": "a", + "$lt": "z" + }, + "$or": [ + {"manager": "Foo"}, + {"manager": "Bar"} + ] + }, explain=True) + self.assertEqual(resp["index"]["ddoc"], ddocid) + def test_use_most_columns(self): # ddoc id for the age index ddocid = "_design/ad3d537c03cd7c6a43cf8dff66ef70ea54c2b40f" -- cgit v1.2.1 From 718f89db65dad1642cfbf8d5c18da8c23e93fb9a Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Mon, 11 Dec 2017 19:11:03 -0400 Subject: Multiple fixes and refactoring of couch tests. (#1062) --- src/couch/test/couch_db_tests.erl | 223 +++++++++++++++---------- src/couch/test/couchdb_cookie_domain_tests.erl | 78 ++++----- src/couch/test/global_changes_tests.erl | 2 +- 3 files changed, 169 insertions(+), 134 deletions(-) diff --git a/src/couch/test/couch_db_tests.erl b/src/couch/test/couch_db_tests.erl index c57a0d497..62e059070 100644 --- a/src/couch/test/couch_db_tests.erl +++ b/src/couch/test/couch_db_tests.erl @@ -17,25 +17,55 @@ -define(TIMEOUT, 120). -setup() -> - Ctx = test_util:start_couch(), - config:set("log", "include_sasl", "false", false), - Ctx. - create_delete_db_test_()-> { "Database create/delete tests", { setup, - fun setup/0, fun test_util:stop_couch/1, - fun(_) -> - [should_create_db(), - should_delete_db(), - should_create_multiple_dbs(), - should_delete_multiple_dbs(), - should_create_delete_database_continuously()] - end + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun() -> ?tempdb() end, + [ + fun should_create_db/1, + fun should_delete_db/1 + ] + } + } + }. + +create_delete_multiple_dbs_test_()-> + { + "Multiple database create/delete tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun() -> [?tempdb() || _ <- lists:seq(1, 6)] end, + [ + fun should_create_multiple_dbs/1, + fun should_delete_multiple_dbs/1 + ] + } + } + }. + +create_delete_database_continuously_test_() -> + { + "Continuous database create/delete tests", + { + setup, + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreachx, + fun(_) -> ?tempdb() end, + [ + {10, fun should_create_delete_database_continuously/2}, + {100, fun should_create_delete_database_continuously/2} + ] + } } }. @@ -44,87 +74,100 @@ open_db_test_()-> "Database open tests", { setup, - fun setup/0, fun test_util:stop_couch/1, - fun(_) -> - [should_create_db_if_missing()] - end + fun test_util:start_couch/0, fun test_util:stop_couch/1, + { + foreach, + fun() -> ?tempdb() end, + [ + fun should_create_db_if_missing/1, + fun should_open_db_if_exists/1 + ] + } } }. -should_create_db() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, []), - ok = couch_db:close(Db), - {ok, AllDbs} = couch_server:all_databases(), - ?_assert(lists:member(DbName, AllDbs)). -should_delete_db() -> - DbName = ?tempdb(), - couch_db:create(DbName, []), - couch_server:delete(DbName, []), - {ok, AllDbs} = couch_server:all_databases(), - ?_assertNot(lists:member(DbName, AllDbs)).
- -should_create_multiple_dbs() -> - gen_server:call(couch_server, {set_max_dbs_open, 3}), - - DbNames = [?tempdb() || _ <- lists:seq(1, 6)], - lists:foreach(fun(DbName) -> - {ok, Db} = couch_db:create(DbName, []), - ok = couch_db:close(Db) - end, DbNames), - - {ok, AllDbs} = couch_server:all_databases(), - NumCreated = lists:foldl(fun(DbName, Acc) -> - ?assert(lists:member(DbName, AllDbs)), - Acc+1 - end, 0, DbNames), - - ?_assertEqual(NumCreated, 6). - -should_delete_multiple_dbs() -> - DbNames = [?tempdb() || _ <- lists:seq(1, 6)], - lists:foreach(fun(DbName) -> - {ok, Db} = couch_db:create(DbName, []), - ok = couch_db:close(Db) - end, DbNames), - - lists:foreach(fun(DbName) -> - ok = couch_server:delete(DbName, []) - end, DbNames), - - {ok, AllDbs} = couch_server:all_databases(), - NumDeleted = lists:foldl(fun(DbName, Acc) -> - ?assertNot(lists:member(DbName, AllDbs)), - Acc + 1 - end, 0, DbNames), - - ?_assertEqual(NumDeleted, 6). - -should_create_delete_database_continuously() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, []), - couch_db:close(Db), - [{timeout, ?TIMEOUT, {integer_to_list(N) ++ " times", - ?_assert(loop(DbName, N))}} - || N <- [10, 100]]. - -should_create_db_if_missing() -> - DbName = ?tempdb(), - {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]), +should_create_db(DbName) -> + ?_test(begin + {ok, Before} = couch_server:all_databases(), + ?assertNot(lists:member(DbName, Before)), + ?assert(create_db(DbName)), + {ok, After} = couch_server:all_databases(), + ?assert(lists:member(DbName, After)) + end). + +should_delete_db(DbName) -> + ?_test(begin + ?assert(create_db(DbName)), + {ok, Before} = couch_server:all_databases(), + ?assert(lists:member(DbName, Before)), + couch_server:delete(DbName, []), + {ok, After} = couch_server:all_databases(), + ?assertNot(lists:member(DbName, After)) + end). + +should_create_multiple_dbs(DbNames) -> + ?_test(begin + gen_server:call(couch_server, {set_max_dbs_open, 3}), + {ok, Before} = couch_server:all_databases(), + [?assertNot(lists:member(DbName, Before)) || DbName <- DbNames], + [?assert(create_db(DbName)) || DbName <- DbNames], + {ok, After} = couch_server:all_databases(), + [?assert(lists:member(DbName, After)) || DbName <- DbNames] + end). + +should_delete_multiple_dbs(DbNames) -> + ?_test(begin + [?assert(create_db(DbName)) || DbName <- DbNames], + {ok, Before} = couch_server:all_databases(), + [?assert(lists:member(DbName, Before)) || DbName <- DbNames], + [?assert(delete_db(DbName)) || DbName <- DbNames], + {ok, After} = couch_server:all_databases(), + [?assertNot(lists:member(DbName, After)) || DbName <- DbNames] + end). + +should_create_delete_database_continuously(Times, DbName) -> + {lists:flatten(io_lib:format("~b times", [Times])), + ?_test(begin + ?assert(create_db(DbName)), + lists:foreach(fun(_) -> + {timeout, ?TIMEOUT, [ + ?assert(delete_db(DbName)), + ?assert(create_db(DbName)) + ]} + end, lists:seq(1, Times)) + end)}. + +should_create_db_if_missing(DbName) -> + ?_test(begin + {ok, Before} = couch_server:all_databases(), + ?assertNot(lists:member(DbName, Before)), + {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]), + ok = couch_db:close(Db), + {ok, After} = couch_server:all_databases(), + ?assert(lists:member(DbName, After)) + end). 
+ +should_open_db_if_exists(DbName) -> + ?_test(begin + ?assert(create_db(DbName)), + {ok, Before} = couch_server:all_databases(), + ?assert(lists:member(DbName, Before)), + {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]), + ok = couch_db:close(Db), + {ok, After} = couch_server:all_databases(), + ?assert(lists:member(DbName, After)) + end). + + +create_db(DbName) -> + create_db(DbName, []). + +create_db(DbName, Opts) -> + {ok, Db} = couch_db:create(DbName, Opts), ok = couch_db:close(Db), - {ok, AllDbs} = couch_server:all_databases(), - ?_assert(lists:member(DbName, AllDbs)). - -loop(_, 0) -> - true; -loop(DbName, N) -> - ok = cycle(DbName), - loop(DbName, N - 1). + true. -cycle(DbName) -> +delete_db(DbName) -> ok = couch_server:delete(DbName, []), - {ok, Db} = couch_db:create(DbName, []), - couch_db:close(Db), - ok. + true. diff --git a/src/couch/test/couchdb_cookie_domain_tests.erl b/src/couch/test/couchdb_cookie_domain_tests.erl index 1a9aedb93..e66ab31e6 100755 --- a/src/couch/test/couchdb_cookie_domain_tests.erl +++ b/src/couch/test/couchdb_cookie_domain_tests.erl @@ -18,60 +18,52 @@ -define(USER, "cookie_domain_test_admin"). -define(PASS, "pass"). -setup(PortType) -> +setup() -> + Ctx = test_util:start_couch([chttpd]), Hashed = couch_passwords:hash_admin_password(?PASS), ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), Addr = config:get("httpd", "bind_address", "127.0.0.1"), - lists:concat(["http://", Addr, ":", port(PortType), "/_session"]). + Port = mochiweb_socket_server:get(chttpd, port), + Url = ?l2b(io_lib:format("http://~s:~b/_session", [Addr, Port])), + ContentType = [{"Content-Type", "application/json"}], + Payload = jiffy:encode({[{name, ?l2b(?USER)}, {password, ?l2b(?PASS)}]}), + {ok, ?b2l(Url), ContentType, ?b2l(Payload), Ctx}. -teardown(_,_) -> - ok = config:delete("admins", ?USER, _Persist=false). +teardown({ok, _, _, _, Ctx}) -> + ok = config:delete("admins", ?USER, _Persist=false), + test_util:stop_couch(Ctx). cookie_test_() -> - Tests = [ - fun should_set_cookie_domain/2, - fun should_not_set_cookie_domain/2 - ], { "Cookie domain tests", { setup, - fun() -> test_util:start_couch([chttpd]) end, fun test_util:stop_couch/1, - [ - make_test_case(clustered, Tests) - ] + fun setup/0, + fun teardown/1, + fun({ok, Url, ContentType, Payload, _}) -> + [ + should_set_cookie_domain(Url, ContentType, Payload), + should_not_set_cookie_domain(Url, ContentType, Payload) + ] + end } }. -make_test_case(Mod, Funs) -> -{ - lists:flatten(io_lib:format("~s", [Mod])), - {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]} -}. +should_set_cookie_domain(Url, ContentType, Payload) -> + ?_test(begin + ok = config:set("couch_httpd_auth", "cookie_domain", + "example.com", false), + {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload), + ?assertEqual(200, Code), + Cookie = proplists:get_value("Set-Cookie", Headers), + ?assert(string:str(Cookie, "; Domain=example.com") > 0) + end). -should_set_cookie_domain(_PortType, Url) -> - ?_assertEqual(true, - begin - ok = config:set("couch_httpd_auth", "cookie_domain", "example.com", false), - {ok, Code, Headers, _} = test_request:post(Url, [{"Content-Type", "application/json"}], - "{\"name\":\"" ++ ?USER ++ "\", \"password\": \"" ++ ?PASS ++ "\"}"), - ?_assert(Code =:= 200), - Cookie = proplists:get_value("Set-Cookie", Headers), - string:str(Cookie, "; Domain=example.com") > 0 - end). 
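[Editor's note — not part of the patch] The behavior these cookie tests pin down, as a hedged HTTP-level sketch (addresses, ports, and credentials are assumptions):

    import requests

    cfg = ("http://adm:pass@127.0.0.1:15986"
           "/_config/couch_httpd_auth/cookie_domain")
    requests.put(cfg, json="example.com")

    r = requests.post("http://127.0.0.1:5984/_session",
                      json={"name": "adm", "password": "pass"})
    # with cookie_domain set, the session cookie carries a Domain attribute
    assert "Domain=example.com" in r.headers.get("Set-Cookie", "")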
- -should_not_set_cookie_domain(_PortType, Url) -> - ?_assertEqual(0, - begin - ok = config:set("couch_httpd_auth", "cookie_domain", "", false), - {ok, Code, Headers, _} = test_request:post(Url, [{"Content-Type", "application/json"}], - "{\"name\":\"" ++ ?USER ++ "\", \"password\": \"" ++ ?PASS ++ "\"}"), - ?_assert(Code =:= 200), - Cookie = proplists:get_value("Set-Cookie", Headers), - string:str(Cookie, "; Domain=") - end). - -port(clustered) -> - integer_to_list(mochiweb_socket_server:get(chttpd, port)); -port(backdoor) -> - integer_to_list(mochiweb_socket_server:get(couch_httpd, port)). +should_not_set_cookie_domain(Url, ContentType, Payload) -> + ?_test(begin + ok = config:set("couch_httpd_auth", "cookie_domain", "", false), + {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload), + ?assertEqual(200, Code), + Cookie = proplists:get_value("Set-Cookie", Headers), + ?assertEqual(0, string:str(Cookie, "; Domain=")) + end). diff --git a/src/couch/test/global_changes_tests.erl b/src/couch/test/global_changes_tests.erl index 864a6a0ec..4392aafac 100644 --- a/src/couch/test/global_changes_tests.erl +++ b/src/couch/test/global_changes_tests.erl @@ -32,7 +32,7 @@ teardown({_, DbName}) -> ok. http_create_db(Name) -> - Resp = {ok, Status, _, _} = test_request:put(db_url(Name), [?AUTH], ""), + {ok, Status, _, _} = test_request:put(db_url(Name), [?AUTH], ""), true = lists:member(Status, [201, 202]), ok. -- cgit v1.2.1 From f8e56e91b2fe68c392858c2b8770d305c3d6b3b0 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Mon, 11 Dec 2017 16:50:30 -0500 Subject: Add coverage reports to more applications --- src/chttpd/rebar.config | 2 ++ src/couch_index/rebar.config | 2 ++ src/couch_mrview/rebar.config | 2 ++ src/fabric/rebar.config | 5 ++--- src/rexi/rebar.config | 2 ++ 5 files changed, 10 insertions(+), 3 deletions(-) create mode 100644 src/chttpd/rebar.config create mode 100644 src/couch_index/rebar.config create mode 100644 src/couch_mrview/rebar.config create mode 100644 src/rexi/rebar.config diff --git a/src/chttpd/rebar.config b/src/chttpd/rebar.config new file mode 100644 index 000000000..e0d18443b --- /dev/null +++ b/src/chttpd/rebar.config @@ -0,0 +1,2 @@ +{cover_enabled, true}. +{cover_print_enabled, true}. diff --git a/src/couch_index/rebar.config b/src/couch_index/rebar.config new file mode 100644 index 000000000..e0d18443b --- /dev/null +++ b/src/couch_index/rebar.config @@ -0,0 +1,2 @@ +{cover_enabled, true}. +{cover_print_enabled, true}. diff --git a/src/couch_mrview/rebar.config b/src/couch_mrview/rebar.config new file mode 100644 index 000000000..e0d18443b --- /dev/null +++ b/src/couch_mrview/rebar.config @@ -0,0 +1,2 @@ +{cover_enabled, true}. +{cover_print_enabled, true}. diff --git a/src/fabric/rebar.config b/src/fabric/rebar.config index df35ac639..362c8785e 100644 --- a/src/fabric/rebar.config +++ b/src/fabric/rebar.config @@ -10,6 +10,5 @@ % License for the specific language governing permissions and limitations under % the License. -{deps, [ - {meck, ".*", {git, "https://github.com/apache/couchdb-meck.git", {tag, "0.8.8"}}} -]}. +{cover_enabled, true}. +{cover_print_enabled, true}. diff --git a/src/rexi/rebar.config b/src/rexi/rebar.config new file mode 100644 index 000000000..e0d18443b --- /dev/null +++ b/src/rexi/rebar.config @@ -0,0 +1,2 @@ +{cover_enabled, true}. +{cover_print_enabled, true}. 
-- cgit v1.2.1 From beb87817f7dfaeb9ce2763147ddde9d6d29ffc6e Mon Sep 17 00:00:00 2001 From: Peng Hui Jiang Date: Fri, 15 Dec 2017 10:28:19 +0800 Subject: Add couch_stats tracking back to couch_log (#1064) issue #832 --- src/couch_log/src/couch_log.erl | 32 ++++++++++++++++------ src/couch_log/test/couch_log_test_util.erl | 7 +++-- src/couch_stats/src/couch_stats.app.src | 2 +- src/couch_stats/src/couch_stats.erl | 2 +- .../src/couch_stats_process_tracker.erl | 8 +++--- 5 files changed, 35 insertions(+), 16 deletions(-) diff --git a/src/couch_log/src/couch_log.erl b/src/couch_log/src/couch_log.erl index 0ce4739a4..a80d4a477 100644 --- a/src/couch_log/src/couch_log.erl +++ b/src/couch_log/src/couch_log.erl @@ -28,35 +28,51 @@ -spec debug(string(), list()) -> ok. -debug(Fmt, Args) -> log(debug, Fmt, Args). +debug(Fmt, Args) -> + couch_stats:increment_counter([couch_log, level, debug]), + log(debug, Fmt, Args). -spec info(string(), list()) -> ok. -info(Fmt, Args) -> log(info, Fmt, Args). +info(Fmt, Args) -> + couch_stats:increment_counter([couch_log, level, info]), + log(info, Fmt, Args). -spec notice(string(), list()) -> ok. -notice(Fmt, Args) -> log(notice, Fmt, Args). +notice(Fmt, Args) -> + couch_stats:increment_counter([couch_log, level, notice]), + log(notice, Fmt, Args). -spec warning(string(), list()) -> ok. -warning(Fmt, Args) -> log(warning, Fmt, Args). +warning(Fmt, Args) -> + couch_stats:increment_counter([couch_log, level, warning]), + log(warning, Fmt, Args). -spec error(string(), list()) -> ok. -error(Fmt, Args) -> log(error, Fmt, Args). +error(Fmt, Args) -> + couch_stats:increment_counter([couch_log, level, error]), + log(error, Fmt, Args). -spec critical(string(), list()) -> ok. -critical(Fmt, Args) -> log(critical, Fmt, Args). +critical(Fmt, Args) -> + couch_stats:increment_counter([couch_log, level, critical]), + log(critical, Fmt, Args). -spec alert(string(), list()) -> ok. -alert(Fmt, Args) -> log(alert, Fmt, Args). +alert(Fmt, Args) -> + couch_stats:increment_counter([couch_log, level, alert]), + log(alert, Fmt, Args). -spec emergency(string(), list()) -> ok. -emergency(Fmt, Args) -> log(emergency, Fmt, Args). +emergency(Fmt, Args) -> + couch_stats:increment_counter([couch_log, level, emergency]), + log(emergency, Fmt, Args). -spec set_level(atom() | string() | integer()) -> true. diff --git a/src/couch_log/test/couch_log_test_util.erl b/src/couch_log/test/couch_log_test_util.erl index 250366982..05d64d8a9 100644 --- a/src/couch_log/test/couch_log_test_util.erl +++ b/src/couch_log/test/couch_log_test_util.erl @@ -22,12 +22,15 @@ start() -> application:set_env(config, ini_files, config_files()), application:start(config), ignore_common_loggers(), - application:start(couch_log). + application:start(couch_log), + meck:new(couch_stats), + ok = meck:expect(couch_stats, increment_counter, ['_'], ok). stop(_) -> application:stop(config), - application:stop(couch_log). + application:stop(couch_log), + meck:unload(couch_stats). with_level(Name, Fun) -> diff --git a/src/couch_stats/src/couch_stats.app.src b/src/couch_stats/src/couch_stats.app.src index 6339a0f1d..990f8de62 100644 --- a/src/couch_stats/src/couch_stats.app.src +++ b/src/couch_stats/src/couch_stats.app.src @@ -14,7 +14,7 @@ {description, "Simple statistics collection"}, {vsn, git}, {registered, [couch_stats_aggregator, couch_stats_process_tracker]}, - {applications, [kernel, stdlib, folsom, couch_log]}, + {applications, [kernel, stdlib, folsom]}, {mod, {couch_stats_app, []}}, {env, []} ]}. 
diff --git a/src/couch_stats/src/couch_stats.erl b/src/couch_stats/src/couch_stats.erl index 59175f7a8..4fde14acb 100644 --- a/src/couch_stats/src/couch_stats.erl +++ b/src/couch_stats/src/couch_stats.erl @@ -119,7 +119,7 @@ notify_existing_metric(Name, Op, Type) -> try ok = folsom_metrics:notify_existing_metric(Name, Op, Type) catch _:_ -> - couch_log:notice("unknown metric: ~p", [Name]), + error_logger:error_msg("unknown metric: ~p", [Name]), {error, unknown_metric} end. diff --git a/src/couch_stats/src/couch_stats_process_tracker.erl b/src/couch_stats/src/couch_stats_process_tracker.erl index 4765734e9..7d16deb8a 100644 --- a/src/couch_stats/src/couch_stats_process_tracker.erl +++ b/src/couch_stats/src/couch_stats_process_tracker.erl @@ -48,7 +48,7 @@ init([]) -> {ok, #st{}}. handle_call(Msg, _From, State) -> - couch_log:notice("~p received unknown call ~p", [?MODULE, Msg]), + error_logger:error_msg("~p received unknown call ~p", [?MODULE, Msg]), {noreply, State}. handle_cast({track, Pid, Name}, State) -> @@ -57,13 +57,13 @@ handle_cast({track, Pid, Name}, State) -> ets:insert(?MODULE, {Ref, Name}), {noreply, State}; handle_cast(Msg, State) -> - couch_log:notice("~p received unknown cast ~p", [?MODULE, Msg]), + error_logger:error_msg("~p received unknown cast ~p", [?MODULE, Msg]), {noreply, State}. handle_info({'DOWN', Ref, _, _, _}=Msg, State) -> case ets:lookup(?MODULE, Ref) of [] -> - couch_log:notice( + error_logger:error_msg( "~p received unknown exit; message was ~p", [?MODULE, Msg] ); [{Ref, Name}] -> @@ -72,7 +72,7 @@ handle_info({'DOWN', Ref, _, _, _}=Msg, State) -> end, {noreply, State}; handle_info(Msg, State) -> - couch_log:notice("~p received unknown message ~p", [?MODULE, Msg]), + error_logger:error_msg("~p received unknown message ~p", [?MODULE, Msg]), {noreply, State}. terminate(_Reason, _State) -> -- cgit v1.2.1 From 8e0e8b3dda1b152589bd5b8909d3489d3b733b02 Mon Sep 17 00:00:00 2001 From: Geoff Cox Date: Fri, 15 Dec 2017 12:54:40 -0800 Subject: Fix haproxy stats (#1039) - `stats scope .` ends up blocking all backends - Renaming endpoint from `_stats` to `_haproxy_stats` so that it doesn't collide with CouchDB's `_stats` endpoint - Added a commented out stats auth line so that you can easily password-protect it --- rel/haproxy.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rel/haproxy.cfg b/rel/haproxy.cfg index 73ec987f4..45affaffe 100644 --- a/rel/haproxy.cfg +++ b/rel/haproxy.cfg @@ -30,8 +30,8 @@ defaults timeout connect 500 stats enable - stats scope . - stats uri /_stats + stats uri /_haproxy_stats + # stats auth admin:admin # Uncomment for basic auth frontend http-in # This requires HAProxy 1.5.x -- cgit v1.2.1 From 7c37e5863905185902f2e07dc8ce0d3019df34e4 Mon Sep 17 00:00:00 2001 From: Will Holley Date: Tue, 19 Dec 2017 13:04:44 +0000 Subject: Mango: change catch-all field range priority (#1069) 01252f97 introduced a "catch-all" feature to Mango that allowed queries to fall back on a full database scan (_all_docs) when no valid index was available. This worked by creating a special index range representing the full database scan. For example, a selector: { "_id": "foo" } would be translated into a field range of: [{ "startkey": "foo", "endkey": "foo"}] Then, prepending the catch-all field range, we would have: [ { "startkey": null, "endkey": max_json_value}, { "startkey": "foo", "endkey": "foo"} ] This set gets passed into mango_cursor_view:choose_best_index to determine the most selective index and field range combination to use.
Unfortunately, in the event that we have one possible index (all_docs) and multiple valid ranges, it just chooses the first range it finds - the full index scan in this case. This commit makes the catch-all field range the last available option, ensuring we use the more selective range where available. --- src/mango/src/mango_cursor_special.erl | 7 +++++-- src/mango/test/03-operator-test.py | 9 ++++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/mango/src/mango_cursor_special.erl b/src/mango/src/mango_cursor_special.erl index 78cac7f5d..f4a760d1c 100644 --- a/src/mango/src/mango_cursor_special.erl +++ b/src/mango/src/mango_cursor_special.erl @@ -31,10 +31,13 @@ create(Db, Indexes, Selector, Opts) -> InitialRange = mango_idx_view:field_ranges(Selector), CatchAll = [{<<"_id">>, {'$gt', null, '$lt', mango_json_max}}], - FieldRanges = lists:append(CatchAll, InitialRange), + % order matters here - we only want to use the catchall index + % if no other range can fulfill the query (because we know) + % catchall is the most expensive range + FieldRanges = InitialRange ++ CatchAll, Composited = mango_cursor_view:composite_indexes(Indexes, FieldRanges), {Index, IndexRanges} = mango_cursor_view:choose_best_index(Db, Composited), - + Limit = couch_util:get_value(limit, Opts, mango_opts:default_limit()), Skip = couch_util:get_value(skip, Opts, 0), Fields = couch_util:get_value(fields, Opts, all_fields), diff --git a/src/mango/test/03-operator-test.py b/src/mango/test/03-operator-test.py index 239cc7d41..4650c7e84 100644 --- a/src/mango/test/03-operator-test.py +++ b/src/mango/test/03-operator-test.py @@ -264,6 +264,13 @@ class OperatorTextTests(mango.UserDocsTextTests, OperatorTests): class OperatorAllDocsTests(mango.UserDocsTestsNoIndexes, OperatorTests): - pass + def test_range_id_eq(self): + doc_id = "8e1c90c0-ac18-4832-8081-40d14325bde0" + r = self.db.find({ + "_id": doc_id + }, explain=True, return_raw=True) + + self.assertEqual(r["mrargs"]["end_key"], doc_id) + self.assertEqual(r["mrargs"]["start_key"], doc_id) -- cgit v1.2.1 From 3b28b84da610114626fb6c1f83e44ae874e33d8b Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Wed, 20 Dec 2017 16:44:33 -0400 Subject: Fix mango native proc crash (#1067) Don't crash on receiving cast stop When the soft limit is reached, couch_proc_manager evicts idle processes by casting a `stop` message to them. Since mango_native_proc doesn't handle this message, it results in a crash with an `invalid_cast` reason.
--- src/mango/src/mango_idx_text.erl | 2 +- src/mango/src/mango_native_proc.erl | 36 +++++++++++++++++++++++++++++++++++- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/src/mango/src/mango_idx_text.erl b/src/mango/src/mango_idx_text.erl index 369e2cd08..29b4441a1 100644 --- a/src/mango/src/mango_idx_text.erl +++ b/src/mango/src/mango_idx_text.erl @@ -330,7 +330,7 @@ indexable_fields(Fields, {op_not, {ExistsQuery, Arg}}) when is_tuple(Arg) -> Fields0 = indexable_fields(Fields, ExistsQuery), indexable_fields(Fields0, Arg); % forces "$exists" : false to use _all_docs -indexable_fields(Fields, {op_not, {ExistsQuery, false}}) -> +indexable_fields(_, {op_not, {_, false}}) -> []; indexable_fields(Fields, {op_insert, Arg}) when is_binary(Arg) -> diff --git a/src/mango/src/mango_native_proc.erl b/src/mango/src/mango_native_proc.erl index 61d79b7ec..7a3420193 100644 --- a/src/mango/src/mango_native_proc.erl +++ b/src/mango/src/mango_native_proc.erl @@ -113,6 +113,9 @@ handle_cast(garbage_collect, St) -> erlang:garbage_collect(), {noreply, St}; +handle_cast(stop, St) -> + {stop, normal, St}; + handle_cast(Msg, St) -> {stop, {invalid_cast, Msg}, St}. @@ -363,4 +366,35 @@ validate_index_info(IndexInfo) -> [invalid_index | Results0] end end, [], IdxTypes), - lists:member(valid_index, Results). \ No newline at end of file + lists:member(valid_index, Results). + + +-ifdef(TEST). + +-include_lib("eunit/include/eunit.hrl"). + +handle_garbage_collect_cast_test() -> + Pid = self(), + {_, TracerRef} = spawn_monitor(fun() -> + erlang:trace(Pid, true, [garbage_collection]), + receive {trace, Pid, gc_start, _} -> + erlang:trace(Pid, false, [garbage_collection]), + exit(gc_start) + end + end), + erlang:yield(), + ?assertEqual({noreply, []}, handle_cast(garbage_collect, [])), + receive + {'DOWN', TracerRef, _, _, Msg} -> ?assertEqual(gc_start, Msg) + after 1000 -> + erlang:error({assertion_failed, [{module, ?MODULE}, {line, ?LINE}, + {expected, gc_start}, {reason, timeout}]}) + end. + +handle_stop_cast_test() -> + ?assertEqual({stop, normal, []}, handle_cast(stop, [])). + +handle_invalid_cast_test() -> + ?assertEqual({stop, {invalid_cast, random}, []}, handle_cast(random, [])). + +-endif. -- cgit v1.2.1 From dd7cb4e835f03392f95f0b02e94dbfa38fdf94c6 Mon Sep 17 00:00:00 2001 From: jiangphcn Date: Wed, 20 Dec 2017 13:37:51 +0800 Subject: Refactor couch_log issue 832 --- src/couch_log/src/couch_log.erl | 33 +++++++++------------------------ 1 file changed, 9 insertions(+), 24 deletions(-) diff --git a/src/couch_log/src/couch_log.erl b/src/couch_log/src/couch_log.erl index a80d4a477..a8dc5d48d 100644 --- a/src/couch_log/src/couch_log.erl +++ b/src/couch_log/src/couch_log.erl @@ -28,51 +28,35 @@ -spec debug(string(), list()) -> ok. -debug(Fmt, Args) -> - couch_stats:increment_counter([couch_log, level, debug]), - log(debug, Fmt, Args). +debug(Fmt, Args) -> log(debug, Fmt, Args). -spec info(string(), list()) -> ok. -info(Fmt, Args) -> - couch_stats:increment_counter([couch_log, level, info]), - log(info, Fmt, Args). +info(Fmt, Args) -> log(info, Fmt, Args). -spec notice(string(), list()) -> ok. -notice(Fmt, Args) -> - couch_stats:increment_counter([couch_log, level, notice]), - log(notice, Fmt, Args). +notice(Fmt, Args) -> log(notice, Fmt, Args). -spec warning(string(), list()) -> ok. -warning(Fmt, Args) -> - couch_stats:increment_counter([couch_log, level, warning]), - log(warning, Fmt, Args). +warning(Fmt, Args) -> log(warning, Fmt, Args). -spec error(string(), list()) -> ok. 
-error(Fmt, Args) -> - couch_stats:increment_counter([couch_log, level, error]), - log(error, Fmt, Args). +error(Fmt, Args) -> log(error, Fmt, Args). -spec critical(string(), list()) -> ok. -critical(Fmt, Args) -> - couch_stats:increment_counter([couch_log, level, critical]), - log(critical, Fmt, Args). +critical(Fmt, Args) -> log(critical, Fmt, Args). -spec alert(string(), list()) -> ok. -alert(Fmt, Args) -> - couch_stats:increment_counter([couch_log, level, alert]), - log(alert, Fmt, Args). +alert(Fmt, Args) -> log(alert, Fmt, Args). -spec emergency(string(), list()) -> ok. -emergency(Fmt, Args) -> - couch_stats:increment_counter([couch_log, level, emergency]), - log(emergency, Fmt, Args). +emergency(Fmt, Args) -> log(emergency, Fmt, Args). -spec set_level(atom() | string() | integer()) -> true. @@ -84,6 +68,7 @@ set_level(Level) -> log(Level, Fmt, Args) -> case couch_log_util:should_log(Level) of true -> + couch_stats:increment_counter([couch_log, level, Level]), Entry = couch_log_formatter:format(Level, self(), Fmt, Args), ok = couch_log_server:log(Entry); false -> -- cgit v1.2.1 From b7911065ec52432ed5f04ad49b1e601ec34d6672 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Thu, 4 Jan 2018 10:57:48 -0400 Subject: Cleanup data dirs in eunit_plugin before test run We use `unique_monotonic_integer` to name the test databases in unit testing. That means that on erlang R > 18 we always create databases with the same names. When unit tests crash or don't properly clean up on teardown, they leave behind old database files, so on the next run the tests' setup fails with a `database exists` exception. This fix cleans up old database files, making sure we always run unit tests from the same blank state. --- rel/plugins/eunit_plugin.erl | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/rel/plugins/eunit_plugin.erl b/rel/plugins/eunit_plugin.erl index bbf83d2ec..1de20b394 100644 --- a/rel/plugins/eunit_plugin.erl +++ b/rel/plugins/eunit_plugin.erl @@ -32,8 +32,28 @@ build_eunit_config(Config0, AppFile) -> Cwd = filename:absname(rebar_utils:get_cwd()), DataDir = Cwd ++ "/tmp/data", ViewIndexDir = Cwd ++ "/tmp/data", + TmpDataDir = Cwd ++ "/tmp/tmp_data", + cleanup_dirs([DataDir, TmpDataDir]), Config1 = rebar_config:set_global(Config0, template, "setup_eunit"), Config2 = rebar_config:set_global(Config1, prefix, Cwd), Config3 = rebar_config:set_global(Config2, data_dir, DataDir), Config = rebar_config:set_global(Config3, view_index_dir, ViewIndexDir), rebar_templater:create(Config, AppFile). + + +cleanup_dirs(Dirs) -> + lists:foreach(fun(Dir) -> + case filelib:is_dir(Dir) of + true -> del_dir(Dir); + false -> ok + end + end, Dirs). + + +del_dir(Dir) -> + All = filelib:wildcard(Dir ++ "/**"), + {Dirs, Files} = lists:partition(fun filelib:is_dir/1, All), + ok = lists:foreach(fun file:delete/1, Files), + SortedDirs = lists:sort(fun(A, B) -> length(A) > length(B) end, Dirs), + ok = lists:foreach(fun file:del_dir/1, SortedDirs), + ok = file:del_dir(Dir). -- cgit v1.2.1 From f3ecd13c08de71a39074b6e579e46f5195c4817a Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Thu, 4 Jan 2018 12:56:43 -0400 Subject: Use uuid in tmp db names in unit tests This changes the naming of temporary eunit databases and files from unique_monotonic_integer to couch_uuids:random, to remove a performance penalty and normalize naming across Erlang releases. It also gets rid of unnecessary inter-type conversion.
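Under the new scheme a temporary name is a fixed prefix plus a random UUID, so names no longer repeat between runs regardless of the Erlang release; roughly (a sketch, assuming couch_uuids is available; the UUID value is hypothetical):

    Suffix = couch_uuids:random(),  % e.g. <<"3f81acc1...">>
    DbName = iolist_to_binary(["eunit-test-db-", Suffix]).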
--- src/couch/include/couch_eunit.hrl | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/couch/include/couch_eunit.hrl b/src/couch/include/couch_eunit.hrl index 8eb763aff..f4617e1d3 100644 --- a/src/couch/include/couch_eunit.hrl +++ b/src/couch/include/couch_eunit.hrl @@ -40,17 +40,14 @@ -define(tempfile, fun() -> - A = integer_to_list(couch_util:unique_monotonic_integer()), - N = node(), - FileName = lists:flatten(io_lib:format("~p-~s", [N, A])), + Suffix = couch_uuids:random(), + FileName = io_lib:format("~p-~s", [node(), Suffix]), filename:join([?TEMPDIR, FileName]) end). -define(tempdb, fun() -> - Nums = integer_to_list(couch_util:unique_monotonic_integer()), - Prefix = "eunit-test-db", - Suffix = lists:concat([integer_to_list(Num) || Num <- Nums]), - list_to_binary(Prefix ++ "-" ++ Suffix) + Suffix = couch_uuids:random(), + iolist_to_binary(["eunit-test-db-", Suffix]) end). -define(docid, fun() -> -- cgit v1.2.1 From 0414ef3e5d39de159cb6e985110773ea5522aea4 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Thu, 4 Jan 2018 12:05:36 -0400 Subject: Make sure mango tests' recreate fun creates db Rapid deletion/creation of the same database is a known antipattern, prone to race conditions, especially on slow VMs. This fix modifies the mango tests' helper function used for db recreation to ensure that we only start tests once the database is created and empty. --- src/mango/test/mango.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py index 560914b8c..ecf969e04 100644 --- a/src/mango/test/mango.py +++ b/src/mango/test/mango.py @@ -82,13 +82,15 @@ class Database(object): def recreate(self): r = self.sess.get(self.url) - db_info = r.json() - docs = db_info["doc_count"] + db_info["doc_del_count"] - if docs == 0: - # db never used - create unnecessary - return - self.delete() + if r.status_code == 200: + db_info = r.json() + docs = db_info["doc_count"] + db_info["doc_del_count"] + if docs == 0: + # db never used - create unnecessary + return + self.delete() self.create() + self.recreate() def save_doc(self, doc): self.save_docs([doc]) -- cgit v1.2.1 From 6fb357747ee3a3b0aefb1d80401a3b68c034ca24 Mon Sep 17 00:00:00 2001 From: Peng Hui Jiang Date: Sat, 6 Jan 2018 08:05:38 +0800 Subject: Return friendly error message when creating user with invalid password (#1087) * Return friendly error message instead of returning unknown_error and function_clause when creating a user with a non-string password. issue 1051 * Add check for salt issue 1051 --- src/chttpd/test/chttpd_security_tests.erl | 22 ++++++++++++++++++++ src/couch/src/couch_passwords.erl | 20 ++++++++++++++++++-- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/src/chttpd/test/chttpd_security_tests.erl b/src/chttpd/test/chttpd_security_tests.erl index b80238c78..737a32e11 100644 --- a/src/chttpd/test/chttpd_security_tests.erl +++ b/src/chttpd/test/chttpd_security_tests.erl @@ -102,6 +102,8 @@ all_test_() -> fun setup/0, fun teardown/1, [ fun should_allow_admin_db_compaction/1, + fun should_allow_valid_password_to_create_user/1, + fun should_disallow_invalid_password_to_create_user/1, fun should_disallow_anonymous_db_compaction/1, fun should_disallow_db_member_db_compaction/1, fun should_allow_db_admin_db_compaction/1, @@ -124,6 +126,26 @@ should_allow_admin_db_compaction([Url,_UsersUrl]) -> couch_util:get_value(<<"ok">>, InnerJson, undefined) end).
+ +should_allow_valid_password_to_create_user([_Url, UsersUrl]) -> + UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\", + \"type\": \"user\", \"roles\": [], \"password\": \"bar\"}", + {ok, _, _, ResultBody} = test_request:post(UsersUrl, + [?CONTENT_JSON, ?AUTH], UserDoc), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + ?_assertEqual(true, couch_util:get_value(<<"ok">>, InnerJson)). + +should_disallow_invalid_password_to_create_user([_Url, UsersUrl]) -> + UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\", + \"type\": \"user\", \"roles\": [], \"password\": 123}", + {ok, _, _, ResultBody} = test_request:post(UsersUrl, + [?CONTENT_JSON, ?AUTH], UserDoc), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + ErrType = couch_util:get_value(<<"error">>, InnerJson), + ?_assertEqual(<<"forbidden">>, ErrType). + should_disallow_anonymous_db_compaction([Url,_UsersUrl]) -> {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact", [?CONTENT_JSON], ""), diff --git a/src/couch/src/couch_passwords.erl b/src/couch/src/couch_passwords.erl index 677ef6559..baf78f5d5 100644 --- a/src/couch/src/couch_passwords.erl +++ b/src/couch/src/couch_passwords.erl @@ -23,7 +23,13 @@ %% legacy scheme, not used for new passwords. -spec simple(binary(), binary()) -> binary(). simple(Password, Salt) when is_binary(Password), is_binary(Salt) -> - ?l2b(couch_util:to_hex(crypto:hash(sha, <<Password/binary, Salt/binary>>))). + ?l2b(couch_util:to_hex(crypto:hash(sha, <<Password/binary, Salt/binary>>))); +simple(Password, Salt) when is_binary(Salt) -> + Msg = io_lib:format("Password value of '~p' is invalid.", [Password]), + throw({forbidden, Msg}); +simple(Password, Salt) when is_binary(Password) -> + Msg = io_lib:format("Salt value of '~p' is invalid.", [Salt]), + throw({forbidden, Msg}). %% CouchDB utility functions -spec hash_admin_password(binary() | list()) -> binary(). @@ -66,7 +72,17 @@ pbkdf2(Password, Salt, Iterations) when is_binary(Password), is_integer(Iterations), Iterations > 0 -> {ok, Result} = pbkdf2(Password, Salt, Iterations, ?SHA1_OUTPUT_LENGTH), - Result. + Result; +pbkdf2(Password, Salt, Iterations) when is_binary(Salt), + is_integer(Iterations), + Iterations > 0 -> + Msg = io_lib:format("Password value of '~p' is invalid.", [Password]), + throw({forbidden, Msg}); +pbkdf2(Password, Salt, Iterations) when is_binary(Password), + is_integer(Iterations), + Iterations > 0 -> + Msg = io_lib:format("Salt value of '~p' is invalid.", [Salt]), + throw({forbidden, Msg}). -spec pbkdf2(binary(), binary(), integer(), integer()) -> {ok, binary()} | {error, derived_key_too_long}. -- cgit v1.2.1 From 65fbcd01c55d8268e384fc3b20e6d99c6003258a Mon Sep 17 00:00:00 2001 From: Will Holley Date: Tue, 9 Jan 2018 08:25:14 +0000 Subject: fallback to "selector" on empty "partial_filter_selector" (#1098) Mango text indexes historically supported partial indexes defined via a "selector" field. This was renamed to "partial_filter_selector" in b98de40 but the fallback code did not correctly handle the case where a "selector" existed alongside a "partial_filter_selector". This situation can occur when the _index endpoint is used to create a text index with a "selector". The resulting design document contains an empty "partial_filter_selector" field *and* the "selector" field that was passed in. The previous implementation of the fallback would detect the presence of "partial_filter_selector" and use the empty value (match all docs) instead of falling back to the "selector" field.
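Such a design document carries both fields at once, roughly like this (a sketch; the selector value is hypothetical):

    "index": {
        "partial_filter_selector": {},
        "selector": {"location": {"$gte": "FRA"}}
    }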
This commit changes the behaviour so that a "selector" will be used even if an empty "partial_filter_selector" is present. A secondary fix would be to change the index creation so that we never use "selector" in the underlying index design document, even if it is passed to the _index API. --- src/mango/src/mango_native_proc.erl | 15 ++++----------- src/mango/test/16-index-selectors-test.py | 10 ++++++++++ src/mango/test/mango.py | 4 +++- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/src/mango/src/mango_native_proc.erl b/src/mango/src/mango_native_proc.erl index 7a3420193..6150e1d19 100644 --- a/src/mango/src/mango_native_proc.erl +++ b/src/mango/src/mango_native_proc.erl @@ -172,19 +172,12 @@ get_text_entries({IdxProps}, Doc) -> get_index_partial_filter_selector(IdxProps) -> - case couch_util:get_value(<<"partial_filter_selector">>, IdxProps) of - undefined -> + case couch_util:get_value(<<"partial_filter_selector">>, IdxProps, {[]}) of + {[]} -> % this is to support legacy text indexes that had the partial_filter_selector % set as selector - case couch_util:get_value(<<"selector">>, IdxProps, []) of - [] -> - {[]}; - Else -> - Else - end; - [] -> - {[]}; - Else -> + couch_util:get_value(<<"selector">>, IdxProps, {[]}); + Else -> Else end. diff --git a/src/mango/test/16-index-selectors-test.py b/src/mango/test/16-index-selectors-test.py index 389f5f41e..a876dc68f 100644 --- a/src/mango/test/16-index-selectors-test.py +++ b/src/mango/test/16-index-selectors-test.py @@ -271,6 +271,16 @@ class IndexSelectorJson(mango.DbPerClass): docs = self.db.find(selector, use_index='oldschooltext') self.assertEqual(len(docs), 3) + @unittest.skipUnless(mango.has_text_service(), "requires text service") + def test_text_old_selector_still_supported_via_api(self): + selector = {"location": {"$gte": "FRA"}} + self.db.create_text_index(fields=[{"name":"location", "type":"string"}], + selector=selector, + ddoc="Selected", + name="Selected") + docs = self.db.find({"location": {"$exists":True}}, use_index='Selected') + self.assertEqual(len(docs), 3) + @unittest.skipUnless(mango.has_text_service(), "requires text service") def test_text_partial_filter_only_in_return_if_not_default(self): self.db.create_text_index(fields=[{"name":"location", "type":"string"}]) diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py index ecf969e04..9b6b998cd 100644 --- a/src/mango/test/mango.py +++ b/src/mango/test/mango.py @@ -145,7 +145,7 @@ class Database(object): return created def create_text_index(self, analyzer=None, idx_type="text", - partial_filter_selector=None, default_field=None, fields=None, + partial_filter_selector=None, selector=None, default_field=None, fields=None, name=None, ddoc=None,index_array_lengths=None): body = { "index": { @@ -161,6 +161,8 @@ class Database(object): body["index"]["default_field"] = default_field if index_array_lengths is not None: body["index"]["index_array_lengths"] = index_array_lengths + if selector is not None: + body["index"]["selector"] = selector if partial_filter_selector is not None: body["index"]["partial_filter_selector"] = partial_filter_selector if fields is not None: -- cgit v1.2.1 From 730dcf795117c51939e491b04400757f8d61a9f6 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Fri, 12 Jan 2018 01:56:21 -0500 Subject: Simplify couch_key_tree test setup Avoid starting 4 applications (config, couch_log, ioq and couch_epi) just to handle a call to config:get. Instead just mock the config directly. This also speeds up the test suite a bit. 
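Under the mock, every config lookup simply falls through to the caller's default, which is all these pure data-structure tests need; e.g. (section and key hypothetical):

    %% with meck:expect(config, get, fun(_, _, Default) -> Default end):
    config:get("couchdb", "some_key", "some_default").  %% -> "some_default"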
--- src/couch/test/couch_key_tree_tests.erl | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/couch/test/couch_key_tree_tests.erl b/src/couch/test/couch_key_tree_tests.erl index 8aa886fc8..88d920363 100644 --- a/src/couch/test/couch_key_tree_tests.erl +++ b/src/couch/test/couch_key_tree_tests.erl @@ -17,14 +17,18 @@ -define(DEPTH, 10). setup() -> - test_util:start(?MODULE, [], [{dont_mock, [config]}]). + meck:new(config), + meck:expect(config, get, fun(_, _, Default) -> Default end). + +teardown(_) -> + meck:unload(config). key_tree_merge_test_()-> { "Key tree merge", { setup, - fun setup/0, fun test_util:stop/1, + fun setup/0, fun teardown/1, [ should_merge_with_empty_tree(), should_merge_reflexive(), @@ -51,7 +55,7 @@ key_tree_missing_leaves_test_()-> "Missing tree leaves", { setup, - fun setup/0, fun test_util:stop/1, + fun setup/0, fun teardown/1, [ should_not_find_missing_leaves(), should_find_missing_leaves() @@ -64,7 +68,7 @@ key_tree_remove_leaves_test_()-> "Remove tree leaves", { setup, - fun setup/0, fun test_util:stop/1, + fun setup/0, fun teardown/1, [ should_have_no_effect_on_removing_no_leaves(), should_have_no_effect_on_removing_non_existant_branch(), @@ -81,7 +85,7 @@ key_tree_get_leaves_test_()-> "Leaves retrieving", { setup, - fun setup/0, fun test_util:stop/1, + fun setup/0, fun teardown/1, [ should_extract_subtree(), should_extract_subsubtree(), @@ -103,7 +107,7 @@ key_tree_leaf_counting_test_()-> "Leaf counting", { setup, - fun setup/0, fun test_util:stop/1, + fun setup/0, fun teardown/1, [ should_have_no_leaves_for_empty_tree(), should_have_single_leaf_for_tree_with_single_node(), @@ -118,7 +122,7 @@ key_tree_stemming_test_()-> "Stemming", { setup, - fun setup/0, fun test_util:stop/1, + fun setup/0, fun teardown/1, [ should_have_no_effect_for_stemming_more_levels_than_exists(), should_return_one_deepest_node(), -- cgit v1.2.1 From 1768aeab8b9100034e374a45f1bc9b7bd72dad2e Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Fri, 12 Jan 2018 10:56:18 -0400 Subject: Remove warning on `couch_epi_codegen` compile Change the runtime choice between `erl_scan:token_info` and `erl_scan:line` to a compile-time conditional. This gets rid of a compilation deprecation warning and should also speed things up a bit. --- src/couch_epi/rebar.config | 4 ++++ src/couch_epi/src/couch_epi_codegen.erl | 19 ++++++++++++------- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/src/couch_epi/rebar.config b/src/couch_epi/rebar.config index 82db830a2..3c7f8af73 100644 --- a/src/couch_epi/rebar.config +++ b/src/couch_epi/rebar.config @@ -1,3 +1,7 @@ {cover_enabled, true}. {cover_print_enabled, true}. + +{erl_opts, [ + {platform_define, "^R16", 'pre18'}, + {platform_define, "^17", 'pre18'}]}. diff --git a/src/couch_epi/src/couch_epi_codegen.erl b/src/couch_epi/src/couch_epi_codegen.erl index 978f0bb58..89b82a1f9 100644 --- a/src/couch_epi/src/couch_epi_codegen.erl +++ b/src/couch_epi/src/couch_epi_codegen.erl @@ -70,11 +70,16 @@ fixup_terminator(Tokens) -> Tokens ++ [{dot, Line}] end. + +-ifdef(pre18). + line(Token) -> - case erlang:function_exported(erl_scan, line, 1) of - true -> - erl_scan:line(Token); - false -> - {line, Line} = erl_scan:token_info(Token, line), - Line - end. + {line, Line} = erl_scan:token_info(Token, line), + Line. + +-else. + +line(Token) -> + erl_scan:line(Token). + +-endif.
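A quick way to confirm which clause was compiled on a given release (an erl shell sketch; on R16/17, where rebar defines 'pre18', the erl_scan:token_info/2 clause runs instead):

    1> {ok, [Token | _], _} = erl_scan:string("foo."), erl_scan:line(Token).
    1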
-- cgit v1.2.1 From 649b808af04720027c80390aecb6a5a01deb70b5 Mon Sep 17 00:00:00 2001 From: Edwin Fine Date: Sat, 13 Jan 2018 22:16:43 -0500 Subject: Allow override of `-args_file` and `-config` parameters (#1095) The existing `couchdb` start script hard-codes the arguments to `-args_file` and `-config`. Although it is possible to copy this script and modify it, or modify it in place, that is less than ideal and can lead to all kinds of difficulties. This PR adds the following environment variables: - `ARGS_FILE`: By default, set to the existing hard-coded value. - `SYSCONFIG_FILE`: By default, set to the existing hard-coded value. - `COUCHDB_ARGS_FILE`: If non-empty, overrides `ARGS_FILE`. - `COUCHDB_SYSCONFIG_FILE`: If non-empty, overrides `SYSCONFIG_FILE`. By changing the script to use these environment variables, it makes it easily possible to use different settings without tinkering with the pristine installed CouchDB environment. --- rel/overlay/bin/couchdb | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/rel/overlay/bin/couchdb b/rel/overlay/bin/couchdb index c82f581f4..a9e6e9bea 100755 --- a/rel/overlay/bin/couchdb +++ b/rel/overlay/bin/couchdb @@ -26,6 +26,10 @@ export BINDIR="$ROOTDIR/erts-$ERTS_VSN/bin" export EMU=beam export PROGNAME=`echo $0 | sed 's/.*\///'` +ARGS_FILE="${COUCHDB_ARGS_FILE:-$ROOTDIR/etc/vm.args}" +SYSCONFIG_FILE="${COUCHDB_SYSCONFIG_FILE:-$ROOTDIR/releases/$APP_VSN/sys.config}" + exec "$BINDIR/erlexec" -boot "$ROOTDIR/releases/$APP_VSN/couchdb" \ - -args_file "$ROOTDIR/etc/vm.args" \ - -config "$ROOTDIR/releases/$APP_VSN/sys.config" "$@" + -args_file "${ARGS_FILE}" \ + -config "${SYSCONFIG_FILE}" "$@" + -- cgit v1.2.1 From 567a16e51f4b1c41462eb10bc22f3f7ad7051a51 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Mon, 15 Jan 2018 11:29:34 -0400 Subject: Fix couch_peruser_test Fix a random mashup of test object generators with normal assertions. --- src/couch_peruser/test/couch_peruser_test.erl | 67 ++++++++++++++++++--------- 1 file changed, 45 insertions(+), 22 deletions(-) diff --git a/src/couch_peruser/test/couch_peruser_test.erl b/src/couch_peruser/test/couch_peruser_test.erl index 04ef2ea90..1ce1964ed 100644 --- a/src/couch_peruser/test/couch_peruser_test.erl +++ b/src/couch_peruser/test/couch_peruser_test.erl @@ -196,10 +196,11 @@ should_not_delete_user_db(TestAuthDb) -> UserDbName = <<"userdb-666f6f">>, create_user(TestAuthDb, User), wait_for_db_create(<<"userdb-666f6f">>), - ?assert(lists:member(UserDbName, all_dbs())), + AfterCreate = lists:member(UserDbName, all_dbs()), delete_user(TestAuthDb, User), timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT), - ?_assert(lists:member(UserDbName, all_dbs())). + AfterDelete = lists:member(UserDbName, all_dbs()), + [?_assert(AfterCreate), ?_assert(AfterDelete)]. should_delete_user_db(TestAuthDb) -> User = "bar", @@ -207,10 +208,11 @@ should_delete_user_db(TestAuthDb) -> set_config("couch_peruser", "delete_dbs", "true"), create_user(TestAuthDb, User), wait_for_db_create(UserDbName), - ?assert(lists:member(UserDbName, all_dbs())), + AfterCreate = lists:member(UserDbName, all_dbs()), delete_user(TestAuthDb, User), wait_for_db_delete(UserDbName), - ?_assert(not lists:member(UserDbName, all_dbs())). + AfterDelete = lists:member(UserDbName, all_dbs()), + [?_assert(AfterCreate), ?_assertNot(AfterDelete)]. 
should_reflect_config_changes(TestAuthDb) -> User = "baz", @@ -218,28 +220,37 @@ should_reflect_config_changes(TestAuthDb) -> set_config("couch_peruser", "delete_dbs", "true"), create_user(TestAuthDb, User), wait_for_db_create(UserDbName), - ?assert(lists:member(UserDbName, all_dbs())), + AfterCreate1 = lists:member(UserDbName, all_dbs()), delete_user(TestAuthDb, User), timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT), wait_for_db_delete(UserDbName), - ?assert(not lists:member(UserDbName, all_dbs())), + AfterDelete1 = lists:member(UserDbName, all_dbs()), create_user(TestAuthDb, User), wait_for_db_create(UserDbName), - ?assert(lists:member(UserDbName, all_dbs())), + AfterCreate2 = lists:member(UserDbName, all_dbs()), set_config("couch_peruser", "delete_dbs", "false"), delete_user(TestAuthDb, User), timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT), - ?assert(lists:member(UserDbName, all_dbs())), + AfterDelete2 = lists:member(UserDbName, all_dbs()), create_user(TestAuthDb, User), wait_for_db_create(UserDbName), set_config("couch_peruser", "delete_dbs", "true"), delete_user(TestAuthDb, User), wait_for_db_delete(UserDbName), - ?assert(not lists:member(UserDbName, all_dbs())), + AfterDelete3 = lists:member(UserDbName, all_dbs()), set_config("couch_peruser", "enable", "false"), create_user(TestAuthDb, User), timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT), - ?_assert(not lists:member(UserDbName, all_dbs())). + AfterCreate3 = lists:member(UserDbName, all_dbs()), + [ + ?_assert(AfterCreate1), + ?_assertNot(AfterDelete1), + ?_assert(AfterCreate2), + ?_assert(AfterDelete2), + ?_assertNot(AfterDelete3), + ?_assertNot(AfterCreate3) + ]. + should_add_user_to_db_admins(TestAuthDb) -> User = "qux", @@ -313,18 +324,24 @@ should_remove_user_from_db_admins(TestAuthDb) -> {AdminProperties} = proplists:get_value(<<"admins">>, get_security(UserDbName)), AdminNames = proplists:get_value(<<"names">>, AdminProperties), - ?assert(lists:member(<<"foo">>, AdminNames)), - ?assert(lists:member(<<"bar">>, AdminNames)), - ?assert(lists:member(<<"qux">>, AdminNames)), + FooBefore = lists:member(<<"foo">>, AdminNames), + BarBefore = lists:member(<<"bar">>, AdminNames), + QuxBefore = lists:member(<<"qux">>, AdminNames), delete_user(TestAuthDb, User), wait_for_security_delete(<<"admins">>, User, UserDbName), {NewAdminProperties} = proplists:get_value(<<"admins">>, get_security(UserDbName)), NewAdminNames = proplists:get_value(<<"names">>, NewAdminProperties), + FooAfter = lists:member(<<"foo">>, NewAdminNames), + BarAfter = lists:member(<<"bar">>, NewAdminNames), + QuxAfter = lists:member(<<"qux">>, NewAdminNames), [ - ?_assert(lists:member(<<"foo">>, NewAdminNames)), - ?_assert(lists:member(<<"bar">>, NewAdminNames)), - ?_assert(not lists:member(<<"qux">>, NewAdminNames)) + ?_assert(FooBefore), + ?_assert(BarBefore), + ?_assert(QuxBefore), + ?_assert(FooAfter), + ?_assert(BarAfter), + ?_assertNot(QuxAfter) ]. 
should_remove_user_from_db_members(TestAuthDb) -> @@ -341,18 +358,24 @@ should_remove_user_from_db_members(TestAuthDb) -> {MemberProperties} = proplists:get_value(<<"members">>, get_security(UserDbName)), MemberNames = proplists:get_value(<<"names">>, MemberProperties), - ?assert(lists:member(<<"pow">>, MemberNames)), - ?assert(lists:member(<<"wow">>, MemberNames)), - ?assert(lists:member(<<"qux">>, MemberNames)), + PowBefore = lists:member(<<"pow">>, MemberNames), + WowBefore = lists:member(<<"wow">>, MemberNames), + QuxBefore = lists:member(<<"qux">>, MemberNames), delete_user(TestAuthDb, User), wait_for_security_delete(<<"members">>, User, UserDbName), {NewMemberProperties} = proplists:get_value(<<"members">>, get_security(UserDbName)), NewMemberNames = proplists:get_value(<<"names">>, NewMemberProperties), + PowAfter = lists:member(<<"pow">>, NewMemberNames), + WowAfter = lists:member(<<"wow">>, NewMemberNames), + QuxAfter = lists:member(<<"qux">>, NewMemberNames), [ - ?_assert(lists:member(<<"pow">>, NewMemberNames)), - ?_assert(lists:member(<<"wow">>, NewMemberNames)), - ?_assert(not lists:member(<<"qux">>, NewMemberNames)) + ?_assert(PowBefore), + ?_assert(WowBefore), + ?_assert(QuxBefore), + ?_assert(PowAfter), + ?_assert(WowAfter), + ?_assertNot(QuxAfter) ]. % infinite loop waiting for a db to be created, either this returns true -- cgit v1.2.1 From ba82c4e31d967c6f157392e264986bf48e146967 Mon Sep 17 00:00:00 2001 From: jiangphcn Date: Wed, 3 Jan 2018 17:58:54 +0800 Subject: Return null for update_seq and offset if update_seq is true issue 969 --- src/chttpd/test/chttpd_db_test.erl | 33 ++++++++++++++++++++++++++++++++- src/fabric/src/fabric_view_all_docs.erl | 10 ++++++++-- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/src/chttpd/test/chttpd_db_test.erl b/src/chttpd/test/chttpd_db_test.erl index f3c779bd3..a83e33acc 100644 --- a/src/chttpd/test/chttpd_db_test.erl +++ b/src/chttpd/test/chttpd_db_test.erl @@ -20,6 +20,7 @@ -define(AUTH, {basic_auth, {?USER, ?PASS}}). -define(CONTENT_JSON, {"Content-Type", "application/json"}). -define(FIXTURE_TXT, ?ABS_PATH(?FILE)). +-define(i2l(I), integer_to_list(I)). setup() -> Hashed = couch_passwords:hash_admin_password(?PASS), @@ -62,7 +63,9 @@ all_test_() -> fun should_return_404_for_delete_att_on_notadoc/1, fun should_return_409_for_del_att_without_rev/1, fun should_return_200_for_del_att_with_rev/1, - fun should_return_409_for_put_att_nonexistent_rev/1 + fun should_return_409_for_put_att_nonexistent_rev/1, + fun should_return_update_seq_when_set_on_all_docs/1, + fun should_not_return_update_seq_when_unset_on_all_docs/1 ] } } @@ -187,6 +190,34 @@ should_return_409_for_put_att_nonexistent_rev(Url) -> end). +should_return_update_seq_when_set_on_all_docs(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)], + {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/" + ++ "?update_seq=true&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ?assertNotEqual(undefined, + couch_util:get_value(<<"update_seq">>, ResultJson)), + ?assertNotEqual(undefined, + couch_util:get_value(<<"offset">>, ResultJson)) + end). 
+ + +should_not_return_update_seq_when_unset_on_all_docs(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)], + {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/" + ++ "?update_seq=false&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ?assertEqual(undefined, + couch_util:get_value(<<"update_seq">>, ResultJson)), + ?assertNotEqual(undefined, + couch_util:get_value(<<"offset">>, ResultJson)) + end). + + attachment_doc() -> {ok, Data} = file:read_file(?FIXTURE_TXT), {[ diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl index de21dde08..ac16dac52 100644 --- a/src/fabric/src/fabric_view_all_docs.erl +++ b/src/fabric/src/fabric_view_all_docs.erl @@ -59,7 +59,8 @@ go(DbName, Options, QueryArgs, Callback, Acc0) -> conflicts = Conflicts, skip = Skip, keys = Keys0, - extra = Extra + extra = Extra, + update_seq = UpdateSeq } = QueryArgs, DocOptions1 = case Conflicts of true -> [conflicts|DocOptions0]; @@ -97,7 +98,12 @@ go(DbName, Options, QueryArgs, Callback, Acc0) -> end, case Resp of {ok, TotalRows} -> - {ok, Acc1} = Callback({meta, [{total, TotalRows}]}, Acc0), + Meta = case UpdateSeq of + false -> [{total, TotalRows}, {offset, null}]; + true -> + [{total, TotalRows}, {offset, null}, {update_seq, null}] + end, + {ok, Acc1} = Callback({meta, Meta}, Acc0), {ok, Acc2} = doc_receive_loop( Keys3, queue:new(), SpawnFun, MaxJobs, Callback, Acc1 ), -- cgit v1.2.1 From b43c401f6e15676dd1514628132898c67b1763c0 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 17 Jan 2018 13:19:57 -0500 Subject: Create all needed directories to build docs (#1115) --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 248dddc58..e95d04419 100644 --- a/Makefile +++ b/Makefile @@ -276,6 +276,7 @@ ifeq ($(IN_RELEASE), true) @cp -R share/docs/html/* rel/couchdb/share/www/docs/ @cp share/docs/man/apachecouchdb.1 rel/couchdb/share/docs/couchdb.1 else + @mkdir -p rel/couchdb/share/www/docs/ @mkdir -p rel/couchdb/share/docs/ @cp -R src/docs/build/html/ rel/couchdb/share/www/docs @cp src/docs/build/man/apachecouchdb.1 rel/couchdb/share/docs/couchdb.1 -- cgit v1.2.1 From 0fd950905a76f3eaab685fc361bd3f38471761d5 Mon Sep 17 00:00:00 2001 From: jiangphcn Date: Thu, 30 Nov 2017 17:52:08 +0800 Subject: Add support for queries in /{db}/_all_docs POST Fixes #820 --- src/chttpd/src/chttpd_db.erl | 42 ++++++++++++--- src/chttpd/test/chttpd_db_test.erl | 87 +++++++++++++++++++++++++++++- src/couch_mrview/src/couch_mrview_util.erl | 4 +- test/javascript/tests/basics.js | 2 +- test/javascript/tests/view_errors.js | 2 +- 5 files changed, 124 insertions(+), 13 deletions(-) diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl index dbbb454cb..e621d657a 100644 --- a/src/chttpd/src/chttpd_db.erl +++ b/src/chttpd/src/chttpd_db.erl @@ -520,14 +520,18 @@ db_req(#httpd{method='GET',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) -> db_req(#httpd{method='POST',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) -> chttpd:validate_ctype(Req, "application/json"), - {Fields} = chttpd:json_body_obj(Req), - case couch_util:get_value(<<"keys">>, Fields, nil) of - Keys when is_list(Keys) -> - all_docs_view(Req, Db, Keys, OP); - nil -> - all_docs_view(Req, Db, undefined, OP); - _ -> - throw({bad_request, "`keys` body member must be an array."}) + Props = chttpd:json_body_obj(Req), + Keys = couch_mrview_util:get_view_keys(Props), + Queries = 
couch_mrview_util:get_view_queries(Props), + case {Queries, Keys} of + {Queries, undefined} when is_list(Queries) -> + multi_all_docs_view(Req, Db, OP, Queries); + {undefined, Keys} when is_list(Keys) -> + all_docs_view(Req, Db, Keys, OP); + {undefined, undefined} -> + all_docs_view(Req, Db, undefined, OP); + {_, _} -> + throw({bad_request, "`keys` and `queries` are mutually exclusive"}) end; db_req(#httpd{path_parts=[_,OP]}=Req, _Db) when ?IS_ALL_DOCS(OP) -> @@ -636,6 +640,28 @@ db_req(#httpd{path_parts=[_, DocId]}=Req, Db) -> db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) -> db_attachment_req(Req, Db, DocId, FileNameParts). +multi_all_docs_view(Req, Db, OP, Queries) -> + Args0 = couch_mrview_http:parse_params(Req, undefined), + Args1 = Args0#mrargs{view_type=map}, + ArgQueries = lists:map(fun({Query}) -> + QueryArg1 = couch_mrview_http:parse_params(Query, undefined, + Args1, [decoded]), + QueryArgs2 = couch_mrview_util:validate_args(QueryArg1), + set_namespace(OP, QueryArgs2) + end, Queries), + Options = [{user_ctx, Req#httpd.user_ctx}], + VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n"}, + FirstChunk = "{\"results\":[", + {ok, Resp0} = chttpd:start_delayed_json_response(VAcc0#vacc.req, 200, [], FirstChunk), + VAcc1 = VAcc0#vacc{resp=Resp0}, + VAcc2 = lists:foldl(fun(Args, Acc0) -> + {ok, Acc1} = fabric:all_docs(Db, Options, + fun couch_mrview_http:view_cb/2, Acc0, Args), + Acc1 + end, VAcc1, ArgQueries), + {ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"), + chttpd:end_delayed_json_response(Resp1). + all_docs_view(Req, Db, Keys, OP) -> Args0 = couch_mrview_http:parse_params(Req, Keys), Args1 = Args0#mrargs{view_type=map}, diff --git a/src/chttpd/test/chttpd_db_test.erl b/src/chttpd/test/chttpd_db_test.erl index a83e33acc..f6732939c 100644 --- a/src/chttpd/test/chttpd_db_test.erl +++ b/src/chttpd/test/chttpd_db_test.erl @@ -65,7 +65,12 @@ all_test_() -> fun should_return_200_for_del_att_with_rev/1, fun should_return_409_for_put_att_nonexistent_rev/1, fun should_return_update_seq_when_set_on_all_docs/1, - fun should_not_return_update_seq_when_unset_on_all_docs/1 + fun should_not_return_update_seq_when_unset_on_all_docs/1, + fun should_succeed_on_all_docs_with_queries_keys/1, + fun should_succeed_on_all_docs_with_queries_limit_skip/1, + fun should_succeed_on_all_docs_with_multiple_queries/1, + fun should_succeed_on_design_docs_with_multiple_queries/1, + fun should_fail_on_multiple_queries_with_keys_and_queries/1 ] } } @@ -218,6 +223,86 @@ should_not_return_update_seq_when_unset_on_all_docs(Url) -> end). +should_succeed_on_all_docs_with_queries_keys(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/", + [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) + end). 
+ + +should_succeed_on_all_docs_with_queries_limit_skip(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/", + [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)), + ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) + end). + + +should_succeed_on_all_docs_with_multiple_queries(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}, + {\"limit\": 5, \"skip\": 2}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/", + [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson1} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), + {InnerJson2} = lists:nth(2, ResultJsonBody), + ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), + ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) + end). + + +should_succeed_on_design_docs_with_multiple_queries(Url) -> + ?_test(begin + [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\", + \"_design/ddoc8\"]}, {\"limit\": 5, \"skip\": 2}]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ "/_design_docs/", + [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(200, RC), + {ResultJson} = ?JSON_DECODE(RespBody), + ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), + {InnerJson1} = lists:nth(1, ResultJsonBody), + ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), + {InnerJson2} = lists:nth(2, ResultJsonBody), + ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), + ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) + end). + + +should_fail_on_multiple_queries_with_keys_and_queries(Url) -> + ?_test(begin + [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], + QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}], + \"keys\": [ \"testdoc4\", \"testdoc9\"]}", + {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/", + [?CONTENT_JSON, ?AUTH], QueryDoc), + ?assertEqual(400, RC), + ?assertMatch({[ + {<<"error">>,<<"bad_request">>}, + {<<"reason">>,<<"`keys` and `queries` are mutually exclusive">>}]}, + ?JSON_DECODE(RespBody)) + end). + + attachment_doc() -> {ok, Data} = file:read_file(?FIXTURE_TXT), {[ diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl index d26df94f2..bc6686b8a 100644 --- a/src/couch_mrview/src/couch_mrview_util.erl +++ b/src/couch_mrview/src/couch_mrview_util.erl @@ -1161,7 +1161,7 @@ get_view_keys({Props}) -> Keys when is_list(Keys) -> Keys; _ -> - throw({bad_request, "`keys` member must be a array."}) + throw({bad_request, "`keys` member must be an array."}) end. @@ -1172,7 +1172,7 @@ get_view_queries({Props}) -> Queries when is_list(Queries) -> Queries; _ -> - throw({bad_request, "`queries` member must be a array."}) + throw({bad_request, "`queries` member must be an array."}) end. 
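With this in place a single POST can carry several independent queries and returns one result set per query, in order; roughly (a sketch reusing the test helpers above; Url comes from the test fixture):

    QueryDoc = "{\"queries\": [{\"keys\": [\"doc1\"]}, {\"limit\": 2, \"skip\": 1}]}",
    {ok, 200, _, Body} = test_request:post(Url ++ "/_all_docs",
        [?CONTENT_JSON, ?AUTH], QueryDoc),
    %% Body decodes to {"results": [RowsForQuery1, RowsForQuery2]}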
diff --git a/test/javascript/tests/basics.js b/test/javascript/tests/basics.js index a36b3035d..79599516d 100644 --- a/test/javascript/tests/basics.js +++ b/test/javascript/tests/basics.js @@ -268,7 +268,7 @@ couchTests.basics = function(debug) { T(xhr.status == 400); result = JSON.parse(xhr.responseText); T(result.error == "bad_request"); - T(result.reason == "`keys` body member must be an array."); + T(result.reason == "`keys` member must be an array."); // oops, the doc id got lost in code nirwana xhr = CouchDB.request("DELETE", "/" + db_name + "/?rev=foobarbaz"); diff --git a/test/javascript/tests/view_errors.js b/test/javascript/tests/view_errors.js index dd60292a3..f135b749a 100644 --- a/test/javascript/tests/view_errors.js +++ b/test/javascript/tests/view_errors.js @@ -169,7 +169,7 @@ couchTests.view_errors = function(debug) { T(xhr.status == 400); result = JSON.parse(xhr.responseText); T(result.error == "bad_request"); - T(result.reason == "`keys` member must be a array."); + T(result.reason == "`keys` member must be an array."); // if the reduce grows to fast, throw an overflow error var path = "/" + db_name + "/_design/testbig/_view/reduce_too_big"; -- cgit v1.2.1 From 91f5985e224db1936c41d6bf31eddba27f90bf98 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Mon, 22 Jan 2018 16:34:26 -0400 Subject: Set eunit timeout on a whole test object --- src/couch/test/couch_db_tests.erl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/couch/test/couch_db_tests.erl b/src/couch/test/couch_db_tests.erl index 62e059070..d64f7c640 100644 --- a/src/couch/test/couch_db_tests.erl +++ b/src/couch/test/couch_db_tests.erl @@ -128,15 +128,13 @@ should_delete_multiple_dbs(DbNames) -> should_create_delete_database_continuously(Times, DbName) -> {lists:flatten(io_lib:format("~b times", [Times])), - ?_test(begin + {timeout, ?TIMEOUT, ?_test(begin ?assert(create_db(DbName)), lists:foreach(fun(_) -> - {timeout, ?TIMEOUT, [ - ?assert(delete_db(DbName)), - ?assert(create_db(DbName)) - ]} + ?assert(delete_db(DbName)), + ?assert(create_db(DbName)) end, lists:seq(1, Times)) - end)}. + end)}}. should_create_db_if_missing(DbName) -> ?_test(begin -- cgit v1.2.1 From d16f2db901c9b3b24c7189acfec35ec42895bd25 Mon Sep 17 00:00:00 2001 From: jiangphcn Date: Fri, 15 Dec 2017 15:07:04 +0800 Subject: Make peruser database prefix configurable Fixes #876 --- rel/overlay/etc/default.ini | 4 ++ src/couch_peruser/src/couch_peruser.erl | 52 ++++++++++++-------- src/couch_peruser/test/couch_peruser_test.erl | 68 +++++++++++++++++++++++++++ 3 files changed, 105 insertions(+), 19 deletions(-) diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index c473495fe..7e429f624 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -91,6 +91,10 @@ delete_dbs = false ; Set a default q value for peruser-created databases that is different from ; cluster / q ;q = 1 +; prefix for user databases. If you change this after user dbs have been +; created, the existing databases won’t get deleted if the associated user +; gets deleted because of the then prefix mismatch. 
+database_prefix = userdb- [httpd] port = {{backend_port}} diff --git a/src/couch_peruser/src/couch_peruser.erl b/src/couch_peruser/src/couch_peruser.erl index bbf40126c..886fb4f6e 100644 --- a/src/couch_peruser/src/couch_peruser.erl +++ b/src/couch_peruser/src/couch_peruser.erl @@ -35,7 +35,8 @@ delete_dbs :: boolean(), changes_pid :: pid(), changes_ref :: reference(), - q_for_peruser_db :: integer() + q_for_peruser_db :: integer(), + peruser_dbname_prefix :: binary() }). -record(state, { @@ -45,10 +46,11 @@ states :: list(), mem3_cluster_pid :: pid(), cluster_stable :: boolean(), - q_for_peruser_db :: integer() + q_for_peruser_db :: integer(), + peruser_dbname_prefix :: binary() }). --define(USERDB_PREFIX, "userdb-"). +-define(DEFAULT_USERDB_PREFIX, "userdb-"). -define(RELISTEN_DELAY, 5000). -define(DEFAULT_QUIET_PERIOD, 60). % seconds -define(DEFAULT_START_PERIOD, 5). % seconds @@ -73,6 +75,14 @@ init_state() -> "couch_httpd_auth", "authentication_db", "_users")), DeleteDbs = config:get_boolean("couch_peruser", "delete_dbs", false), Q = config:get_integer("couch_peruser", "q", 1), + Prefix = config:get("couch_peruser", "database_prefix", ?DEFAULT_USERDB_PREFIX), + case couch_db:validate_dbname(Prefix) of + ok -> ok; + Error -> + couch_log:error("couch_peruser can't proceed as illegal database prefix ~p. + Error: ~p", [Prefix, Error]), + throw(Error) + end, % set up cluster-stable listener @@ -90,7 +100,8 @@ init_state() -> delete_dbs = DeleteDbs, mem3_cluster_pid = Mem3Cluster, cluster_stable = false, - q_for_peruser_db = Q + q_for_peruser_db = Q, + peruser_dbname_prefix = ?l2b(Prefix) } end. @@ -100,7 +111,8 @@ start_listening(#state{states=ChangesStates}=State) when length(ChangesStates) > 0 -> % couch_log:debug("peruser: start_listening() already run on node ~p in pid ~p", [node(), self()]), State; -start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs, q_for_peruser_db = Q} = State) -> +start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs, + q_for_peruser_db = Q, peruser_dbname_prefix = Prefix} = State) -> % couch_log:debug("peruser: start_listening() on node ~p", [node()]), try States = lists:map(fun (A) -> @@ -108,7 +120,8 @@ start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs, q_for_peruser_db = parent = State#state.parent, db_name = A#shard.name, delete_dbs = DeleteDbs, - q_for_peruser_db = Q + q_for_peruser_db = Q, + peruser_dbname_prefix = Prefix }, {Pid, Ref} = spawn_opt( ?MODULE, init_changes_handler, [S], [link, monitor]), @@ -144,7 +157,8 @@ init_changes_handler(#changes_state{db_name=DbName} = ChangesState) -> changes_handler( {change, {Doc}, _Prepend}, _ResType, - ChangesState=#changes_state{db_name=DbName, q_for_peruser_db = Q}) -> + ChangesState=#changes_state{db_name=DbName, q_for_peruser_db = Q, + peruser_dbname_prefix = Prefix}) -> % couch_log:debug("peruser: changes_handler() on DbName/Doc ~p/~p", [DbName, Doc]), case couch_util:get_value(<<"id">>, Doc) of @@ -153,16 +167,16 @@ changes_handler( true -> case couch_util:get_value(<<"deleted">>, Doc, false) of false -> - UserDb = ensure_user_db(User, Q), + UserDb = ensure_user_db(Prefix, User, Q), ok = ensure_security(User, UserDb, fun add_user/3), ChangesState; true -> case ChangesState#changes_state.delete_dbs of true -> - _UserDb = delete_user_db(User), + _UserDb = delete_user_db(Prefix, User), ChangesState; false -> - UserDb = user_db_name(User), + UserDb = user_db_name(Prefix, User), ok = ensure_security(User, UserDb, fun remove_user/3), ChangesState end @@ -207,9 +221,9 @@ 
should_handle_doc_int(ShardName, DocId) -> false end. --spec delete_user_db(User :: binary()) -> binary(). -delete_user_db(User) -> - UserDb = user_db_name(User), +-spec delete_user_db(Prefix:: binary(), User :: binary()) -> binary(). +delete_user_db(Prefix, User) -> + UserDb = user_db_name(Prefix, User), try case fabric:delete_db(UserDb, [?ADMIN_CTX]) of ok -> ok; @@ -220,9 +234,9 @@ delete_user_db(User) -> end, UserDb. --spec ensure_user_db(User :: binary(), Q :: integer()) -> binary(). -ensure_user_db(User, Q) -> - UserDb = user_db_name(User), +-spec ensure_user_db(Prefix:: binary(), User :: binary(), Q :: integer()) -> binary(). +ensure_user_db(Prefix, User, Q) -> + UserDb = user_db_name(Prefix, User), try {ok, _DbInfo} = fabric:get_db_info(UserDb) catch error:database_does_not_exist -> @@ -300,11 +314,11 @@ ensure_security(User, UserDb, TransformFun) -> end end. --spec user_db_name(User :: binary()) -> binary(). -user_db_name(User) -> +-spec user_db_name(Prefix :: binary(), User :: binary()) -> binary(). +user_db_name(Prefix, User) -> HexUser = list_to_binary( [string:to_lower(integer_to_list(X, 16)) || <> <= User]), - <>. + <>. -spec exit_changes(State :: #state{}) -> ok. exit_changes(State) -> diff --git a/src/couch_peruser/test/couch_peruser_test.erl b/src/couch_peruser/test/couch_peruser_test.erl index 1ce1964ed..f6ef88f0b 100644 --- a/src/couch_peruser/test/couch_peruser_test.erl +++ b/src/couch_peruser/test/couch_peruser_test.erl @@ -156,6 +156,20 @@ should_create_user_db_with_default(TestAuthDb) -> ?_assertEqual(1, couch_util:get_value(q, ClusterInfo)) ]. +should_create_user_db_with_custom_prefix(TestAuthDb) -> + set_config("couch_peruser", "database_prefix", "newuserdb-"), + create_user(TestAuthDb, "fooo"), + wait_for_db_create(<<"newuserdb-666f6f6f">>), + delete_config("couch_peruser", "database_prefix", "newuserdb-"), + ?_assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs())). + +should_create_user_db_with_custom_special_prefix(TestAuthDb) -> + set_config("couch_peruser", "database_prefix", "userdb_$()+--/"), + create_user(TestAuthDb, "fooo"), + wait_for_db_create(<<"userdb_$()+--/666f6f6f">>), + delete_config("couch_peruser", "database_prefix", "userdb_$()+--/"), + ?_assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs())). + should_create_anon_user_db_with_default(TestAuthDb) -> create_anon_user(TestAuthDb, "fooo"), wait_for_db_create(<<"userdb-666f6f6f">>), @@ -166,6 +180,20 @@ should_create_anon_user_db_with_default(TestAuthDb) -> ?_assertEqual(1, couch_util:get_value(q, ClusterInfo)) ]. +should_create_anon_user_db_with_custom_prefix(TestAuthDb) -> + set_config("couch_peruser", "database_prefix", "newuserdb-"), + create_anon_user(TestAuthDb, "fooo"), + wait_for_db_create(<<"newuserdb-666f6f6f">>), + delete_config("couch_peruser", "database_prefix", "newuserdb-"), + ?_assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs())). + +should_create_anon_user_db_with_custom_special_prefix(TestAuthDb) -> + set_config("couch_peruser", "database_prefix", "userdb_$()+--/"), + create_anon_user(TestAuthDb, "fooo"), + wait_for_db_create(<<"userdb_$()+--/666f6f6f">>), + delete_config("couch_peruser", "database_prefix", "userdb_$()+--/"), + ?_assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs())). + should_create_user_db_with_q4(TestAuthDb) -> set_config("couch_peruser", "q", "4"), create_user(TestAuthDb, "foo"), @@ -214,6 +242,40 @@ should_delete_user_db(TestAuthDb) -> AfterDelete = lists:member(UserDbName, all_dbs()), [?_assert(AfterCreate), ?_assertNot(AfterDelete)]. 
+should_delete_user_db_with_custom_prefix(TestAuthDb) -> + User = "bar", + UserDbName = <<"newuserdb-626172">>, + set_config("couch_peruser", "delete_dbs", "true"), + set_config("couch_peruser", "database_prefix", "newuserdb-"), + create_user(TestAuthDb, User), + wait_for_db_create(UserDbName), + AfterCreate = lists:member(UserDbName, all_dbs()), + delete_user(TestAuthDb, User), + wait_for_db_delete(UserDbName), + delete_config("couch_peruser", "database_prefix", "newuserdb-"), + AfterDelete = lists:member(UserDbName, all_dbs()), + [ + ?_assert(AfterCreate), + ?_assertNot(AfterDelete) + ]. + +should_delete_user_db_with_custom_special_prefix(TestAuthDb) -> + User = "bar", + UserDbName = <<"userdb_$()+--/626172">>, + set_config("couch_peruser", "delete_dbs", "true"), + set_config("couch_peruser", "database_prefix", "userdb_$()+--/"), + create_user(TestAuthDb, User), + wait_for_db_create(UserDbName), + AfterCreate = lists:member(UserDbName, all_dbs()), + delete_user(TestAuthDb, User), + wait_for_db_delete(UserDbName), + delete_config("couch_peruser", "database_prefix", "userdb_$()+--/"), + AfterDelete = lists:member(UserDbName, all_dbs()), + [ + ?_assert(AfterCreate), + ?_assertNot(AfterDelete) + ]. + should_reflect_config_changes(TestAuthDb) -> User = "baz", UserDbName = <<"userdb-62617a">>, @@ -445,11 +507,17 @@ couch_peruser_test_() -> fun setup/0, fun teardown/1, [ fun should_create_anon_user_db_with_default/1, + fun should_create_anon_user_db_with_custom_prefix/1, + fun should_create_anon_user_db_with_custom_special_prefix/1, fun should_create_user_db_with_default/1, + fun should_create_user_db_with_custom_prefix/1, + fun should_create_user_db_with_custom_special_prefix/1, fun should_create_user_db_with_q4/1, fun should_create_anon_user_db_with_q4/1, fun should_not_delete_user_db/1, fun should_delete_user_db/1, + fun should_delete_user_db_with_custom_prefix/1, + fun should_delete_user_db_with_custom_special_prefix/1, fun should_reflect_config_changes/1, fun should_add_user_to_db_admins/1, fun should_add_user_to_db_members/1, -- cgit v1.2.1 From c3bc95697bc841e6bd898930d3df7ddb1452f75b Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 24 Jan 2018 13:44:36 -0600 Subject: Remove outdated docker targets and docs (#1109) We removed the Dockerfile in 6e57c43a and moved all Docker-related materials to apache/couchdb-docker, but we never cleaned up the Makefile targets or developer documentation. --- Makefile | 18 ------------------ Makefile.win | 18 ------------------ README-DEV.rst | 24 ------------------------ 3 files changed, 60 deletions(-) diff --git a/Makefile b/Makefile index e95d04419..c8c0b093f 100644 --- a/Makefile +++ b/Makefile @@ -211,24 +211,6 @@ dialyze: .rebar @$(REBAR) -r dialyze $(DIALYZE_OPTS) -.PHONY: docker-image -# target: docker-image - Build Docker image -docker-image: - @docker build --rm -t couchdb/dev-cluster . 
- - -.PHONY: docker-start -# target: docker-start - Start CouchDB in Docker container -docker-start: - @docker run -d -P -t couchdb/dev-cluster > .docker-id - - -.PHONY: docker-stop -# target: docker-stop - Stop Docker container -docker-stop: - @docker stop `cat .docker-id` - - .PHONY: introspect # target: introspect - Check for commits difference between rebar.config and repository introspect: diff --git a/Makefile.win b/Makefile.win index 874ddf411..7ff0ab5c5 100644 --- a/Makefile.win +++ b/Makefile.win @@ -137,24 +137,6 @@ dialyze: .rebar @$(REBAR) -r dialyze $(DIALYZE_OPTS) -.PHONY: docker-image -# target: docker-image - Build Docker image -docker-image: - @docker build --rm -t couchdb\dev-cluster . - - -.PHONY: docker-start -# target: docker-start - Start CouchDB in Docker container -docker-start: - @docker run -d -P -t couchdb\dev-cluster > .docker-id - - -.PHONY: docker-stop -# target: docker-stop - Stop Docker container -docker-stop: - @docker stop `cat .docker-id` - - .PHONY: introspect # target: introspect - Check for commits difference between rebar.config and repository introspect: diff --git a/README-DEV.rst b/README-DEV.rst index 3587e8586..9cfa1f2ef 100644 --- a/README-DEV.rst +++ b/README-DEV.rst @@ -198,30 +198,6 @@ See ``make help`` for more info and useful commands. Please report any problems to the developer's mailing list. -Testing a cluster ------------------ - -We use `Docker `_ to safely run a local three node -cluster all inside a single docker container. - -Assuming you have Docker installed and running:: - - make docker-image - -This will create a docker image (tagged 'couchdb/dev-cluster') capable -of running a joined three node cluster. - -To start it up:: - - make docker-start - -A three node cluster should now be running (you can now use ``docker ps`` -to find the exposed ports of the nodes). - -To stop it:: - - make docker-stop - Releasing --------- -- cgit v1.2.1 From 4e35b36f5d089f8dd567033f3b1db1cc846c7b14 Mon Sep 17 00:00:00 2001 From: jiangphcn Date: Thu, 25 Jan 2018 13:49:46 +0800 Subject: Hide Auth information in replication document for reader - don't display credential information for user who just wants to check replication status. In basic authentication, the credential information is available in header field of doc --- src/couch_replicator/src/couch_replicator_docs.erl | 32 +++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl index 6666cba53..1fe91eca4 100644 --- a/src/couch_replicator/src/couch_replicator_docs.erl +++ b/src/couch_replicator/src/couch_replicator_docs.erl @@ -695,7 +695,8 @@ strip_credentials(Url) when is_binary(Url) -> "http\\1://\\2", [{return, binary}]); strip_credentials({Props}) -> - {lists:keydelete(<<"oauth">>, 1, Props)}. + Props1 = lists:keydelete(<<"oauth">>, 1, Props), + {lists:keydelete(<<"headers">>, 1, Props1)}. error_reason({shutdown, Error}) -> @@ -761,4 +762,33 @@ check_convert_options_fail_test() -> ?assertThrow({bad_request, _}, convert_options([{<<"selector">>, [{key, value}]}])). 
+check_strip_credentials_test() -> + [?assertEqual(Expected, strip_credentials(Body)) || {Expected, Body} <- [ + { + undefined, + undefined + }, + { + <<"https://remote_server/database">>, + <<"https://foo:bar@remote_server/database">> + }, + { + {[{<<"_id">>, <<"foo">>}]}, + {[{<<"_id">>, <<"foo">>}, {<<"oauth">>, <<"bar">>}]} + }, + { + {[{<<"_id">>, <<"foo">>}]}, + {[{<<"_id">>, <<"foo">>}, {<<"headers">>, <<"bar">>}]} + }, + { + {[{<<"_id">>, <<"foo">>}, {<<"other">>, <<"bar">>}]}, + {[{<<"_id">>, <<"foo">>}, {<<"other">>, <<"bar">>}]} + }, + { + {[{<<"_id">>, <<"foo">>}]}, + {[{<<"_id">>, <<"foo">>}, {<<"oauth">>, <<"bar">>}, + {<<"headers">>, <<"baz">>}]} + } + ]]. + -endif. -- cgit v1.2.1 From 52e7cbe659c1125bea52f1ebd025bf092ed391aa Mon Sep 17 00:00:00 2001 From: jiangphcn Date: Mon, 18 Dec 2017 20:52:17 +0800 Subject: Decode destination header for doc copy Fixes #977 --- src/chttpd/src/chttpd_db.erl | 3 ++- src/chttpd/test/chttpd_db_test.erl | 26 +++++++++++++++++++++++++- src/couch/src/couch_httpd_db.erl | 3 ++- src/couch/src/test_request.erl | 10 ++++++++++ 4 files changed, 39 insertions(+), 3 deletions(-) diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl index e621d657a..3dc9f5794 100644 --- a/src/chttpd/src/chttpd_db.erl +++ b/src/chttpd/src/chttpd_db.erl @@ -851,7 +851,8 @@ db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) -> missing_rev -> nil; Rev -> Rev end, - {TargetDocId, TargetRevs} = couch_httpd_db:parse_copy_destination_header(Req), + {TargetDocId0, TargetRevs} = couch_httpd_db:parse_copy_destination_header(Req), + TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)), % open old doc Doc = couch_doc_open(Db, SourceDocId, SourceRev, []), % save new doc diff --git a/src/chttpd/test/chttpd_db_test.erl b/src/chttpd/test/chttpd_db_test.erl index f6732939c..1725f87a9 100644 --- a/src/chttpd/test/chttpd_db_test.erl +++ b/src/chttpd/test/chttpd_db_test.erl @@ -19,6 +19,10 @@ -define(PASS, "pass"). -define(AUTH, {basic_auth, {?USER, ?PASS}}). -define(CONTENT_JSON, {"Content-Type", "application/json"}). +-define(DESTHEADER1, {"Destination", "foo%E5%95%8Abar"}). +-define(DESTHEADER2, {"Destination", "foo%2Fbar%23baz%3Fpow%3Afiz"}). + + -define(FIXTURE_TXT, ?ABS_PATH(?FILE)). -define(i2l(I), integer_to_list(I)). @@ -70,7 +74,8 @@ all_test_() -> fun should_succeed_on_all_docs_with_queries_limit_skip/1, fun should_succeed_on_all_docs_with_multiple_queries/1, fun should_succeed_on_design_docs_with_multiple_queries/1, - fun should_fail_on_multiple_queries_with_keys_and_queries/1 + fun should_fail_on_multiple_queries_with_keys_and_queries/1, + fun should_return_correct_id_on_doc_copy/1 ] } } @@ -303,6 +308,25 @@ should_fail_on_multiple_queries_with_keys_and_queries(Url) -> end). +should_return_correct_id_on_doc_copy(Url) -> + ?_test(begin + {ok, _, _, _} = create_doc(Url, "testdoc"), + {_, _, _, ResultBody1} = test_request:copy(Url ++ "/testdoc/", + [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]), + {ResultJson1} = ?JSON_DECODE(ResultBody1), + Id1 = couch_util:get_value(<<"id">>, ResultJson1), + + {_, _, _, ResultBody2} = test_request:copy(Url ++ "/testdoc/", + [?CONTENT_JSON, ?AUTH, ?DESTHEADER2]), + {ResultJson2} = ?JSON_DECODE(ResultBody2), + Id2 = couch_util:get_value(<<"id">>, ResultJson2), + [ + ?assertEqual(<<102,111,111,229,149,138,98,97,114>>, Id1), + ?assertEqual(<<"foo/bar#baz?pow:fiz">>, Id2) + ] + end). 
+ + attachment_doc() -> {ok, Data} = file:read_file(?FIXTURE_TXT), {[ diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl index 05e63ba97..79ba84dab 100644 --- a/src/couch/src/couch_httpd_db.erl +++ b/src/couch/src/couch_httpd_db.erl @@ -616,7 +616,8 @@ db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) -> missing_rev -> nil; Rev -> Rev end, - {TargetDocId, TargetRevs} = parse_copy_destination_header(Req), + {TargetDocId0, TargetRevs} = parse_copy_destination_header(Req), + TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)), % open old doc Doc = couch_doc_open(Db, SourceDocId, SourceRev, []), % save new doc diff --git a/src/couch/src/test_request.erl b/src/couch/src/test_request.erl index a1b8b57c5..4dfde1a33 100644 --- a/src/couch/src/test_request.erl +++ b/src/couch/src/test_request.erl @@ -12,6 +12,7 @@ -module(test_request). +-export([copy/1, copy/2, copy/3]). -export([get/1, get/2, get/3]). -export([post/2, post/3, post/4]). -export([put/2, put/3, put/4]). @@ -19,6 +20,15 @@ -export([options/1, options/2, options/3]). -export([request/3, request/4, request/5]). +copy(Url) -> + copy(Url, []). + +copy(Url, Headers) -> + copy(Url, Headers, []). + +copy(Url, Headers, Opts) -> + request(copy, Url, Headers, [], Opts). + get(Url) -> get(Url, []). -- cgit v1.2.1 From 380ae691d73b7a039fa0e14e2f74029a41f37289 Mon Sep 17 00:00:00 2001 From: Joan Touzet Date: Fri, 26 Jan 2018 13:16:32 -0500 Subject: Remove 'smartquote' from default.ini, broke the build --- rel/overlay/etc/default.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index 7e429f624..9d6d30d07 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -92,7 +92,7 @@ delete_dbs = false ; cluster / q ;q = 1 ; prefix for user databases. If you change this after user dbs have been -; created, the existing databases won’t get deleted if the associated user +; created, the existing databases won't get deleted if the associated user ; gets deleted because of the then prefix mismatch. database_prefix = userdb- -- cgit v1.2.1 From 1ecf363f2b6c1cdd937e449e084ca6e62eb343ff Mon Sep 17 00:00:00 2001 From: jiangphcn Date: Mon, 15 Jan 2018 18:16:19 +0800 Subject: Make _design_docs respect query parameters Fixes #1100 --- src/couch/src/couch_db.erl | 31 ++++-- test/javascript/tests/design_docs_query.js | 154 +++++++++++++++++++++++++++++ 2 files changed, 177 insertions(+), 8 deletions(-) create mode 100644 test/javascript/tests/design_docs_query.js diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index 5e720c284..b2b94ce80 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -1738,14 +1738,29 @@ do_pipe([Filter|Rest], F0) -> set_namespace_range(Options, undefined) -> Options; set_namespace_range(Options, NS) -> - %% FIXME depending on order we might need to swap keys - SK = select_gt( - proplists:get_value(start_key, Options, <<"">>), - <<NS/binary, "/">>), - EK = select_lt( - proplists:get_value(end_key, Options, <<NS/binary, "0">>), - <<NS/binary, "0">>), - [{start_key, SK}, {end_key_gt, EK}].
+ SK0 = proplists:get_value(start_key, Options, <<NS/binary, "/">>), + EKType = case proplists:get_value(end_key_gt, Options) of + undefined -> end_key; + _ -> end_key_gt + end, + EK0 = case EKType of + end_key -> + proplists:get_value(end_key, Options, <<NS/binary, "0">>); + end_key_gt -> + proplists:get_value(end_key_gt, Options, <<NS/binary, "0">>) + end, + case SK0 =< EK0 of + true -> + SK = select_gt(SK0, <<NS/binary, "/">>), + EK = select_lt(EK0, <<NS/binary, "0">>), + [{dir, proplists:get_value(dir, Options, fwd)}, + {start_key, SK}, {EKType, EK}]; + false -> + SK = select_lt(SK0, <<NS/binary, "0">>), + EK = select_gt(EK0, <<NS/binary, "/">>), + [{dir, proplists:get_value(dir, Options, fwd)}, + {start_key, SK}, {EKType, EK}] + end. select_gt(V1, V2) when V1 < V2 -> V2; select_gt(V1, _V2) -> V1. diff --git a/test/javascript/tests/design_docs_query.js b/test/javascript/tests/design_docs_query.js new file mode 100644 index 000000000..8fc8da5f8 --- /dev/null +++ b/test/javascript/tests/design_docs_query.js @@ -0,0 +1,154 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. + +couchTests.design_docs_query = function(debug) { + var db_name = get_random_db_name(); + var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"}); + db.createDb(); + if (debug) debugger; + + var docs = makeDocs(5); + + // create the docs + var results = db.bulkSave(docs); + T(results.length == 5); + for (var i = 0; i < 5; i++) { + T(results[i].id == docs[i]._id); + } + + // create the ddocs + for (var i = 0; i < 5; i++) { + T(db.save({ + _id : "_design/ddoc0" + (i+1).toString(), + views : { + "testing" : { + "map" : "function(){emit(1,1)}" + } + } + }).ok); + } + + // test design_docs + var path = "/" + db_name + "/_design_docs?"; + var xhr_AllDDocs = CouchDB.request("GET", path); + T(xhr_AllDDocs.status == 200, "standard get should be 200"); + var allDDocs = JSON.parse(xhr_AllDDocs.responseText); + TEquals(10, allDDocs.total_rows, "total_rows mismatch"); + TEquals(5, allDDocs.rows.length, "amount of rows mismatch"); + + // test key="_design/ddoc03" + var xhr = CouchDB.request("GET", path + "key=\"_design/ddoc03\""); + T(xhr.status == 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + TEquals(1, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc03", result.rows[0].key, "key test"); + + // test descending=true + var xhr = CouchDB.request("GET", path + "descending=true"); + T(xhr.status == 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + TEquals(5, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc05", result.rows[0].key, "descending test"); + + // test descending=false + var xhr = CouchDB.request("GET", path + "descending=false"); + T(xhr.status == 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + TEquals(5, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc01", result.rows[0].key, "descending test"); + + // test end_key="_design/ddoc03" + var xhr = CouchDB.request("GET", path + "end_key=\"_design/ddoc03\""); + T(xhr.status == 200, "standard get should be
200"); + var result = JSON.parse(xhr.responseText); + TEquals(3, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc03", result.rows[2].key, "end_key test"); + + // test endkey="_design/ddoc03" + var xhr = CouchDB.request("GET", path + "endkey=\"_design/ddoc03\""); + T(xhr.status = 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + TEquals(3, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc03", result.rows[2].key, "endkey test"); + + // test start_key="_design/ddoc03" + var xhr = CouchDB.request("GET", path + "start_key=\"_design/ddoc03\""); + T(xhr.status = 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + TEquals(3, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc03", result.rows[0].key, "start_key test"); + + // test startkey="_design/ddoc03" + var xhr = CouchDB.request("GET", path + "startkey=\"_design/ddoc03\""); + T(xhr.status = 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + TEquals(3, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc03", result.rows[0].key, "startkey test"); + + // test end_key="_design/ddoc03"&inclusive_end=true + var xhr = CouchDB.request("GET", path + "end_key=\"_design/ddoc03\"&inclusive_end=true"); + T(xhr.status = 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + TEquals(3, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc03", result.rows[2].key, "end_key and inclusive_end test"); + + // test end_key="_design/ddoc03"&inclusive_end=false + var xhr = CouchDB.request("GET", path + "end_key=\"_design/ddoc03\"&inclusive_end=false"); + T(xhr.status = 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + TEquals(2, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc02", result.rows[1].key, "end_key and inclusive_end test"); + + // test end_key="_design/ddoc03"&inclusive_end=false&descending=true + var xhr = CouchDB.request("GET", path + + "end_key=\"_design/ddoc03\"&inclusive_end=false&descending=true"); + T(xhr.status = 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + TEquals(2, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc04", result.rows[1].key, "end_key, inclusive_end and descending test"); + + // test end_key="_design/ddoc05"&limit=2 + var xhr = CouchDB.request("GET", path + + "end_key=\"_design/ddoc05\"&limit=2"); + T(xhr.status = 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + TEquals(2, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc02", result.rows[1].key, "end_key and limit test"); + + // test end_key="_design/ddoc05"&skip=2 + var xhr = CouchDB.request("GET", path + + "end_key=\"_design/ddoc05\"&skip=2"); + T(xhr.status = 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + TEquals(3, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc03", result.rows[0].key, "end_key and skip test"); + TEquals("_design/ddoc05", result.rows[2].key, "end_key and skip test"); + + // test end_key="_design/ddoc05"&update_seq=true + var xhr = CouchDB.request("GET", path + + "end_key=\"_design/ddoc05\"&update_seq=true"); + T(xhr.status = 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + T(result.update_seq); + + // test POST with keys + var xhr = CouchDB.request("POST", path, { + headers: 
{"Content-Type": "application/json"}, + body: JSON.stringify({"keys" : ["_design/ddoc02", "_design/ddoc03"]}) + }); + T(xhr.status = 200, "standard get should be 200"); + var result = JSON.parse(xhr.responseText); + TEquals(2, result.rows.length, "amount of rows mismatch"); + TEquals("_design/ddoc03", result.rows[1].key, "POST test"); + + db.deleteDb(); +}; -- cgit v1.2.1 From 92a280ab6e0b9c728ab7b84ae10a6e2be2e3d430 Mon Sep 17 00:00:00 2001 From: jiangphcn Date: Thu, 28 Dec 2017 11:45:43 +0800 Subject: Introduce new _dbs_info endpoint to get info of a list of databases Fixes #822 --- rel/overlay/etc/default.ini | 4 + src/chttpd/src/chttpd_auth_request.erl | 4 + src/chttpd/src/chttpd_httpd_handlers.erl | 1 + src/chttpd/src/chttpd_misc.erl | 35 +++++++ src/chttpd/test/chttpd_dbs_info_test.erl | 169 ++++++++++++++++++++++++++++++ src/couch/test/chttpd_endpoints_tests.erl | 1 + 6 files changed, 214 insertions(+) create mode 100644 src/chttpd/test/chttpd_dbs_info_test.erl diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index 9d6d30d07..17a9a4f3d 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -69,6 +69,10 @@ require_valid_user = false ; List of headers that will be kept when the header Prefer: return=minimal is included in a request. ; If Server header is left out, Mochiweb will add its own one in. prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETag, Server, Transfer-Encoding, Vary +; +; Limit maximum number of databases when tying to get detailed information using +; _dbs_info in a request +max_db_number_for_dbs_info_req = 100 [database_compaction] ; larger buffer sizes can originate smaller files diff --git a/src/chttpd/src/chttpd_auth_request.erl b/src/chttpd/src/chttpd_auth_request.erl index 4e2e0dbf2..05c5e8e35 100644 --- a/src/chttpd/src/chttpd_auth_request.erl +++ b/src/chttpd/src/chttpd_auth_request.erl @@ -35,6 +35,8 @@ authorize_request_int(#httpd{path_parts=[<<"favicon.ico">>|_]}=Req) -> Req; authorize_request_int(#httpd{path_parts=[<<"_all_dbs">>|_]}=Req) -> Req; +authorize_request_int(#httpd{path_parts=[<<"_dbs_info">>|_]}=Req) -> + Req; authorize_request_int(#httpd{path_parts=[<<"_replicator">>], method='PUT'}=Req) -> require_admin(Req); authorize_request_int(#httpd{path_parts=[<<"_replicator">>], method='DELETE'}=Req) -> @@ -81,6 +83,8 @@ server_authorization_check(#httpd{path_parts=[<<"_stats">>]}=Req) -> Req; server_authorization_check(#httpd{path_parts=[<<"_active_tasks">>]}=Req) -> Req; +server_authorization_check(#httpd{path_parts=[<<"_dbs_info">>]}=Req) -> + Req; server_authorization_check(#httpd{method=Method, path_parts=[<<"_utils">>|_]}=Req) when Method =:= 'HEAD' orelse Method =:= 'GET' -> Req; diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl index 9c3044126..cb52e2c40 100644 --- a/src/chttpd/src/chttpd_httpd_handlers.erl +++ b/src/chttpd/src/chttpd_httpd_handlers.erl @@ -18,6 +18,7 @@ url_handler(<<>>) -> fun chttpd_misc:handle_welcome_req/1; url_handler(<<"favicon.ico">>) -> fun chttpd_misc:handle_favicon_req/1; url_handler(<<"_utils">>) -> fun chttpd_misc:handle_utils_dir_req/1; url_handler(<<"_all_dbs">>) -> fun chttpd_misc:handle_all_dbs_req/1; +url_handler(<<"_dbs_info">>) -> fun chttpd_misc:handle_dbs_info_req/1; url_handler(<<"_active_tasks">>) -> fun chttpd_misc:handle_task_status_req/1; url_handler(<<"_scheduler">>) -> fun couch_replicator_httpd:handle_scheduler_req/1; url_handler(<<"_node">>) -> fun chttpd_misc:handle_node_req/1; 
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl index 15eabbfbd..253da233e 100644 --- a/src/chttpd/src/chttpd_misc.erl +++ b/src/chttpd/src/chttpd_misc.erl @@ -14,6 +14,7 @@ -export([ handle_all_dbs_req/1, + handle_dbs_info_req/1, handle_node_req/1, handle_favicon_req/1, handle_favicon_req/2, @@ -37,6 +38,8 @@ [send_json/2,send_json/3,send_method_not_allowed/2, send_chunk/2,start_chunked_response/3]). +-define(MAX_DB_NUM_FOR_DBS_INFO, 100). + % httpd global handlers handle_welcome_req(Req) -> @@ -141,6 +144,38 @@ all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) -> {ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason), {ok, Acc#vacc{resp=Resp1}}. +handle_dbs_info_req(#httpd{method='POST'}=Req) -> + chttpd:validate_ctype(Req, "application/json"), + Props = chttpd:json_body_obj(Req), + Keys = couch_mrview_util:get_view_keys(Props), + case Keys of + undefined -> throw({bad_request, "`keys` member must exist."}); + _ -> ok + end, + MaxNumber = config:get_integer("chttpd", + "max_db_number_for_dbs_info_req", ?MAX_DB_NUM_FOR_DBS_INFO), + case length(Keys) =< MaxNumber of + true -> ok; + false -> throw({bad_request, too_many_keys}) + end, + {ok, Resp} = chttpd:start_json_response(Req, 200), + send_chunk(Resp, "["), + lists:foldl(fun(DbName, AccSeparator) -> + case catch fabric:get_db_info(DbName) of + {ok, Result} -> + Json = ?JSON_ENCODE({[{key, DbName}, {info, {Result}}]}), + send_chunk(Resp, AccSeparator ++ Json); + _ -> + Json = ?JSON_ENCODE({[{key, DbName}, {error, not_found}]}), + send_chunk(Resp, AccSeparator ++ Json) + end, + "," % AccSeparator now has a comma + end, "", Keys), + send_chunk(Resp, "]"), + chttpd:end_json_response(Resp); +handle_dbs_info_req(Req) -> + send_method_not_allowed(Req, "POST"). + handle_task_status_req(#httpd{method='GET'}=Req) -> {Replies, _BadNodes} = gen_server:multi_call(couch_task_status, all), Response = lists:flatmap(fun({Node, Tasks}) -> diff --git a/src/chttpd/test/chttpd_dbs_info_test.erl b/src/chttpd/test/chttpd_dbs_info_test.erl new file mode 100644 index 000000000..5b61d8831 --- /dev/null +++ b/src/chttpd/test/chttpd_dbs_info_test.erl @@ -0,0 +1,169 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_dbs_info_test). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(USER, "chttpd_db_test_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(CONTENT_JSON, {"Content-Type", "application/json"}). + + +setup() -> + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = mochiweb_socket_server:get(chttpd, port), + Url = lists:concat(["http://", Addr, ":", Port, "/"]), + Db1Url = lists:concat([Url, "db1"]), + create_db(Db1Url), + Db2Url = lists:concat([Url, "db2"]), + create_db(Db2Url), + Url. 
+ +teardown(Url) -> + Db1Url = lists:concat([Url, "db1"]), + Db2Url = lists:concat([Url, "db2"]), + delete_db(Db1Url), + delete_db(Db2Url), + ok = config:delete("admins", ?USER, _Persist=false). + +create_db(Url) -> + {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"), + ?assert(Status =:= 201 orelse Status =:= 202). + +delete_db(Url) -> + {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). + +dbs_info_test_() -> + { + "chttpd dbs info tests", + { + setup, + fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_return_error_for_get_db_info/1, + fun should_return_dbs_info_for_single_db/1, + fun should_return_dbs_info_for_multiple_dbs/1, + fun should_return_error_for_exceeded_keys/1, + fun should_return_error_for_missing_keys/1, + fun should_return_dbs_info_for_dbs_with_mixed_state/1 + ] + } + } + }. + + +should_return_error_for_get_db_info(Url) -> + ?_test(begin + {ok, Code, _, ResultBody} = test_request:get(Url ++ "/_dbs_info?" + ++ "keys=[\"db1\"]", [?CONTENT_JSON, ?AUTH]), + {Body} = jiffy:decode(ResultBody), + [ + ?assertEqual(<<"method_not_allowed">>, + couch_util:get_value(<<"error">>, Body)), + ?assertEqual(405, Code) + ] + end). + + +should_return_dbs_info_for_single_db(Url) -> + ?_test(begin + NewDoc = "{\"keys\": [\"db1\"]}", + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", + [?CONTENT_JSON, ?AUTH], NewDoc), + BodyJson = jiffy:decode(ResultBody), + {Db1Data} = lists:nth(1, BodyJson), + [ + ?assertEqual(<<"db1">>, + couch_util:get_value(<<"key">>, Db1Data)), + ?assertNotEqual(undefined, + couch_util:get_value(<<"info">>, Db1Data)) + ] + end). + + +should_return_dbs_info_for_multiple_dbs(Url) -> + ?_test(begin + NewDoc = "{\"keys\": [\"db1\", \"db2\"]}", + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", + [?CONTENT_JSON, ?AUTH], NewDoc), + BodyJson = jiffy:decode(ResultBody), + {Db1Data} = lists:nth(1, BodyJson), + {Db2Data} = lists:nth(2, BodyJson), + [ + ?assertEqual(<<"db1">>, + couch_util:get_value(<<"key">>, Db1Data)), + ?assertNotEqual(undefined, + couch_util:get_value(<<"info">>, Db1Data)), + ?assertEqual(<<"db2">>, + couch_util:get_value(<<"key">>, Db2Data)), + ?assertNotEqual(undefined, + couch_util:get_value(<<"info">>, Db2Data)) + ] + end). + + +should_return_error_for_exceeded_keys(Url) -> + ?_test(begin + NewDoc = "{\"keys\": [\"db1\", \"db2\"]}", + ok = config:set("chttpd", "max_db_number_for_dbs_info_req", "1"), + {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", + [?CONTENT_JSON, ?AUTH], NewDoc), + {Body} = jiffy:decode(ResultBody), + ok = config:delete("chttpd", "max_db_number_for_dbs_info_req"), + [ + ?assertEqual(<<"bad_request">>, + couch_util:get_value(<<"error">>, Body)), + ?assertEqual(400, Code) + ] + end). + + +should_return_error_for_missing_keys(Url) -> + ?_test(begin + NewDoc = "{\"missingkeys\": [\"db1\", \"db2\"]}", + {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", + [?CONTENT_JSON, ?AUTH], NewDoc), + {Body} = jiffy:decode(ResultBody), + [ + ?assertEqual(<<"bad_request">>, + couch_util:get_value(<<"error">>, Body)), + ?assertEqual(400, Code) + ] + end). 
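For reference, the request/response shape these tests exercise. A minimal sketch in the couchTests style (harness assumed; db1 and db2 must already exist, and the number of keys is capped by max_db_number_for_dbs_info_req, 100 by default):

    // Sketch: POST /_dbs_info with a `keys` array returns a JSON array
    // holding one {key, info} entry per requested database.
    var xhr = CouchDB.request("POST", "/_dbs_info", {
      headers: {"Content-Type": "application/json"},
      body: JSON.stringify({keys: ["db1", "db2"]})
    });
    T(xhr.status == 200);
    var results = JSON.parse(xhr.responseText);
    T(results.length == 2);
    T(results[0].key == "db1");
    T(results[0].info.db_name == "db1", "info carries the db info object");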
+ + +should_return_dbs_info_for_dbs_with_mixed_state(Url) -> + ?_test(begin + NewDoc = "{\"keys\": [\"db1\", \"noexisteddb\"]}", + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/", + [?CONTENT_JSON, ?AUTH], NewDoc), + Json = jiffy:decode(ResultBody), + {Db1Data} = lists:nth(1, Json), + {Db2Data} = lists:nth(2, Json), + [ + ?assertEqual( + <<"db1">>, couch_util:get_value(<<"key">>, Db1Data)), + ?assertNotEqual(undefined, + couch_util:get_value(<<"info">>, Db1Data)), + ?assertEqual( + <<"noexisteddb">>, couch_util:get_value(<<"key">>, Db2Data)), + ?assertEqual(undefined, couch_util:get_value(<<"info">>, Db2Data)) + ] + end). diff --git a/src/couch/test/chttpd_endpoints_tests.erl b/src/couch/test/chttpd_endpoints_tests.erl index 715576713..9b7430823 100644 --- a/src/couch/test/chttpd_endpoints_tests.erl +++ b/src/couch/test/chttpd_endpoints_tests.erl @@ -41,6 +41,7 @@ handlers(url_handler) -> {<<"favicon.ico">>, chttpd_misc, handle_favicon_req}, {<<"_utils">>, chttpd_misc, handle_utils_dir_req}, {<<"_all_dbs">>, chttpd_misc, handle_all_dbs_req}, + {<<"_dbs_info">>, chttpd_misc, handle_dbs_info_req}, {<<"_active_tasks">>, chttpd_misc, handle_task_status_req}, {<<"_node">>, chttpd_misc, handle_node_req}, {<<"_reload_query_servers">>, chttpd_misc, handle_reload_query_servers_req}, -- cgit v1.2.1 From 960a6f9dcca7ece74838e377b091050284a3233a Mon Sep 17 00:00:00 2001 From: Juanjo Date: Wed, 24 Jan 2018 15:01:40 +0100 Subject: Fix for issue #603 - Error 500 when creating a db below quorum Add degrade-cluster option for cluster testing Add tests for different cluster conditions with/without quorum Add test-cluster-with-quorum and test-cluster-without-quorum tasks --- Makefile | 34 ++++++++++++++++++++++ dev/run | 24 +++++++++++++-- src/fabric/src/fabric_db_create.erl | 4 +-- test/javascript/run | 16 +++++----- .../tests-cluster/with-quorum/db-creation.js | 27 +++++++++++++++++ .../tests-cluster/without-quorum/db-creation.js | 28 ++++++++++++++++++ 6 files changed, 122 insertions(+), 11 deletions(-) create mode 100644 test/javascript/tests-cluster/with-quorum/db-creation.js create mode 100644 test/javascript/tests-cluster/without-quorum/db-creation.js diff --git a/Makefile b/Makefile index c8c0b093f..05a1f3982 100644 --- a/Makefile +++ b/Makefile @@ -129,6 +129,40 @@ endif 'test/javascript/run --suites "$(suites)" \ --ignore "$(ignore_js_suites)"' +.PHONY: test-cluster-with-quorum +test-cluster-with-quorum: + @mkdir -p share/www/script/test +ifeq ($(IN_RELEASE), true) + @cp test/javascript/tests/lorem*.txt share/www/script/test/ +else + @mkdir -p src/fauxton/dist/release/test + @cp test/javascript/tests/lorem*.txt src/fauxton/dist/release/test/ +endif + @rm -rf dev/lib + @dev/run -n 3 -q --with-admin-party-please \ + --enable-erlang-views --degrade-cluster 1 \ + -c 'startup_jitter=0' \ + 'test/javascript/run --suites "$(suites)" \ + --ignore "$(ignore_js_suites)" \ + --path test/javascript/tests-cluster/with-quorum' + +.PHONY: test-cluster-without-quorum +test-cluster-without-quorum: + @mkdir -p share/www/script/test +ifeq ($(IN_RELEASE), true) + @cp test/javascript/tests/lorem*.txt share/www/script/test/ +else + @mkdir -p src/fauxton/dist/release/test + @cp test/javascript/tests/lorem*.txt src/fauxton/dist/release/test/ +endif + @rm -rf dev/lib + @dev/run -n 3 -q --with-admin-party-please \ + --enable-erlang-views --degrade-cluster 2 \ + -c 'startup_jitter=0' \ + 'test/javascript/run --suites "$(suites)" \ + --ignore "$(ignore_js_suites)" \ + --path 
test/javascript/tests-cluster/without-quorum' + .PHONY: soak-javascript soak-javascript: @mkdir -p share/www/script/test diff --git a/dev/run b/dev/run index 4924de1f6..a5d8fde8c 100755 --- a/dev/run +++ b/dev/run @@ -130,6 +130,8 @@ def setup_argparse(): help='The node number to seed them when creating the node(s)') parser.add_option('-c', '--config-overrides', action="append", default=[], help='Optional key=val config overrides. Can be repeated') + parser.add_option('--degrade-cluster', dest="degrade_cluster",type=int, default=0, + help='The number of nodes that should be stopped after cluster config') return parser.parse_args() @@ -142,6 +144,7 @@ def setup_context(opts, args): 'admin': opts.admin.split(':', 1) if opts.admin else None, 'nodes': ['node%d' % (i + opts.node_number) for i in range(opts.nodes)], 'node_number': opts.node_number, + 'degrade_cluster': opts.degrade_cluster, 'devdir': os.path.dirname(fpath), 'rootdir': os.path.dirname(os.path.dirname(fpath)), 'cmd': ' '.join(args), @@ -337,18 +340,35 @@ def startup(ctx): cluster_setup_with_admin_party(ctx) else: cluster_setup(ctx) - + if ctx['degrade_cluster'] > 0: + degrade_cluster(ctx) def kill_processes(ctx): for proc in ctx['procs']: if proc and proc.returncode is None: proc.kill() +def degrade_cluster(ctx): + if ctx['with_haproxy']: + haproxy_proc = ctx['procs'].pop() + for i in range(0,ctx['degrade_cluster']): + proc = ctx['procs'].pop() + if proc is not None: + kill_process(proc) + if ctx['with_haproxy']: + ctx['procs'].append(haproxy_proc) + +@log('Stopping proc {proc.pid}') +def kill_process(proc): + if proc and proc.returncode is None: + proc.kill() def boot_nodes(ctx): for node in ctx['nodes']: ctx['procs'].append(boot_node(ctx, node)) - ctx['procs'].append(boot_haproxy(ctx)) + haproxy_proc = boot_haproxy(ctx) + if haproxy_proc is not None: + ctx['procs'].append(haproxy_proc) def ensure_all_nodes_alive(ctx): diff --git a/src/fabric/src/fabric_db_create.erl b/src/fabric/src/fabric_db_create.erl index d793f4f13..db914f90e 100644 --- a/src/fabric/src/fabric_db_create.erl +++ b/src/fabric/src/fabric_db_create.erl @@ -146,9 +146,9 @@ maybe_stop(W, Counters) -> {ok, {W, Counters}}; false -> case lists:sum([1 || {_, ok} <- Counters]) of - W -> + NumOk when NumOk >= (W div 2 +1) -> {stop, ok}; - NumOk when NumOk >= (W div 2 + 1) -> + NumOk when NumOk > 0 -> {stop, accepted}; _ -> {error, internal_server_error} diff --git a/test/javascript/run b/test/javascript/run index c611be51e..8ae424467 100755 --- a/test/javascript/run +++ b/test/javascript/run @@ -107,7 +107,10 @@ def options(): dest="ignore", help="Ignore test suites"), op.make_option("-u", "--suites", type="string", action="callback", default=None, callback=get_delimited_list, - dest="suites", help="Run specific suites") + dest="suites", help="Run specific suites"), + op.make_option("-p", "--path", type="string", + default="test/javascript/tests", + dest="test_path", help="Path where the tests are located") ] @@ -118,10 +121,9 @@ def main(): run_list = [] ignore_list = [] tests = [] - - run_list = ["test/javascript/tests"] if not opts.suites else opts.suites - run_list = build_test_case_paths(run_list) - ignore_list = build_test_case_paths(opts.ignore) + run_list = [opts.test_path] if not opts.suites else opts.suites + run_list = build_test_case_paths(opts.test_path,run_list) + ignore_list = build_test_case_paths(opts.test_path,opts.ignore) # sort is needed because certain tests fail if executed out of order tests = sorted(list(set(run_list)-set(ignore_list))) @@ -151,7
+153,7 @@ def main(): failed, passed) + os.linesep) exit(failed > 0) -def build_test_case_paths(args=None): +def build_test_case_paths(path,args=None): tests = [] if args is None: args = [] @@ -161,7 +163,7 @@ def build_test_case_paths(args=None): elif os.path.isfile(name): check = tests.append(name) else: - pname = os.path.join("test/javascript/tests", name) + pname = os.path.join(path, name) if os.path.isfile(pname): tests.append(pname) elif os.path.isfile(pname + ".js"): diff --git a/test/javascript/tests-cluster/with-quorum/db-creation.js b/test/javascript/tests-cluster/with-quorum/db-creation.js new file mode 100644 index 000000000..f8efd6e68 --- /dev/null +++ b/test/javascript/tests-cluster/with-quorum/db-creation.js @@ -0,0 +1,27 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. + +// Do DB creation under cluster with quorum conditions. +couchTests.db_creation = function(debug) { + + if (debug) debugger; + + var db_name = get_random_db_name() + var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"}); + + // DB Creation should return 201 - Created + xhr = CouchDB.request("PUT", "/" + db_name + "/"); + T(xhr.status == 201); + + // cleanup + db.deleteDb(); +}; diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/without-quorum/db-creation.js new file mode 100644 index 000000000..0d8ff8367 --- /dev/null +++ b/test/javascript/tests-cluster/without-quorum/db-creation.js @@ -0,0 +1,28 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. + +// Do DB creation under cluster without quorum conditions. +couchTests.db_creation = function(debug) { + + if (debug) debugger; + + var db_name = get_random_db_name() + var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"}); + + // DB Creation should return 202 - Accepted + xhr = CouchDB.request("PUT", "/" + db_name + "/"); + T(xhr.status == 202); + + // cleanup + // TODO: DB deletion fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/"); +}; -- cgit v1.2.1 From 1c39e0c70f8ac1ac5959ae86c84ba9d3f3f9df9e Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Tue, 30 Jan 2018 11:40:31 +0100 Subject: feat: add quorum tests to make check --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index 05a1f3982..8ccbf820c 100644 --- a/Makefile +++ b/Makefile @@ -91,6 +91,8 @@ fauxton: share/www .PHONY: check # target: check - Test everything check: all + @$(MAKE) test-cluster-with-quorum + @$(MAKE) test-cluster-without-quorum @$(MAKE) eunit @$(MAKE) javascript @$(MAKE) mango-test @@ -129,6 +131,7 @@ endif 'test/javascript/run --suites "$(suites)" \ --ignore "$(ignore_js_suites)"' +# TODO: port to Makefile.win .PHONY: test-cluster-with-quorum test-cluster-with-quorum: @mkdir -p share/www/script/test @@ -146,6 +149,7 @@ endif --ignore "$(ignore_js_suites)" \ --path test/javascript/tests-cluster/with-quorum' +# TODO: port to Makefile.win .PHONY: test-cluster-without-quorum test-cluster-without-quorum: @mkdir -p share/www/script/test -- cgit v1.2.1 From 7a296d2d6348195ed7788ef707b35486fa6ded0c Mon Sep 17 00:00:00 2001 From: Juanjo Rodriguez Date: Wed, 31 Jan 2018 23:29:24 +0100 Subject: Fix for issue #1134 clean up dev/lib before run mango tests (#1135) * make javascript, test-cluster-with[out]-quorum and mango tests dependent of devclean target --- Makefile | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 8ccbf820c..bd3b8ac58 100644 --- a/Makefile +++ b/Makefile @@ -116,7 +116,7 @@ soak-eunit: couch .PHONY: javascript # target: javascript - Run JavaScript test suites or specific ones defined by suites option -javascript: +javascript: devclean @mkdir -p share/www/script/test ifeq ($(IN_RELEASE), true) @cp test/javascript/tests/lorem*.txt share/www/script/test/ @@ -124,7 +124,6 @@ else @mkdir -p src/fauxton/dist/release/test @cp test/javascript/tests/lorem*.txt src/fauxton/dist/release/test/ endif - @rm -rf dev/lib @dev/run -n 1 -q --with-admin-party-please \ --enable-erlang-views \ -c 'startup_jitter=0' \ @@ -133,7 +132,7 @@ endif # TODO: port to Makefile.win .PHONY: test-cluster-with-quorum -test-cluster-with-quorum: +test-cluster-with-quorum: devclean @mkdir -p share/www/script/test ifeq ($(IN_RELEASE), true) @cp test/javascript/tests/lorem*.txt share/www/script/test/ @@ -141,7 +140,6 @@ else @mkdir -p src/fauxton/dist/release/test @cp test/javascript/tests/lorem*.txt src/fauxton/dist/release/test/ endif - @rm -rf dev/lib @dev/run -n 3 -q --with-admin-party-please \ --enable-erlang-views --degrade-cluster 1 \ -c 'startup_jitter=0' \ @@ -151,7 +149,7 @@ endif # TODO: port to Makefile.win .PHONY: test-cluster-without-quorum -test-cluster-without-quorum: +test-cluster-without-quorum: devclean @mkdir -p share/www/script/test ifeq ($(IN_RELEASE), true) @cp test/javascript/tests/lorem*.txt share/www/script/test/ @@ -159,7 +157,6 @@ else @mkdir -p src/fauxton/dist/release/test @cp test/javascript/tests/lorem*.txt src/fauxton/dist/release/test/ endif - @rm -rf dev/lib @dev/run -n 3 -q --with-admin-party-please \ --enable-erlang-views --degrade-cluster 2 \ -c 'startup_jitter=0' \ @@ -222,7 +219,7 @@ build-test: .PHONY: mango-test # target: mango-test - Run Mango tests -mango-test: all +mango-test: devclean all ./test/build/test-run-couch-for-mango.sh \ -- cgit v1.2.1 From 1446e87fda24538d84735dc27f471157761ecec3 Mon Sep 17 00:00:00 2001 From: jiangphcn Date: Tue, 6 Feb 2018 20:53:20 +0800 Subject: Remove 
queries for _all_docs issue 820 --- src/chttpd/src/chttpd_db.erl | 42 ++++--------------- src/chttpd/test/chttpd_db_test.erl | 85 -------------------------------------- test/javascript/tests/basics.js | 2 +- 3 files changed, 9 insertions(+), 120 deletions(-) diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl index 3dc9f5794..de5c79c66 100644 --- a/src/chttpd/src/chttpd_db.erl +++ b/src/chttpd/src/chttpd_db.erl @@ -520,18 +520,14 @@ db_req(#httpd{method='GET',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) -> db_req(#httpd{method='POST',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) -> chttpd:validate_ctype(Req, "application/json"), - Props = chttpd:json_body_obj(Req), - Keys = couch_mrview_util:get_view_keys(Props), - Queries = couch_mrview_util:get_view_queries(Props), - case {Queries, Keys} of - {Queries, undefined} when is_list(Queries) -> - multi_all_docs_view(Req, Db, OP, Queries); - {undefined, Keys} when is_list(Keys) -> - all_docs_view(Req, Db, Keys, OP); - {undefined, undefined} -> - all_docs_view(Req, Db, undefined, OP); - {_, _} -> - throw({bad_request, "`keys` and `queries` are mutually exclusive"}) + {Fields} = chttpd:json_body_obj(Req), + case couch_util:get_value(<<"keys">>, Fields, nil) of + Keys when is_list(Keys) -> + all_docs_view(Req, Db, Keys, OP); + nil -> + all_docs_view(Req, Db, undefined, OP); + _ -> + throw({bad_request, "`keys` body member must be an array."}) end; db_req(#httpd{path_parts=[_,OP]}=Req, _Db) when ?IS_ALL_DOCS(OP) -> @@ -640,28 +636,6 @@ db_req(#httpd{path_parts=[_, DocId]}=Req, Db) -> db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) -> db_attachment_req(Req, Db, DocId, FileNameParts). -multi_all_docs_view(Req, Db, OP, Queries) -> - Args0 = couch_mrview_http:parse_params(Req, undefined), - Args1 = Args0#mrargs{view_type=map}, - ArgQueries = lists:map(fun({Query}) -> - QueryArg1 = couch_mrview_http:parse_params(Query, undefined, - Args1, [decoded]), - QueryArgs2 = couch_mrview_util:validate_args(QueryArg1), - set_namespace(OP, QueryArgs2) - end, Queries), - Options = [{user_ctx, Req#httpd.user_ctx}], - VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n"}, - FirstChunk = "{\"results\":[", - {ok, Resp0} = chttpd:start_delayed_json_response(VAcc0#vacc.req, 200, [], FirstChunk), - VAcc1 = VAcc0#vacc{resp=Resp0}, - VAcc2 = lists:foldl(fun(Args, Acc0) -> - {ok, Acc1} = fabric:all_docs(Db, Options, - fun couch_mrview_http:view_cb/2, Acc0, Args), - Acc1 - end, VAcc1, ArgQueries), - {ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"), - chttpd:end_delayed_json_response(Resp1). 
- all_docs_view(Req, Db, Keys, OP) -> Args0 = couch_mrview_http:parse_params(Req, Keys), Args1 = Args0#mrargs{view_type=map}, diff --git a/src/chttpd/test/chttpd_db_test.erl b/src/chttpd/test/chttpd_db_test.erl index 1725f87a9..2071ca502 100644 --- a/src/chttpd/test/chttpd_db_test.erl +++ b/src/chttpd/test/chttpd_db_test.erl @@ -70,11 +70,6 @@ all_test_() -> fun should_return_409_for_put_att_nonexistent_rev/1, fun should_return_update_seq_when_set_on_all_docs/1, fun should_not_return_update_seq_when_unset_on_all_docs/1, - fun should_succeed_on_all_docs_with_queries_keys/1, - fun should_succeed_on_all_docs_with_queries_limit_skip/1, - fun should_succeed_on_all_docs_with_multiple_queries/1, - fun should_succeed_on_design_docs_with_multiple_queries/1, - fun should_fail_on_multiple_queries_with_keys_and_queries/1, fun should_return_correct_id_on_doc_copy/1 ] } @@ -228,86 +223,6 @@ should_not_return_update_seq_when_unset_on_all_docs(Url) -> end). -should_succeed_on_all_docs_with_queries_keys(Url) -> - ?_test(begin - [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/", - [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). - - -should_succeed_on_all_docs_with_queries_limit_skip(Url) -> - ?_test(begin - [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/", - [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)), - ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson))) - end). - - -should_succeed_on_all_docs_with_multiple_queries(Url) -> - ?_test(begin - [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}, - {\"limit\": 5, \"skip\": 2}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/", - [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson1} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), - {InnerJson2} = lists:nth(2, ResultJsonBody), - ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), - ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) - end). 
- - -should_succeed_on_design_docs_with_multiple_queries(Url) -> - ?_test(begin - [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\", - \"_design/ddoc8\"]}, {\"limit\": 5, \"skip\": 2}]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ "/_design_docs/", - [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(200, RC), - {ResultJson} = ?JSON_DECODE(RespBody), - ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson), - {InnerJson1} = lists:nth(1, ResultJsonBody), - ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))), - {InnerJson2} = lists:nth(2, ResultJsonBody), - ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)), - ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2))) - end). - - -should_fail_on_multiple_queries_with_keys_and_queries(Url) -> - ?_test(begin - [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)], - QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}], - \"keys\": [ \"testdoc4\", \"testdoc9\"]}", - {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/", - [?CONTENT_JSON, ?AUTH], QueryDoc), - ?assertEqual(400, RC), - ?assertMatch({[ - {<<"error">>,<<"bad_request">>}, - {<<"reason">>,<<"`keys` and `queries` are mutually exclusive">>}]}, - ?JSON_DECODE(RespBody)) - end). - - should_return_correct_id_on_doc_copy(Url) -> ?_test(begin {ok, _, _, _} = create_doc(Url, "testdoc"), diff --git a/test/javascript/tests/basics.js b/test/javascript/tests/basics.js index 79599516d..a36b3035d 100644 --- a/test/javascript/tests/basics.js +++ b/test/javascript/tests/basics.js @@ -268,7 +268,7 @@ couchTests.basics = function(debug) { T(xhr.status == 400); result = JSON.parse(xhr.responseText); T(result.error == "bad_request"); - T(result.reason == "`keys` member must be an array."); + T(result.reason == "`keys` body member must be an array."); // oops, the doc id got lost in code nirwana xhr = CouchDB.request("DELETE", "/" + db_name + "/?rev=foobarbaz"); -- cgit v1.2.1
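With multi-query support removed, a `keys` array is the only POST body member _all_docs still accepts. A minimal sketch in the couchTests style (harness and a populated database assumed):

    // Sketch: POST /{db}/_all_docs with `keys` returns only those rows;
    // a `queries` member is no longer recognized on this endpoint.
    var xhr = CouchDB.request("POST", "/" + db_name + "/_all_docs", {
      headers: {"Content-Type": "application/json"},
      body: JSON.stringify({keys: ["testdoc3", "testdoc8"]})
    });
    T(xhr.status == 200);
    var result = JSON.parse(xhr.responseText);
    T(result.rows.length == 2, "only the requested keys come back");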