author     Charlie Swanson <charlie.swanson@mongodb.com>   2017-08-31 11:52:47 -0400
committer  Charlie Swanson <charlie.swanson@mongodb.com>   2017-09-01 15:36:35 -0400
commit     4e01e3582541fc00ec2e83c97cac89b59fbfeb34 (patch)
tree       8a45fd7b6a4dc03e7ae618cd1553286266dbadec
parent     144cb2c717d2d58b7503eab7fa15f28f95772bcc (diff)
SERVER-30907 Ban $changeStream on non replica set deployments
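
With this change, an aggregation that includes $changeStream is rejected up front unless the node is a member of a replica set. As a rough illustration only (not part of this patch; the collection name is a placeholder), the rejected request looks roughly like this from the mongo shell, using the error code asserted in the new test below:

    // Hypothetical shell session against a standalone mongod.
    // The server refuses to construct the $changeStream stage and fails the command.
    const res = db.runCommand({
        aggregate: "example",             // placeholder collection name
        pipeline: [{$changeStream: {}}],
        cursor: {}
    });
    // 40573: "The $changeStream stage is only supported on replica sets"
    assert.commandFailedWithCode(res, 40573);

Through mongos the new test still expects code 40567 for now (see the TODO for SERVER-29142 below).
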
-rw-r--r--  buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml        2
-rw-r--r--  buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml   2
-rw-r--r--  buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml  3
-rw-r--r--  buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml        3
-rw-r--r--  jstests/core/tailable_cursor_invalidation.js                                        72
-rw-r--r--  jstests/noPassthrough/unsupported_change_stream_deployments.js                      36
-rw-r--r--  src/mongo/db/pipeline/document_source_change_stream.cpp                             5
-rw-r--r--  src/mongo/s/query/cluster_find.cpp                                                   8
8 files changed, 126 insertions, 5 deletions
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
index 3b3a9832ea5..afe6f6a43fc 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
@@ -102,6 +102,8 @@ selector:
- jstests/core/bypass_doc_validation.js
- jstests/core/capped_max1.js
- jstests/core/commands_namespace_parsing.js
+ - jstests/core/tailable_cursor_invalidation.js
+ - jstests/core/tailable_getmore_batch_size.js
- jstests/core/tailable_skip_limit.js
- jstests/core/constructors.js
- jstests/core/index_stats.js
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
index 77525a98f6a..bbdf1f8ab57 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
@@ -117,6 +117,8 @@ selector:
- jstests/core/bypass_doc_validation.js
- jstests/core/capped_max1.js
- jstests/core/commands_namespace_parsing.js
+ - jstests/core/tailable_cursor_invalidation.js
+ - jstests/core/tailable_getmore_batch_size.js
- jstests/core/tailable_skip_limit.js
- jstests/core/constructors.js
- jstests/core/index_stats.js
diff --git a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
index 5b16e4026e6..2ab418e0bf0 100644
--- a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
@@ -41,8 +41,9 @@ selector:
- jstests/core/stages*.js # stageDebug.
- jstests/core/startup_log.js # "local" database.
- jstests/core/storageDetailsCommand.js # diskStorageStats.
- - jstests/core/tailable_skip_limit.js # capped collections.
+ - jstests/core/tailable_cursor_invalidation.js # capped collections.
- jstests/core/tailable_getmore_batch_size.js # capped collections.
+ - jstests/core/tailable_skip_limit.js # capped collections.
- jstests/core/top.js # top.
# The following tests fail because mongos behaves differently from mongod when testing certain
# functionality. The differences are in a comment next to the failing test.
diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
index 964a6d437cf..fae8086dfa5 100644
--- a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
@@ -39,8 +39,9 @@ selector:
- jstests/core/stages*.js # stageDebug.
- jstests/core/startup_log.js # "local" database.
- jstests/core/storageDetailsCommand.js # diskStorageStats.
- - jstests/core/tailable_skip_limit.js # capped collections.
+ - jstests/core/tailable_cursor_invalidation.js # capped collections.
- jstests/core/tailable_getmore_batch_size.js # capped collections.
+ - jstests/core/tailable_skip_limit.js # capped collections.
- jstests/core/top.js # top.
# The following tests fail because mongos behaves differently from mongod when testing certain
# functionality. The differences are in a comment next to the failing test.
diff --git a/jstests/core/tailable_cursor_invalidation.js b/jstests/core/tailable_cursor_invalidation.js
new file mode 100644
index 00000000000..e6d11bb8f12
--- /dev/null
+++ b/jstests/core/tailable_cursor_invalidation.js
@@ -0,0 +1,72 @@
+// Tests for the behavior of tailable cursors when a collection is dropped or the cursor is
+// otherwise invalidated.
+(function() {
+ "use strict";
+
+ const collName = "tailable_cursor_invalidation";
+ const coll = db[collName];
+ coll.drop();
+
+ // Test that opening a cursor (tailable or not) on a non-existent collection returns an
+ // already-exhausted cursor (cursor id 0).
+ assert.eq(0, assert.commandWorked(db.runCommand({find: collName})).cursor.id);
+ assert.eq(0, assert.commandWorked(db.runCommand({find: collName, tailable: true})).cursor.id);
+ assert.eq(0,
+ assert.commandWorked(db.runCommand({find: collName, tailable: true, awaitData: true}))
+ .cursor.id);
+ const emptyBatchCursorId =
+ assert
+ .commandWorked(
+ db.runCommand({find: collName, tailable: true, awaitData: true, batchSize: 0}))
+ .cursor.id;
+ const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid;
+ if (isMongos) {
+ // Mongos will let you establish a cursor with batch size 0 and return to you before it
+ // realizes the shard's cursor is exhausted. The next getMore should return a 0 cursor
+ // id, though.
+ assert.neq(emptyBatchCursorId, 0);
+ assert.eq(
+ 0,
+ assert.commandWorked(db.runCommand({getMore: emptyBatchCursorId, collection: collName}))
+ .cursor.id);
+ } else {
+ // A mongod should know immediately that the collection doesn't exist, and return a 0
+ // cursor id.
+ assert.eq(0, emptyBatchCursorId);
+ }
+
+ function dropAndRecreateColl() {
+ coll.drop();
+ assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024}));
+ const numDocs = 4;
+ const bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
+ }
+ dropAndRecreateColl();
+
+ /**
+ * Runs a find command to establish a cursor. Asserts that the command worked and that the
+ * cursor id is not 0, then returns the cursor id.
+ */
+ function openCursor({tailable, awaitData}) {
+ const findRes = assert.commandWorked(
+ db.runCommand({find: collName, tailable: tailable, awaitData: awaitData}));
+ assert.neq(findRes.cursor.id, 0);
+ assert.eq(findRes.cursor.ns, coll.getFullName());
+ return findRes.cursor.id;
+ }
+
+ // Test that a cursor cannot be found if a collection is dropped between a find and a getMore.
+ let cursorId = openCursor({tailable: true, awaitData: false});
+ dropAndRecreateColl();
+ assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: collName}),
+ ErrorCodes.CursorNotFound);
+ cursorId = openCursor({tailable: true, awaitData: true});
+ dropAndRecreateColl();
+ assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: collName}),
+ ErrorCodes.CursorNotFound);
+}());
diff --git a/jstests/noPassthrough/unsupported_change_stream_deployments.js b/jstests/noPassthrough/unsupported_change_stream_deployments.js
new file mode 100644
index 00000000000..7473168df74
--- /dev/null
+++ b/jstests/noPassthrough/unsupported_change_stream_deployments.js
@@ -0,0 +1,36 @@
+// Tests that the $changeStream stage returns an error when run against a standalone mongod,
+// a master/slave deployment, or a sharded cluster whose shards are standalone nodes.
+(function() {
+ "use strict";
+ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+
+ function assertChangeStreamNotSupportedOnConnection(conn) {
+ const notReplicaSetErrorCode = 40573;
+ assertErrorCode(
+ conn.getDB("test").non_existent, [{$changeStream: {}}], notReplicaSetErrorCode);
+ assertErrorCode(conn.getDB("test").non_existent,
+ [{$changeStream: {fullDocument: "updateLookup"}}],
+ notReplicaSetErrorCode);
+ }
+
+ const conn = MongoRunner.runMongod();
+ assert.neq(null, conn, "mongod was unable to start up");
+ assertChangeStreamNotSupportedOnConnection(conn);
+ assert.eq(0, MongoRunner.stopMongod(conn));
+
+ // Test master/slave deployments.
+ const masterSlaveFixture = new ReplTest("change_stream");
+ const master = masterSlaveFixture.start(true);
+ assertChangeStreamNotSupportedOnConnection(master);
+ const slave = masterSlaveFixture.start(false);
+ assertChangeStreamNotSupportedOnConnection(slave);
+
+ // Test a sharded cluster with standalone shards.
+ const clusterWithStandalones = new ShardingTest({shards: 2});
+ // Make sure the database exists before running any commands.
+ const mongosDB = clusterWithStandalones.getDB("test");
+ assert.writeOK(mongosDB.unrelated.insert({}));
+ // TODO SERVER-29142 This error code will change to match the others.
+ assertErrorCode(mongosDB.non_existent, [{$changeStream: {}}], 40567);
+ assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.shard0);
+ assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.shard1);
+}());
diff --git a/src/mongo/db/pipeline/document_source_change_stream.cpp b/src/mongo/db/pipeline/document_source_change_stream.cpp
index cc9d0fed9c9..6b73b4b5084 100644
--- a/src/mongo/db/pipeline/document_source_change_stream.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream.cpp
@@ -228,7 +228,10 @@ list<intrusive_ptr<DocumentSource>> DocumentSourceChangeStream::createFromBson(
!expCtx->getCollator());
auto replCoord = repl::ReplicationCoordinator::get(expCtx->opCtx);
- uassert(40573, "The $changeStream stage is only supported on replica sets", replCoord);
+ uassert(40573,
+ "The $changeStream stage is only supported on replica sets",
+ replCoord &&
+ replCoord->getReplicationMode() == repl::ReplicationCoordinator::Mode::modeReplSet);
Timestamp startFrom = replCoord->getMyLastAppliedOpTime().getTimestamp();
intrusive_ptr<DocumentSource> resumeStage = nullptr;
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index 27afa51d5a5..57050b408b6 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -444,8 +444,12 @@ StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* opCtx,
}
if (next.getValue().isEOF()) {
- // We reached end-of-stream.
- if (!pinnedCursor.getValue().isTailable()) {
+ // We reached end-of-stream. If the cursor is not tailable, then we mark it as
+ // exhausted. If it is tailable, usually we keep it open (i.e. "NotExhausted") even when
+ // we reach end-of-stream. However, if all the remote cursors are exhausted, there is no
+ // hope of returning data and thus we need to close the mongos cursor as well.
+ if (!pinnedCursor.getValue().isTailable() ||
+ pinnedCursor.getValue().remotesExhausted()) {
cursorState = ClusterCursorManager::CursorState::Exhausted;
}
break;
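
The observable effect of the cluster_find.cpp change is what the new tailable_cursor_invalidation.js test exercises on mongos: once every remote (shard) cursor is exhausted, a getMore on a tailable mongos cursor marks the cursor exhausted instead of keeping it open forever. A minimal shell sketch of that behavior, mirroring the test above (the collection name is a placeholder and is assumed not to exist; not part of this patch):

    // Mongos may hand back a non-zero cursor id for a batchSize-0 tailable find before it
    // learns that the shard cursors are already exhausted.
    const findRes = assert.commandWorked(db.runCommand(
        {find: "missingColl", tailable: true, awaitData: true, batchSize: 0}));
    // With this change, the next getMore notices that all remotes are exhausted and
    // returns cursor id 0 rather than leaving the tailable cursor open.
    const getMoreRes = assert.commandWorked(
        db.runCommand({getMore: findRes.cursor.id, collection: "missingColl"}));
    assert.eq(0, getMoreRes.cursor.id);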