author     Ian Boros <ian.boros@mongodb.com>                 2021-12-16 19:27:22 -0500
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-01-05 03:29:28 +0000
commit     e1a3d63d9753d6922b36335603f37eb6cbb89c3f (patch)
tree       a18f6cb8d46637f41a20c674cc1952393d3016c0
parent     fab9d1e756563e5377bb8a746011f3c08fefeb5b (diff)
SERVER-61819 Fix SBE yielding bug around capped collections
Previously, if SBE was reading from a capped collection, it was possible for the scan to fall off the capped collection across a yield without returning a CappedPositionLost error.
 jstests/noPassthrough/fall_off_capped_collection_yielding.js | 57 +++++++++++++
 src/mongo/db/exec/sbe/stages/scan.cpp                        |  8 ++++++--
 2 files changed, 63 insertions(+), 2 deletions(-)
diff --git a/jstests/noPassthrough/fall_off_capped_collection_yielding.js b/jstests/noPassthrough/fall_off_capped_collection_yielding.js
new file mode 100644
index 00000000000..e1e9b5aeed6
--- /dev/null
+++ b/jstests/noPassthrough/fall_off_capped_collection_yielding.js
@@ -0,0 +1,57 @@
+/**
+ * Tests that falling off a capped collection across a yield results in the correct error.
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/fail_point_util.js");
+
+const conn = MongoRunner.runMongod();
+const testDB = conn.getDB('test');
+
+const coll = testDB.fall_off_capped_collection_yielding;
+const kCollectionMaxSize = 20;
+coll.drop();
+assert.commandWorked(
+ testDB.createCollection(coll.getName(), {capped: true, size: 4096, max: kCollectionMaxSize}));
+
+// Insert 10 documents.
+const numDocs = 10;
+for (let i = 0; i < numDocs; ++i) {
+ assert.commandWorked(coll.insert({_id: i}));
+}
+
+assert.commandWorked(testDB.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 2}));
+
+const failPoint =
+ configureFailPoint(testDB, "setYieldAllLocksHang", {namespace: coll.getFullName()});
+let joinParallelShell = null;
+// We use this try/finally pattern to ensure that the fail point gets disabled even if the test
+// fails.
+try {
+ // In a separate shell, run the query.
+ joinParallelShell = startParallelShell(function() {
+ const err =
+ assert.throws(() => printjson(db.fall_off_capped_collection_yielding.find().toArray()));
+ assert.eq(err.code, ErrorCodes.CappedPositionLost);
+ }, conn.port);
+
+ failPoint.wait();
+
+ // Now do a bunch of inserts, rolling over the capped collection.
+ for (let i = 0; i < kCollectionMaxSize; ++i) {
+ assert.commandWorked(coll.insert({_id: 100 + i}));
+ }
+
+} finally {
+ // Unblock the thread doing the query by disabling the failpoint.
+ failPoint.off();
+}
+
+// Join with the parallel shell.
+if (joinParallelShell) {
+ joinParallelShell();
+}
+
+MongoRunner.stopMongod(conn);
+})();
diff --git a/src/mongo/db/exec/sbe/stages/scan.cpp b/src/mongo/db/exec/sbe/stages/scan.cpp
index 6be957e2185..03f8a9271ca 100644
--- a/src/mongo/db/exec/sbe/stages/scan.cpp
+++ b/src/mongo/db/exec/sbe/stages/scan.cpp
@@ -338,6 +338,12 @@ PlanState ScanStage::getNext() {
// case it yields as the state will be completely overwritten after the next() call.
disableSlotAccess();
+ // This call to checkForInterrupt() may result in a call to save() or restore() on the entire
+ // PlanStage tree if a yield occurs. It's important that we call checkForInterrupt() before
+ // checking '_needsToCheckCappedPositionLost' since a call to restoreState() may set
+ // '_needsToCheckCappedPositionLost'.
+ checkForInterrupt(_opCtx);
+
if (_needsToCheckCappedPositionLost) {
_cursor->save();
if (!_cursor->restore(false /* do not tolerate capped position lost */)) {
@@ -348,8 +354,6 @@ PlanState ScanStage::getNext() {
_needsToCheckCappedPositionLost = false;
}
- checkForInterrupt(_opCtx);
-
auto res = _firstGetNext && _seekKeyAccessor;
auto nextRecord = res ? _cursor->seekExact(_key) : _cursor->next();
_firstGetNext = false;
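
To make the ordering issue concrete, below is a self-contained toy model, not MongoDB code: ToyCursor, ToyScanStage, validateCappedPosition(), and the boolean parameters are hypothetical stand-ins for the real machinery (PlanExecutor yield handling, restoreState(), and _cursor->restore(false) in the diff above). The sketch assumes the failure mode described in the new comment: a yield inside checkForInterrupt() sets '_needsToCheckCappedPositionLost' after the old code had already consulted it, and the subsequent cursor advance repositions the cursor so that the next call's validation finds nothing wrong.

// Toy model of the SERVER-61819 ordering bug. All names are hypothetical
// stand-ins; only the ordering argument mirrors the real scan stage.
#include <iostream>
#include <stdexcept>

struct ToyCursor {
    bool positionLost = false;  // set if the capped collection rolls over
                                // while the query is yielded

    // Advancing a cursor whose position was lost resumes at the next
    // surviving document, which hides the loss from later validation.
    void next() { positionLost = false; }

    // Strict restore (restore(false) in the real stage): report whether the
    // saved position still exists.
    bool restoreStrict() const { return !positionLost; }
};

struct ToyScanStage {
    ToyCursor cursor;
    bool needsToCheckCappedPositionLost = false;

    // Models checkForInterrupt(): a yield may occur here, and restoring from
    // it both loses the capped position and flags the stage to re-validate.
    void checkForInterrupt(bool yieldAndRollOver) {
        if (yieldAndRollOver) {
            cursor.positionLost = true;
            needsToCheckCappedPositionLost = true;
        }
    }

    void validateCappedPosition() {
        if (needsToCheckCappedPositionLost) {
            needsToCheckCappedPositionLost = false;
            if (!cursor.restoreStrict()) {
                throw std::runtime_error("CappedPositionLost");
            }
        }
    }

    // Old ordering: validate, then hit the yield point, then advance. A yield
    // sets the flag *after* it was consulted, and next() heals the cursor, so
    // the following call's validation passes.
    void getNextOld(bool yieldAndRollOver) {
        validateCappedPosition();
        checkForInterrupt(yieldAndRollOver);
        cursor.next();
    }

    // Patched ordering: hit the yield point first, so the flag set by a yield
    // is seen before the cursor can advance past the lost position.
    void getNextNew(bool yieldAndRollOver) {
        checkForInterrupt(yieldAndRollOver);
        validateCappedPosition();
        cursor.next();
    }
};

int main() {
    ToyScanStage oldStage;
    oldStage.getNextOld(true);   // yields and rolls over, but no error...
    oldStage.getNextOld(false);  // ...and re-validation now passes: the bug
    std::cout << "old ordering: capped position loss went undetected\n";

    ToyScanStage newStage;
    try {
        newStage.getNextNew(true);
    } catch (const std::runtime_error& e) {
        std::cout << "new ordering: " << e.what() << "\n";
    }
    return 0;
}

Under those assumptions, the old ordering never raises an error, while the patched ordering surfaces CappedPositionLost on the first getNext() after the yield, which is the behavior the new jstest asserts.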