author     Mihai Andrei <mihai.andrei@10gen.com>              2021-07-20 17:18:53 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2021-07-20 17:31:45 +0000
commit     aed22553124e994a54385f36c5d5ce1467b3cd92 (patch)
tree       592fe690e5cc64f0aeb79a1d22d6285290d7e067
parent     1afe067d8b026da8387d81b4d762fb3f067dd6f5 (diff)
download   mongo-aed22553124e994a54385f36c5d5ce1467b3cd92.tar.gz
SERVER-57642 Configure $sample pushdown PlanExecutor to use 'INTERRUPT_ONLY' yield policy when running in a transaction
(cherry picked from commit d2448e3da8a121955d5cb2bdbb50c8f2c1e9f6ca) (cherry picked from commit 7984847de09877d062c66373ebf108f1b24de077)
-rw-r--r--  jstests/noPassthrough/sample_pushdown_transaction.js  52
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp                   8
2 files changed, 58 insertions(+), 2 deletions(-)
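Background on the fix: when $sample is pushed down to a storage-engine random cursor, the resulting PlanExecutor was unconditionally built with the YIELD_AUTO yield policy, which periodically releases locks and abandons the current storage snapshot. A multi-document transaction must keep reading from the transaction's snapshot, so the executor is now built with INTERRUPT_ONLY, which checks for interruption at the usual points but never yields. The mongo-shell sketch below illustrates the affected scenario; the connection string and the 'test.demo' namespace are illustrative assumptions, not part of the patch (a replica set with WiredTiger is required, as in the test below).

// Illustrative sketch only; 'test.demo' and the connection string are assumptions.
const conn = new Mongo('localhost:27017');  // a replica-set primary
const demoDB = conn.getDB('test');

// More than 100 documents, with a sample size under 5% of the collection, so
// the optimizer can push $sample down to a storage-engine random cursor.
let docs = [];
for (let i = 0; i < 1000; ++i) {
    docs.push({a: i});
}
assert.commandWorked(demoDB.demo.insert(docs));

const session = conn.startSession({causalConsistency: false});
session.startTransaction();
// Before this patch the pushed-down plan ran with YIELD_AUTO and could try to
// yield mid-transaction; with the patch it runs under INTERRUPT_ONLY.
const sampled =
    session.getDatabase('test').demo.aggregate([{$sample: {size: 30}}]).toArray();
assert.gt(sampled.length, 0);
session.abortTransaction();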
diff --git a/jstests/noPassthrough/sample_pushdown_transaction.js b/jstests/noPassthrough/sample_pushdown_transaction.js
new file mode 100644
index 00000000000..16790447b01
--- /dev/null
+++ b/jstests/noPassthrough/sample_pushdown_transaction.js
@@ -0,0 +1,52 @@
+/**
+ * Verify that $sample push down works properly in a transaction. This test was designed to
+ * reproduce SERVER-57642.
+ *
+ * Requires WiredTiger for random cursor support.
+ * @tags: [requires_wiredtiger, requires_replication]
+ */
+(function() {
+'use strict';
+
+load('jstests/libs/analyze_plan.js');  // For aggPlanHasStage.
+
+// Set up.
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const collName = 'sample_pushdown';
+const dbName = 'test';
+const testDB = rst.getPrimary().getDB(dbName);
+const coll = testDB[collName];
+
+// In order to construct a plan that uses a storage engine random cursor, we not only need more
+// than 100 records in our collection, but we also need the sample size to be less than 5% of the
+// number of documents in our collection.
+const numDocs = 1000;
+const sampleSize = numDocs * .03;
+let docs = [];
+for (let i = 0; i < numDocs; ++i) {
+    docs.push({a: i});
+}
+assert.commandWorked(coll.insert(docs));
+const pipeline = [{$sample: {size: sampleSize}}, {$match: {a: {$gte: 0}}}];
+
+// Verify that our pipeline uses $sample push down.
+const explain = coll.explain().aggregate(pipeline);
+assert(aggPlanHasStage(explain, "$sampleFromRandomCursor"), tojson(explain));
+
+// Start the transaction.
+const session = testDB.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+session.startTransaction();
+
+// Run the pipeline.
+const randDocs = sessionDB[collName].aggregate(pipeline).toArray();
+
+// Verify that we have at least one result.
+assert.gt(randDocs.length, 0, tojson(randDocs));
+
+// Clean up.
+assert.commandWorked(session.abortTransaction_forTesting());
+rst.stopSet();
+})();
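A side note on the heuristic mentioned in the test's comment: the pushdown decision can be checked interactively with explain. Against the same 1000-document collection, a 3% sample should select the random-cursor plan and a 10% sample should not. A minimal sketch, assuming the test's 'coll' and the analyze_plan.js helpers are in scope:

// A 30-document sample (3% of 1000) should use the random-cursor pushdown.
const smallExplain = coll.explain().aggregate([{$sample: {size: 30}}]);
assert(aggPlanHasStage(smallExplain, "$sampleFromRandomCursor"), tojson(smallExplain));

// A 100-document sample (10% of 1000) exceeds the 5% threshold, so the
// pushdown stage should be absent from the plan.
const largeExplain = coll.explain().aggregate([{$sample: {size: 100}}]);
assert(!aggPlanHasStage(largeExplain, "$sampleFromRandomCursor"), tojson(largeExplain));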
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 24732eef7b4..c980a234ee5 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -167,8 +167,12 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
                                             minWorkAdvancedRatio);
     }
 
-    return PlanExecutor::make(
-        opCtx, std::move(ws), std::move(root), coll, PlanExecutor::YIELD_AUTO);
+    return PlanExecutor::make(opCtx,
+                              std::move(ws),
+                              std::move(root),
+                              coll,
+                              opCtx->inMultiDocumentTransaction() ? PlanExecutor::INTERRUPT_ONLY
+                                                                  : PlanExecutor::YIELD_AUTO);
 }
 
 StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> attemptToGetExecutor(