author     Cheahuychou Mao <mao.cheahuychou@gmail.com>       2022-06-07 16:05:54 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-06-07 17:31:57 +0000
commit     5edd6ea16733548aacdc25c5eefb0673dd98ea09 (patch)
tree       db7f4d7f2141808c3264ba9c13328e43bf733d24
parent     9927679687b9946b19492d6b9479259d0fca3b19 (diff)
download   mongo-5edd6ea16733548aacdc25c5eefb0673dd98ea09.tar.gz

SERVER-67029 Make aggregate command in internal_transactions* workloads robust against failover/shutdown or collection rename

(cherry picked from commit b6b89a30190ce3fb85111959c0773b730fd08aa3)

-rw-r--r--  jstests/concurrency/fsm_workloads/internal_transactions_resharding.js  |  6
-rw-r--r--  jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js   | 33
2 files changed, 33 insertions(+), 6 deletions(-)
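In short, the commit stops assuming the random-document aggregate always succeeds on the first try and instead retries it while a workload-specific "acceptable" error is returned. A condensed sketch of the resulting pattern follows; the wrapper name lookUpRandomDocument is illustrative only, and its body mirrors the getRandomDocument() change in internal_transactions_unsharded.js further down.

    // Illustrative wrapper (not part of the commit); mirrors the retry loop added below.
    function lookUpRandomDocument(db, aggregateCmdObj, isAcceptableAggregateCmdError) {
        let aggRes;
        assert.soon(() => {
            try {
                aggRes = db.runCommand(aggregateCmdObj);
                assert.commandWorked(aggRes);
                return true;
            } catch (e) {
                // Only retry errors the workload explicitly tolerates, e.g. QueryPlanKilled
                // from a getMore after a failover or after resharding renames the collection;
                // anything else is still a test failure.
                if (isAcceptableAggregateCmdError(aggRes)) {
                    return false;
                }
                throw e;
            }
        });
        return aggRes.cursor.firstBatch[0];
    }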
diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_resharding.js b/jstests/concurrency/fsm_workloads/internal_transactions_resharding.js
index d3c8858c04b..31c6e3df3b8 100644
--- a/jstests/concurrency/fsm_workloads/internal_transactions_resharding.js
+++ b/jstests/concurrency/fsm_workloads/internal_transactions_resharding.js
@@ -43,6 +43,12 @@ var $config = extendWorkload($config, function($config, $super) {
return doc;
};
+ $config.data.isAcceptableAggregateCmdError = function isAcceptableAggregateCmdError(res) {
+ // The aggregate command is expected to involve running getMore commands which are not
+ // retryable after a collection rename (done by resharding).
+ return res && (res.code == ErrorCodes.QueryPlanKilled);
+ };
+
$config.data.isAcceptableRetryError = function isAcceptableRetryError(res) {
// This workload does in-place resharding so a retry that is sent
// reshardingMinimumOperationDurationMillis after resharding completes is expected to fail
diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js b/jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js
index 26167c31160..e4136b12137 100644
--- a/jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js
+++ b/jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js
@@ -46,9 +46,9 @@ var $config = extendWorkload($config, function($config, $super) {
// The number of documents assigned to a thread when the workload starts.
$config.data.partitionSize = 200;
- // The batch size for the find command to look up the documents assigned to a thread. Use a
- // large batch size so that a getMore command is never needed since getMore is not retryable and
- // so running it is not allowed in the suites with stepdown/kill/terminate.
+ // The batch size for the find command used for looking up the documents assigned to a thread.
+ // Use a large batch size so that a getMore command is never needed since getMore is not
+ // retryable after network errors.
$config.data.batchSizeForDocsLookUp = 1000;
// The counter values for the documents assigned to a thread. The map is populated during
// the init state and is updated after every write in the other states. Used to verify that
@@ -190,6 +190,17 @@ var $config = extendWorkload($config, function($config, $super) {
return {_id: doc._id, tid: this.tid};
};
+ /**
+ * Returns true if 'res' contains an acceptable error for the aggregate command used to look up
+ * a random document.
+ */
+ $config.data.isAcceptableAggregateCmdError = function isAcceptableAggregateCmdError(res) {
+ // The aggregate command is expected to involve running getMore commands which are not
+ // retryable after network errors.
+ return TestData.runningWithShardStepdowns && res &&
+ (res.code == ErrorCodes.QueryPlanKilled);
+ };
+
$config.data.getRandomDocument = function getRandomDocument(db, collName) {
const aggregateCmdObj = {
aggregate: collName,
@@ -210,7 +221,19 @@ var $config = extendWorkload($config, function($config, $super) {
while (numTries < numDocs) {
print("Finding a random document " +
tojsononeline({aggregateCmdObj, numTries, numDocs}));
- const aggRes = assert.commandWorked(db.runCommand(aggregateCmdObj));
+ let aggRes;
+ assert.soon(() => {
+ try {
+ aggRes = db.runCommand(aggregateCmdObj);
+ assert.commandWorked(aggRes);
+ return true;
+ } catch (e) {
+ if (this.isAcceptableAggregateCmdError(aggRes)) {
+ return false;
+ }
+ throw e;
+ }
+ });
const doc = aggRes.cursor.firstBatch[0];
print("Found a random document " +
tojsononeline({doc, isDirty: this.isDirtyDocument(doc)}));
@@ -607,8 +630,6 @@ var $config = extendWorkload($config, function($config, $super) {
// "snapshot").
fsm.forceRunningOutsideTransaction(this);
- // Run the find command with batch size equal to the number of documents + 1 to avoid
- // running getMore commands as getMore's are not retryable upon network errors.
const numDocsExpected = Object.keys(this.expectedCounters).length;
const findCmdObj = {
find: collName,
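For reference, the two overrides added above differ only in their guard: the base (unsharded) workload tolerates the error only when the suite actually runs with shard stepdowns, while the resharding workload tolerates it unconditionally because resharding renames the collection regardless of stepdowns. A condensed comparison, with the named-function wrappers and extendWorkload scaffolding omitted:

    // internal_transactions_unsharded.js: only acceptable when running with stepdowns.
    $config.data.isAcceptableAggregateCmdError = function(res) {
        return TestData.runningWithShardStepdowns && res &&
            (res.code == ErrorCodes.QueryPlanKilled);
    };

    // internal_transactions_resharding.js: always acceptable, since resharding renames
    // the collection even in suites without stepdowns.
    $config.data.isAcceptableAggregateCmdError = function(res) {
        return res && (res.code == ErrorCodes.QueryPlanKilled);
    };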