summaryrefslogtreecommitdiff
path: root/jstests/libs
diff options
context:
space:
mode:
authorclang-format-7.0.1 <adam.martin@10gen.com>2019-07-26 18:42:24 -0400
committerADAM David Alan Martin <adam.martin@10gen.com>2019-07-26 18:42:24 -0400
commitc1a45ebbb0530e3d0201321d725527f1eb83ffce (patch)
treef523079dc5ded3052eefbdcaae424b7502df5b25 /jstests/libs
parentc9599d8610c3da0b7c3da65667aff821063cf5b9 (diff)
downloadmongo-c1a45ebbb0530e3d0201321d725527f1eb83ffce.tar.gz
Apply formatting per `clang-format-7.0.1`
Diffstat (limited to 'jstests/libs')
-rw-r--r--jstests/libs/change_stream_util.js1
-rw-r--r--jstests/libs/check_log.js234
-rw-r--r--jstests/libs/csrs_upgrade_util.js11
-rw-r--r--jstests/libs/dateutil.js1
-rw-r--r--jstests/libs/feature_compatibility_version.js3
-rw-r--r--jstests/libs/fsm_serial_client.js6
-rw-r--r--jstests/libs/geo_near_random.js3
-rw-r--r--jstests/libs/get_index_helpers.js11
-rw-r--r--jstests/libs/json_schema_test_runner.js74
-rw-r--r--jstests/libs/jstestfuzz/check_for_interrupt_hook.js64
-rw-r--r--jstests/libs/kill_sessions.js257
-rw-r--r--jstests/libs/mongoebench.js1
-rw-r--r--jstests/libs/mql_model_mongod_test_runner.js80
-rw-r--r--jstests/libs/override_methods/causally_consistent_index_builds.js72
-rw-r--r--jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js50
-rw-r--r--jstests/libs/override_methods/check_uuids_consistent_across_cluster.js2
-rw-r--r--jstests/libs/override_methods/continuous_stepdown.js637
-rw-r--r--jstests/libs/override_methods/detect_spawning_own_mongod.js58
-rw-r--r--jstests/libs/override_methods/enable_causal_consistency.js14
-rw-r--r--jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js10
-rw-r--r--jstests/libs/override_methods/enable_sessions.js98
-rw-r--r--jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js75
-rw-r--r--jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js10
-rw-r--r--jstests/libs/override_methods/find_batch_size.js16
-rw-r--r--jstests/libs/override_methods/implicit_whole_cluster_changestreams.js8
-rw-r--r--jstests/libs/override_methods/implicit_whole_db_changestreams.js87
-rw-r--r--jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js233
-rw-r--r--jstests/libs/override_methods/implicitly_retry_on_database_drop_pending.js321
-rw-r--r--jstests/libs/override_methods/implicitly_shard_accessed_collections.js315
-rw-r--r--jstests/libs/override_methods/implicitly_wrap_pipelines_in_facets.js107
-rw-r--r--jstests/libs/override_methods/mongos_manual_intervention_actions.js103
-rw-r--r--jstests/libs/override_methods/network_error_and_txn_override.js1904
-rw-r--r--jstests/libs/override_methods/retry_writes_at_least_once.js77
-rw-r--r--jstests/libs/override_methods/set_read_and_write_concerns.js242
-rw-r--r--jstests/libs/override_methods/set_read_preference_secondary.js287
-rw-r--r--jstests/libs/override_methods/sharding_continuous_config_stepdown.js57
-rw-r--r--jstests/libs/override_methods/txn_passthrough_cmd_massage.js106
-rw-r--r--jstests/libs/override_methods/validate_collections_on_shutdown.js190
-rw-r--r--jstests/libs/test_background_ops.js26
-rw-r--r--jstests/libs/transactions_util.js2
-rw-r--r--jstests/libs/txns/txn_passthrough_runner.js18
-rw-r--r--jstests/libs/txns/txn_passthrough_runner_selftest.js41
42 files changed, 2927 insertions, 2985 deletions
diff --git a/jstests/libs/change_stream_util.js b/jstests/libs/change_stream_util.js
index e1914ab3ce5..c505e47f39f 100644
--- a/jstests/libs/change_stream_util.js
+++ b/jstests/libs/change_stream_util.js
@@ -305,7 +305,6 @@ function ChangeStreamTest(_db, name = "ChangeStreamTest") {
}));
}
}
-
};
/**
diff --git a/jstests/libs/check_log.js b/jstests/libs/check_log.js
index 9940924e2dd..c21a885c2db 100644
--- a/jstests/libs/check_log.js
+++ b/jstests/libs/check_log.js
@@ -4,134 +4,134 @@
var checkLog;
(function() {
- "use strict";
+"use strict";
- if (checkLog) {
- return; // Protect against this file being double-loaded.
- }
+if (checkLog) {
+ return; // Protect against this file being double-loaded.
+}
- checkLog = (function() {
- let getGlobalLog = function(conn) {
- let cmdRes;
- try {
- cmdRes = conn.adminCommand({getLog: 'global'});
- } catch (e) {
- // Retry with network errors.
- print("checkLog ignoring failure: " + e);
- return null;
- }
+checkLog = (function() {
+ let getGlobalLog = function(conn) {
+ let cmdRes;
+ try {
+ cmdRes = conn.adminCommand({getLog: 'global'});
+ } catch (e) {
+ // Retry with network errors.
+ print("checkLog ignoring failure: " + e);
+ return null;
+ }
- return assert.commandWorked(cmdRes).log;
- };
+ return assert.commandWorked(cmdRes).log;
+ };
- /*
- * Calls the 'getLog' function on the provided connection 'conn' to see if the provided msg
- * is found in the logs. Note: this function does not throw an exception, so the return
- * value should not be ignored.
- */
- const checkContainsOnce = function(conn, msg) {
- const logMessages = getGlobalLog(conn);
- if (logMessages === null) {
- return false;
- }
- for (let logMsg of logMessages) {
- if (logMsg.includes(msg)) {
- return true;
- }
- }
+ /*
+ * Calls the 'getLog' function on the provided connection 'conn' to see if the provided msg
+ * is found in the logs. Note: this function does not throw an exception, so the return
+ * value should not be ignored.
+ */
+ const checkContainsOnce = function(conn, msg) {
+ const logMessages = getGlobalLog(conn);
+ if (logMessages === null) {
return false;
- };
+ }
+ for (let logMsg of logMessages) {
+ if (logMsg.includes(msg)) {
+ return true;
+ }
+ }
+ return false;
+ };
- /*
- * Calls the 'getLog' function at regular intervals on the provided connection 'conn' until
- * the provided 'msg' is found in the logs, or it times out. Throws an exception on timeout.
- */
- let contains = function(conn, msg, timeout = 5 * 60 * 1000) {
- assert.soon(function() {
- return checkContainsOnce(conn, msg);
- }, 'Could not find log entries containing the following message: ' + msg, timeout, 300);
- };
+ /*
+ * Calls the 'getLog' function at regular intervals on the provided connection 'conn' until
+ * the provided 'msg' is found in the logs, or it times out. Throws an exception on timeout.
+ */
+ let contains = function(conn, msg, timeout = 5 * 60 * 1000) {
+ assert.soon(function() {
+ return checkContainsOnce(conn, msg);
+ }, 'Could not find log entries containing the following message: ' + msg, timeout, 300);
+ };
- /*
- * Calls the 'getLog' function at regular intervals on the provided connection 'conn' until
- * the provided 'msg' is found in the logs 'expectedCount' times, or it times out.
- * Throws an exception on timeout. If 'exact' is true, checks whether the count is exactly
- * equal to 'expectedCount'. Otherwise, checks whether the count is at least equal to
- * 'expectedCount'. Early returns when at least 'expectedCount' entries are found.
- */
- let containsWithCount = function(
- conn, msg, expectedCount, timeout = 5 * 60 * 1000, exact = true) {
- let expectedStr = exact ? 'exactly ' : 'at least ';
- assert.soon(
- function() {
- let count = 0;
- let logMessages = getGlobalLog(conn);
- if (logMessages === null) {
- return false;
+ /*
+ * Calls the 'getLog' function at regular intervals on the provided connection 'conn' until
+ * the provided 'msg' is found in the logs 'expectedCount' times, or it times out.
+ * Throws an exception on timeout. If 'exact' is true, checks whether the count is exactly
+ * equal to 'expectedCount'. Otherwise, checks whether the count is at least equal to
+ * 'expectedCount'. Early returns when at least 'expectedCount' entries are found.
+ */
+ let containsWithCount = function(
+ conn, msg, expectedCount, timeout = 5 * 60 * 1000, exact = true) {
+ let expectedStr = exact ? 'exactly ' : 'at least ';
+ assert.soon(
+ function() {
+ let count = 0;
+ let logMessages = getGlobalLog(conn);
+ if (logMessages === null) {
+ return false;
+ }
+ for (let i = 0; i < logMessages.length; i++) {
+ if (logMessages[i].indexOf(msg) != -1) {
+ count++;
}
- for (let i = 0; i < logMessages.length; i++) {
- if (logMessages[i].indexOf(msg) != -1) {
- count++;
- }
- if (!exact && count >= expectedCount) {
- print("checkLog found at least " + expectedCount +
- " log entries containing the following message: " + msg);
- return true;
- }
+ if (!exact && count >= expectedCount) {
+ print("checkLog found at least " + expectedCount +
+ " log entries containing the following message: " + msg);
+ return true;
}
+ }
- return exact ? expectedCount === count : expectedCount <= count;
- },
- 'Did not find ' + expectedStr + expectedCount + ' log entries containing the ' +
- 'following message: ' + msg,
- timeout,
- 300);
- };
+ return exact ? expectedCount === count : expectedCount <= count;
+ },
+ 'Did not find ' + expectedStr + expectedCount + ' log entries containing the ' +
+ 'following message: ' + msg,
+ timeout,
+ 300);
+ };
- /*
- * Similar to containsWithCount, but checks whether there are at least 'expectedCount'
- * instances of 'msg' in the logs.
- */
- let containsWithAtLeastCount = function(conn, msg, expectedCount, timeout = 5 * 60 * 1000) {
- containsWithCount(conn, msg, expectedCount, timeout, /*exact*/ false);
- };
+ /*
+ * Similar to containsWithCount, but checks whether there are at least 'expectedCount'
+ * instances of 'msg' in the logs.
+ */
+ let containsWithAtLeastCount = function(conn, msg, expectedCount, timeout = 5 * 60 * 1000) {
+ containsWithCount(conn, msg, expectedCount, timeout, /*exact*/ false);
+ };
- /*
- * Converts a scalar or object to a string format suitable for matching against log output.
- * Field names are not quoted, and by default strings which are not within an enclosing
- * object are not escaped. Similarly, integer values without an enclosing object are
- * serialized as integers, while those within an object are serialized as floats to one
- * decimal point. NumberLongs are unwrapped prior to serialization.
- */
- const formatAsLogLine = function(value, escapeStrings, toDecimal) {
- if (typeof value === "string") {
- return (escapeStrings ? `"${value}"` : value);
- } else if (typeof value === "number") {
- return (Number.isInteger(value) && toDecimal ? value.toFixed(1) : value);
- } else if (value instanceof NumberLong) {
- return `${value}`.match(/NumberLong..(.*)../m)[1];
- } else if (typeof value !== "object") {
- return value;
- } else if (Object.keys(value).length === 0) {
- return Array.isArray(value) ? "[]" : "{}";
- }
- let serialized = [];
- escapeStrings = toDecimal = true;
- for (let fieldName in value) {
- const valueStr = formatAsLogLine(value[fieldName], escapeStrings, toDecimal);
- serialized.push(Array.isArray(value) ? valueStr : `${fieldName}: ${valueStr}`);
- }
- return (Array.isArray(value) ? `[ ${serialized.join(', ')} ]`
- : `{ ${serialized.join(', ')} }`);
- };
+ /*
+ * Converts a scalar or object to a string format suitable for matching against log output.
+ * Field names are not quoted, and by default strings which are not within an enclosing
+ * object are not escaped. Similarly, integer values without an enclosing object are
+ * serialized as integers, while those within an object are serialized as floats to one
+ * decimal point. NumberLongs are unwrapped prior to serialization.
+ */
+ const formatAsLogLine = function(value, escapeStrings, toDecimal) {
+ if (typeof value === "string") {
+ return (escapeStrings ? `"${value}"` : value);
+ } else if (typeof value === "number") {
+ return (Number.isInteger(value) && toDecimal ? value.toFixed(1) : value);
+ } else if (value instanceof NumberLong) {
+ return `${value}`.match(/NumberLong..(.*)../m)[1];
+ } else if (typeof value !== "object") {
+ return value;
+ } else if (Object.keys(value).length === 0) {
+ return Array.isArray(value) ? "[]" : "{}";
+ }
+ let serialized = [];
+ escapeStrings = toDecimal = true;
+ for (let fieldName in value) {
+ const valueStr = formatAsLogLine(value[fieldName], escapeStrings, toDecimal);
+ serialized.push(Array.isArray(value) ? valueStr : `${fieldName}: ${valueStr}`);
+ }
+ return (Array.isArray(value) ? `[ ${serialized.join(', ')} ]`
+ : `{ ${serialized.join(', ')} }`);
+ };
- return {
- getGlobalLog: getGlobalLog,
- checkContainsOnce: checkContainsOnce,
- contains: contains,
- containsWithCount: containsWithCount,
- containsWithAtLeastCount: containsWithAtLeastCount,
- formatAsLogLine: formatAsLogLine
- };
- })();
+ return {
+ getGlobalLog: getGlobalLog,
+ checkContainsOnce: checkContainsOnce,
+ contains: contains,
+ containsWithCount: containsWithCount,
+ containsWithAtLeastCount: containsWithAtLeastCount,
+ formatAsLogLine: formatAsLogLine
+ };
+})();
})();
diff --git a/jstests/libs/csrs_upgrade_util.js b/jstests/libs/csrs_upgrade_util.js
index 8a4b3582f9f..9d4d158eca2 100644
--- a/jstests/libs/csrs_upgrade_util.js
+++ b/jstests/libs/csrs_upgrade_util.js
@@ -1,9 +1,9 @@
/**
-* This file defines a class, CSRSUpgradeCoordinator, which contains logic for spinning up a
-* sharded cluster using SCCC config servers and for upgrading that cluster to CSRS.
-* Include this file and use the CSRSUpgradeCoordinator class in any targetted jstests of csrs
-* upgrade behavior.
-*/
+ * This file defines a class, CSRSUpgradeCoordinator, which contains logic for spinning up a
+ * sharded cluster using SCCC config servers and for upgrading that cluster to CSRS.
+ * Include this file and use the CSRSUpgradeCoordinator class in any targetted jstests of csrs
+ * upgrade behavior.
+ */
load("jstests/replsets/rslib.js");
@@ -204,5 +204,4 @@ var CSRSUpgradeCoordinator = function() {
jsTest.log("Shutting down final SCCC config server now that upgrade is complete");
MongoRunner.stopMongod(st.c1);
};
-
};
diff --git a/jstests/libs/dateutil.js b/jstests/libs/dateutil.js
index 485e07020ee..99e535eaa25 100644
--- a/jstests/libs/dateutil.js
+++ b/jstests/libs/dateutil.js
@@ -4,7 +4,6 @@
* Helpers for generating test dates for aggregations
*/
var DateUtil = (function() {
-
/**
* local function to add leading 0 to month or day if needed.
*/
diff --git a/jstests/libs/feature_compatibility_version.js b/jstests/libs/feature_compatibility_version.js
index b9bb718fe73..869dce125b7 100644
--- a/jstests/libs/feature_compatibility_version.js
+++ b/jstests/libs/feature_compatibility_version.js
@@ -46,8 +46,7 @@ function removeFCVDocument(adminDB) {
let dropOriginalAdminSystemVersionCollection =
{op: "c", ns: "admin.$cmd", ui: originalUUID, o: {drop: "admin.tmp_system_version"}};
assert.commandWorked(adminDB.runCommand({
- applyOps:
- [createNewAdminSystemVersionCollection, dropOriginalAdminSystemVersionCollection]
+ applyOps: [createNewAdminSystemVersionCollection, dropOriginalAdminSystemVersionCollection]
}));
res = adminDB.runCommand({listCollections: 1, filter: {name: "system.version"}});
diff --git a/jstests/libs/fsm_serial_client.js b/jstests/libs/fsm_serial_client.js
index 8279524d8e9..8c6c6fcb690 100644
--- a/jstests/libs/fsm_serial_client.js
+++ b/jstests/libs/fsm_serial_client.js
@@ -21,7 +21,7 @@ runWorkloadsSerially(workloadList.filter(function(file) {
{},
{dbNamePrefix: dbNamePrefix},
{
- keepExistingDatabases: true,
- dropDatabaseBlacklist: fsmDbBlacklist,
- validateCollections: validateCollectionsOnCleanup
+ keepExistingDatabases: true,
+ dropDatabaseBlacklist: fsmDbBlacklist,
+ validateCollections: validateCollectionsOnCleanup
});
diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js
index d5de7aa70a7..fd1fe36e799 100644
--- a/jstests/libs/geo_near_random.js
+++ b/jstests/libs/geo_near_random.js
@@ -24,7 +24,6 @@ GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds) {
(Random.rand() * (range - eps) + eps) + indexBounds.min
];
}
-
};
GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
@@ -74,7 +73,7 @@ GeoNearRandomTest.prototype.testPt = function(pt, opts) {
let last = runQuery(1);
for (var i = 2; i <= opts.nToTest; i++) {
let ret = runQuery(i);
- this.assertIsPrefix(last, ret, `Unexpected result when comparing ${i-1} and ${i}`);
+ this.assertIsPrefix(last, ret, `Unexpected result when comparing ${i - 1} and ${i}`);
// Make sure distances are in increasing order.
assert.gte(ret[ret.length - 1].dis, last[last.length - 1].dis);
diff --git a/jstests/libs/get_index_helpers.js b/jstests/libs/get_index_helpers.js
index 77468ab17cb..15a18fa7409 100644
--- a/jstests/libs/get_index_helpers.js
+++ b/jstests/libs/get_index_helpers.js
@@ -4,7 +4,6 @@
* Helpers for filtering the index specifications returned by DBCollection.prototype.getIndexes().
*/
var GetIndexHelpers = (function() {
-
/**
* Returns the index specification with the name 'indexName' if it is present in the
* 'indexSpecs' array, and returns null otherwise.
@@ -17,8 +16,8 @@ var GetIndexHelpers = (function() {
const found = indexSpecs.filter(spec => spec.name === indexName);
if (found.length > 1) {
- throw new Error("Found multiple indexes with name '" + indexName + "': " +
- tojson(indexSpecs));
+ throw new Error("Found multiple indexes with name '" + indexName +
+ "': " + tojson(indexSpecs));
}
return (found.length === 1) ? found[0] : null;
}
@@ -38,9 +37,9 @@ var GetIndexHelpers = (function() {
if (!collationWasSpecified) {
if (foundByKeyPattern.length > 1) {
- throw new Error("Found multiple indexes with key pattern " + tojson(keyPattern) +
- " and 'collation' parameter was not specified: " +
- tojson(indexSpecs));
+ throw new Error(
+ "Found multiple indexes with key pattern " + tojson(keyPattern) +
+ " and 'collation' parameter was not specified: " + tojson(indexSpecs));
}
return (foundByKeyPattern.length === 1) ? foundByKeyPattern[0] : null;
}
diff --git a/jstests/libs/json_schema_test_runner.js b/jstests/libs/json_schema_test_runner.js
index 8955f188c2a..b82cc9365ea 100644
--- a/jstests/libs/json_schema_test_runner.js
+++ b/jstests/libs/json_schema_test_runner.js
@@ -2,49 +2,47 @@
* Test runner responsible for parsing and executing a JSON-Schema-Test-Suite json file.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/assert_schema_match.js");
+load("jstests/libs/assert_schema_match.js");
- const coll = db.json_schema_test_corpus;
- coll.drop();
+const coll = db.json_schema_test_corpus;
+coll.drop();
- const jsonFilename = jsTestOptions().jsonSchemaTestFile;
+const jsonFilename = jsTestOptions().jsonSchemaTestFile;
- if (jsonFilename === undefined) {
- throw new Error('JSON Schema tests must be run through resmoke.py');
- }
+if (jsonFilename === undefined) {
+ throw new Error('JSON Schema tests must be run through resmoke.py');
+}
+
+function runSchemaTest(test, schema, banFromTopLevel) {
+ assert(test.hasOwnProperty("data"), "JSON Schema test requires 'data'");
+ assert(test.hasOwnProperty("valid"), "JSON Schema test requires 'valid'");
+ const data = test["data"];
+ const valid = test["valid"];
- function runSchemaTest(test, schema, banFromTopLevel) {
- assert(test.hasOwnProperty("data"), "JSON Schema test requires 'data'");
- assert(test.hasOwnProperty("valid"), "JSON Schema test requires 'valid'");
- const data = test["data"];
- const valid = test["valid"];
-
- try {
- assertSchemaMatch(coll,
- {properties: {schema_test_wrapper: schema}},
- {schema_test_wrapper: data},
- valid);
-
- // Run against a top-level schema if the data is an object, since MongoDB only stores
- // records as documents.
- // (Note: JS notion of an 'object' includes arrays and null.)
- if (typeof data === "object" && !Array.isArray(data) && data !== null &&
- banFromTopLevel !== true) {
- assertSchemaMatch(coll, schema, data, valid);
- }
- } catch (e) {
- throw new Error(tojson(e) + "\n\nJSON Schema test failed for schema " + tojson(schema) +
- " and data " + tojson(data));
+ try {
+ assertSchemaMatch(
+ coll, {properties: {schema_test_wrapper: schema}}, {schema_test_wrapper: data}, valid);
+
+ // Run against a top-level schema if the data is an object, since MongoDB only stores
+ // records as documents.
+ // (Note: JS notion of an 'object' includes arrays and null.)
+ if (typeof data === "object" && !Array.isArray(data) && data !== null &&
+ banFromTopLevel !== true) {
+ assertSchemaMatch(coll, schema, data, valid);
}
+ } catch (e) {
+ throw new Error(tojson(e) + "\n\nJSON Schema test failed for schema " + tojson(schema) +
+ " and data " + tojson(data));
}
-
- const testGroupList = JSON.parse(cat(jsonFilename));
- testGroupList.forEach(function(testGroup) {
- assert(testGroup.hasOwnProperty("schema"), "JSON Schema test requires a 'schema'");
- assert(testGroup.hasOwnProperty("tests"), "JSON Schema test requires a 'tests' list");
- testGroup["tests"].forEach(
- test => runSchemaTest(test, testGroup["schema"], testGroup["banFromTopLevel"]));
- });
+}
+
+const testGroupList = JSON.parse(cat(jsonFilename));
+testGroupList.forEach(function(testGroup) {
+ assert(testGroup.hasOwnProperty("schema"), "JSON Schema test requires a 'schema'");
+ assert(testGroup.hasOwnProperty("tests"), "JSON Schema test requires a 'tests' list");
+ testGroup["tests"].forEach(
+ test => runSchemaTest(test, testGroup["schema"], testGroup["banFromTopLevel"]));
+});
}());
diff --git a/jstests/libs/jstestfuzz/check_for_interrupt_hook.js b/jstests/libs/jstestfuzz/check_for_interrupt_hook.js
index 8612824a637..0a2a3add0d0 100644
--- a/jstests/libs/jstestfuzz/check_for_interrupt_hook.js
+++ b/jstests/libs/jstestfuzz/check_for_interrupt_hook.js
@@ -2,46 +2,46 @@
// the failpoint for the duration of the serverInfo section of the fuzzer's preamble.
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/jstestfuzz/hook_utils.js');
+load('jstests/libs/jstestfuzz/hook_utils.js');
- let threadName;
+let threadName;
- const disableCheckForInterruptFailFP = function() {
- // There is no synchronization between fuzzer clients so this hook cannot run with the
- // concurrent fuzzer.
- assert.eq(TestData.numTestClients,
- 1,
- 'Cannot run the check for interrupt hook when there is more than 1 client');
+const disableCheckForInterruptFailFP = function() {
+ // There is no synchronization between fuzzer clients so this hook cannot run with the
+ // concurrent fuzzer.
+ assert.eq(TestData.numTestClients,
+ 1,
+ 'Cannot run the check for interrupt hook when there is more than 1 client');
- const myUriRes = assert.commandWorked(db.runCommand({whatsmyuri: 1}));
- const myUri = myUriRes.you;
+ const myUriRes = assert.commandWorked(db.runCommand({whatsmyuri: 1}));
+ const myUri = myUriRes.you;
- const curOpRes = assert.commandWorked(db.adminCommand({currentOp: 1, client: myUri}));
- threadName = curOpRes.inprog[0].desc;
+ const curOpRes = assert.commandWorked(db.adminCommand({currentOp: 1, client: myUri}));
+ threadName = curOpRes.inprog[0].desc;
- assert.commandWorked(db.adminCommand({
- configureFailPoint: 'checkForInterruptFail',
- mode: 'off',
- }));
- };
+ assert.commandWorked(db.adminCommand({
+ configureFailPoint: 'checkForInterruptFail',
+ mode: 'off',
+ }));
+};
- const enableCheckForInterruptFailFP = function() {
- const chance = TestData.checkForInterruptFailpointChance;
+const enableCheckForInterruptFailFP = function() {
+ const chance = TestData.checkForInterruptFailpointChance;
- assert.gte(chance, 0, "checkForInterruptFailpointChance must be >= 0");
- assert.lte(chance, 1, "checkForInterruptFailpointChance must be <= 1");
+ assert.gte(chance, 0, "checkForInterruptFailpointChance must be >= 0");
+ assert.lte(chance, 1, "checkForInterruptFailpointChance must be <= 1");
- assert.commandWorked(db.adminCommand({
- configureFailPoint: 'checkForInterruptFail',
- mode: 'alwaysOn',
- data: {threadName, chance},
- }));
- };
+ assert.commandWorked(db.adminCommand({
+ configureFailPoint: 'checkForInterruptFail',
+ mode: 'alwaysOn',
+ data: {threadName, chance},
+ }));
+};
- defineFuzzerHooks({
- beforeServerInfo: disableCheckForInterruptFailFP,
- afterServerInfo: enableCheckForInterruptFailFP,
- });
+defineFuzzerHooks({
+ beforeServerInfo: disableCheckForInterruptFailFP,
+ afterServerInfo: enableCheckForInterruptFailFP,
+});
})();
diff --git a/jstests/libs/kill_sessions.js b/jstests/libs/kill_sessions.js
index 6ee642eddbc..ae9978c271e 100644
--- a/jstests/libs/kill_sessions.js
+++ b/jstests/libs/kill_sessions.js
@@ -403,41 +403,40 @@ var _kill_sessions_api_module = (function() {
});
[[
- // Verifies that we can killSessions by lsid
- "killSessions",
- function(x) {
- if (!x.uid) {
- return {
- id: x.id,
- uid: computeSHA256Block(""),
- };
- } else {
- return x;
- }
- }
+ // Verifies that we can killSessions by lsid
+ "killSessions",
+ function(x) {
+ if (!x.uid) {
+ return {
+ id: x.id,
+ uid: computeSHA256Block(""),
+ };
+ } else {
+ return x;
+ }
+ }
],
[
- // Verifies that we can kill by pattern by lsid
- "killAllSessionsByPattern",
- function(x) {
- if (!x.uid) {
- return {
- lsid: {
- id: x.id,
- uid: computeSHA256Block(""),
- }
- };
- } else {
- return {lsid: x};
- }
- }
+ // Verifies that we can kill by pattern by lsid
+ "killAllSessionsByPattern",
+ function(x) {
+ if (!x.uid) {
+ return {
+ lsid: {
+ id: x.id,
+ uid: computeSHA256Block(""),
+ }
+ };
+ } else {
+ return {lsid: x};
+ }
+ }
]].forEach(function(cmd) {
noAuth = noAuth.concat(makeNoAuthArgKill.apply({}, cmd));
});
KillSessionsTestHelper.runNoAuth = function(
clientToExecuteVia, clientToKillVia, clientsToVerifyVia) {
-
var fixture = new Fixture(clientToExecuteVia, clientToKillVia, clientsToVerifyVia);
for (var i = 0; i < noAuth.length; ++i) {
@@ -564,102 +563,102 @@ var _kill_sessions_api_module = (function() {
// Tests for makeAuthNoArgKill
[[
- // We can kill our own sessions
- "killSessions",
- "simple",
- "simple",
+ // We can kill our own sessions
+ "killSessions",
+ "simple",
+ "simple",
],
[
- // We can kill all sessions
- "killAllSessions",
- "simple",
- "killAny",
+ // We can kill all sessions
+ "killAllSessions",
+ "simple",
+ "killAny",
],
[
- // We can kill all sessions by pattern
- "killAllSessionsByPattern",
- "simple",
- "killAny",
+ // We can kill all sessions by pattern
+ "killAllSessionsByPattern",
+ "simple",
+ "killAny",
]].forEach(function(cmd) {
auth = auth.concat(makeAuthNoArgKill.apply({}, cmd));
});
// Tests for makeAuthArgKill
[[
- // We can kill our own sessions by id (spoofing our own id)
- "killSessions",
- "simple",
- "simple",
- "killAny",
- function() {
- return function(x) {
- if (!x.uid) {
- return {
- id: x.id,
- uid: computeSHA256Block("simple@admin"),
- };
- } else {
- return x;
- }
- };
- }
+ // We can kill our own sessions by id (spoofing our own id)
+ "killSessions",
+ "simple",
+ "simple",
+ "killAny",
+ function() {
+ return function(x) {
+ if (!x.uid) {
+ return {
+ id: x.id,
+ uid: computeSHA256Block("simple@admin"),
+ };
+ } else {
+ return x;
+ }
+ };
+ }
],
[
- // We can kill our own sessions without spoofing
- "killSessions",
- "simple",
- "simple",
- "simple",
- function() {
- return function(x) {
- return x;
- };
- }
+ // We can kill our own sessions without spoofing
+ "killSessions",
+ "simple",
+ "simple",
+ "simple",
+ function() {
+ return function(x) {
+ return x;
+ };
+ }
],
[
- // We can kill by pattern by id
- "killAllSessionsByPattern",
- "simple",
- "simple",
- "killAny",
- function() {
- return function(x) {
- if (!x.uid) {
- return {
- lsid: {
- id: x.id,
- uid: computeSHA256Block("simple@admin"),
- }
- };
- } else {
- return {lsid: x};
- }
- };
- }
+ // We can kill by pattern by id
+ "killAllSessionsByPattern",
+ "simple",
+ "simple",
+ "killAny",
+ function() {
+ return function(x) {
+ if (!x.uid) {
+ return {
+ lsid: {
+ id: x.id,
+ uid: computeSHA256Block("simple@admin"),
+ }
+ };
+ } else {
+ return {lsid: x};
+ }
+ };
+ }
],
[
- // We can kill any by user
- "killAllSessions",
- "simple",
- "simple2",
- "killAny",
- function(user) {
- return function(x) {
- return {db: "admin", user: user};
- };
- }
+ // We can kill any by user
+ "killAllSessions",
+ "simple",
+ "simple2",
+ "killAny",
+ function(user) {
+ return function(x) {
+ return {db: "admin", user: user};
+ };
+ }
],
[
- // We can kill any by pattern by user
- "killAllSessionsByPattern",
- "simple",
- "simple2",
- "killAny",
- function(user) {
- return function(x) {
- return {uid: computeSHA256Block(user + "@admin")};
- };
- }
+ // We can kill any by pattern by user
+ "killAllSessionsByPattern",
+ "simple",
+ "simple2",
+ "killAny",
+ function(user) {
+ return function(x) {
+ return {uid: computeSHA256Block(user + "@admin")};
+ };
+ }
]].forEach(function(cmd) {
auth = auth.concat(makeAuthArgKill.apply({}, cmd));
});
@@ -683,32 +682,32 @@ var _kill_sessions_api_module = (function() {
// Tests for makeAuthArgKillFailure
[[
- // We can't kill another users sessions
- "killSessions",
- "simple",
- "simple2",
- function(user) {
- return function(x) {
- return {
- id: x.id,
- uid: computeSHA256Block(user + "@admin"),
- };
- };
- },
+ // We can't kill another users sessions
+ "killSessions",
+ "simple",
+ "simple2",
+ function(user) {
+ return function(x) {
+ return {
+ id: x.id,
+ uid: computeSHA256Block(user + "@admin"),
+ };
+ };
+ },
],
[
- // We can't impersonate without impersonate
- "killAllSessionsByPattern",
- "simple",
- "killAny",
- function(user) {
- return function(x) {
- return {
- users: {},
- roles: {},
- };
- };
- },
+ // We can't impersonate without impersonate
+ "killAllSessionsByPattern",
+ "simple",
+ "killAny",
+ function(user) {
+ return function(x) {
+ return {
+ users: {},
+ roles: {},
+ };
+ };
+ },
]].forEach(function(cmd) {
auth = auth.concat(makeAuthArgKillFailure.apply({}, cmd));
});
diff --git a/jstests/libs/mongoebench.js b/jstests/libs/mongoebench.js
index f6feb4eb9f0..a0d6f1b512d 100644
--- a/jstests/libs/mongoebench.js
+++ b/jstests/libs/mongoebench.js
@@ -1,7 +1,6 @@
"use strict";
var {runMongoeBench} = (function() {
-
/**
* Spawns a mongoebench process with the specified options.
*
diff --git a/jstests/libs/mql_model_mongod_test_runner.js b/jstests/libs/mql_model_mongod_test_runner.js
index f19e2ce1f12..4485c81cdc1 100644
--- a/jstests/libs/mql_model_mongod_test_runner.js
+++ b/jstests/libs/mql_model_mongod_test_runner.js
@@ -2,56 +2,56 @@
* Test runner responsible for parsing and executing a MQL MongoD model test json file.
*/
(function() {
- "use strict";
+"use strict";
- const jsonFilename = jsTestOptions().mqlTestFile;
- const mqlRootPath = jsTestOptions().mqlRootPath;
+const jsonFilename = jsTestOptions().mqlTestFile;
+const mqlRootPath = jsTestOptions().mqlRootPath;
- if (jsonFilename === undefined) {
- throw new Error('Undefined JSON file name: MQL Model tests must be run through resmoke.py');
- }
+if (jsonFilename === undefined) {
+ throw new Error('Undefined JSON file name: MQL Model tests must be run through resmoke.py');
+}
- // Populate collections with data fetched from the dataFile.
- function populateCollections(dataFile) {
- const data = JSON.parse(cat(mqlRootPath + dataFile));
+// Populate collections with data fetched from the dataFile.
+function populateCollections(dataFile) {
+ const data = JSON.parse(cat(mqlRootPath + dataFile));
- data.forEach(function(singleColl) {
- assert(singleColl.hasOwnProperty("namespace"), "MQL data model requires a 'namespace'");
- assert(singleColl.hasOwnProperty("data"), "MQL data model requires a 'data'");
+ data.forEach(function(singleColl) {
+ assert(singleColl.hasOwnProperty("namespace"), "MQL data model requires a 'namespace'");
+ assert(singleColl.hasOwnProperty("data"), "MQL data model requires a 'data'");
- const coll = db.getCollection(singleColl["namespace"]);
- coll.drop();
+ const coll = db.getCollection(singleColl["namespace"]);
+ coll.drop();
- singleColl["data"].forEach(function(doc) {
- assert.commandWorked(coll.insert(doc));
- });
+ singleColl["data"].forEach(function(doc) {
+ assert.commandWorked(coll.insert(doc));
});
- }
+ });
+}
- // Run a single find test.
- function runFindTest(testFile, dataFile, expected) {
- populateCollections(dataFile);
+// Run a single find test.
+function runFindTest(testFile, dataFile, expected) {
+ populateCollections(dataFile);
- const test = JSON.parse(cat(mqlRootPath + testFile));
+ const test = JSON.parse(cat(mqlRootPath + testFile));
- const results = db.getCollection(test["find"]).find(test["filter"], {_id: 0}).toArray();
+ const results = db.getCollection(test["find"]).find(test["filter"], {_id: 0}).toArray();
- assert.eq(results, expected);
- }
+ assert.eq(results, expected);
+}
- // Read a list of tests from the jsonFilename and execute them.
- const testList = JSON.parse(cat(jsonFilename));
- testList.forEach(function(singleTest) {
- if (singleTest.hasOwnProperty("match")) {
- // Skip the match test type as it is not directly supported by mongod.
- } else if (singleTest.hasOwnProperty("find")) {
- // Run the find test type.
- assert(singleTest.hasOwnProperty("data"), "MQL model test requires a 'data'");
- assert(singleTest.hasOwnProperty("expected"), "MQL model test requires a 'expected'");
-
- runFindTest(singleTest["find"], singleTest["data"], singleTest["expected"]);
- } else {
- throw new Error("Unknown test type: " + tojson(singleTest));
- }
- });
+// Read a list of tests from the jsonFilename and execute them.
+const testList = JSON.parse(cat(jsonFilename));
+testList.forEach(function(singleTest) {
+ if (singleTest.hasOwnProperty("match")) {
+ // Skip the match test type as it is not directly supported by mongod.
+ } else if (singleTest.hasOwnProperty("find")) {
+ // Run the find test type.
+ assert(singleTest.hasOwnProperty("data"), "MQL model test requires a 'data'");
+ assert(singleTest.hasOwnProperty("expected"), "MQL model test requires a 'expected'");
+
+ runFindTest(singleTest["find"], singleTest["data"], singleTest["expected"]);
+ } else {
+ throw new Error("Unknown test type: " + tojson(singleTest));
+ }
+});
}());
diff --git a/jstests/libs/override_methods/causally_consistent_index_builds.js b/jstests/libs/override_methods/causally_consistent_index_builds.js
index ec20a87f588..cacd1312f80 100644
--- a/jstests/libs/override_methods/causally_consistent_index_builds.js
+++ b/jstests/libs/override_methods/causally_consistent_index_builds.js
@@ -3,48 +3,48 @@
* TODO: SERVER-38961 This override is not necessary when two-phase index builds are complete.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/override_methods/override_helpers.js");
- // This override runs a collMod after a createIndexes command. After collMod completes
- // we can guarantee the background index build started earlier has also completed. We update the
- // command response operationTime and $clusterTime so causally consistent reads only read from
- // that point onwards.
- function runCommandWithCollMod(conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
+// This override runs a collMod after a createIndexes command. After collMod completes
+// we can guarantee the background index build started earlier has also completed. We update the
+// command response operationTime and $clusterTime so causally consistent reads only read from
+// that point onwards.
+function runCommandWithCollMod(conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
- let res = func.apply(conn, makeFuncArgs(commandObj));
- if (commandName !== "createIndexes") {
- return res;
- }
- if (!res.ok) {
- return res;
- }
+ let res = func.apply(conn, makeFuncArgs(commandObj));
+ if (commandName !== "createIndexes") {
+ return res;
+ }
+ if (!res.ok) {
+ return res;
+ }
- let collModCmd = {collMod: commandObj[commandName]};
- let collModRes = func.apply(conn, makeFuncArgs(collModCmd));
+ let collModCmd = {collMod: commandObj[commandName]};
+ let collModRes = func.apply(conn, makeFuncArgs(collModCmd));
- // If a follow-up collMod fails, another command was likely able to execute after the
- // createIndexes command. That means it is safe to use the latest operationTime for
- // causal consistency purposes.
- if (!collModRes.ok) {
- print('note: ignoring collMod failure after sending createIndex command: ' +
- tojson(collModRes));
- }
+ // If a follow-up collMod fails, another command was likely able to execute after the
+ // createIndexes command. That means it is safe to use the latest operationTime for
+ // causal consistency purposes.
+ if (!collModRes.ok) {
+ print('note: ignoring collMod failure after sending createIndex command: ' +
+ tojson(collModRes));
+ }
- // Overwrite the createIndex command's operation and cluster times, so that the owning
- // session can perform causal reads.
- if (collModRes.hasOwnProperty("operationTime")) {
- res.operationTime = collModRes["operationTime"];
- }
- if (collModRes.hasOwnProperty("$clusterTime")) {
- res.$clusterTime = collModRes["$clusterTime"];
- }
- return res;
+ // Overwrite the createIndex command's operation and cluster times, so that the owning
+ // session can perform causal reads.
+ if (collModRes.hasOwnProperty("operationTime")) {
+ res.operationTime = collModRes["operationTime"];
+ }
+ if (collModRes.hasOwnProperty("$clusterTime")) {
+ res.$clusterTime = collModRes["$clusterTime"];
}
+ return res;
+}
- OverrideHelpers.overrideRunCommand(runCommandWithCollMod);
+OverrideHelpers.overrideRunCommand(runCommandWithCollMod);
})();
diff --git a/jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js b/jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js
index 355e3f53a55..57ab445896d 100644
--- a/jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js
+++ b/jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js
@@ -6,38 +6,36 @@
* InvalidOptions or TransientTransactionError.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/error_code_utils.js");
- load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/error_code_utils.js");
+load("jstests/libs/override_methods/override_helpers.js");
- function runCommandCheckForOperationNotSupportedInTransaction(
- conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- let res = func.apply(conn, makeFuncArgs(commandObj));
- const isTransient =
- (res.errorLabels && res.errorLabels.includes('TransientTransactionError') &&
- !includesErrorCode(res, ErrorCodes.NoSuchTransaction));
+function runCommandCheckForOperationNotSupportedInTransaction(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ let res = func.apply(conn, makeFuncArgs(commandObj));
+ const isTransient = (res.errorLabels && res.errorLabels.includes('TransientTransactionError') &&
+ !includesErrorCode(res, ErrorCodes.NoSuchTransaction));
- const isNotSupported =
- (includesErrorCode(res, ErrorCodes.OperationNotSupportedInTransaction) ||
- includesErrorCode(res, ErrorCodes.InvalidOptions));
+ const isNotSupported = (includesErrorCode(res, ErrorCodes.OperationNotSupportedInTransaction) ||
+ includesErrorCode(res, ErrorCodes.InvalidOptions));
- if (isTransient || isNotSupported) {
- // Generate an exception, store some info for fsm.js to inspect, and rethrow.
- try {
- assert.commandWorked(res);
- } catch (ex) {
- ex.isTransient = isTransient;
- ex.isNotSupported = isNotSupported;
- throw ex;
- }
+ if (isTransient || isNotSupported) {
+ // Generate an exception, store some info for fsm.js to inspect, and rethrow.
+ try {
+ assert.commandWorked(res);
+ } catch (ex) {
+ ex.isTransient = isTransient;
+ ex.isNotSupported = isNotSupported;
+ throw ex;
}
-
- return res;
}
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js");
+ return res;
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/check_for_operation_not_supported_in_transaction.js");
- OverrideHelpers.overrideRunCommand(runCommandCheckForOperationNotSupportedInTransaction);
+OverrideHelpers.overrideRunCommand(runCommandCheckForOperationNotSupportedInTransaction);
})();
diff --git a/jstests/libs/override_methods/check_uuids_consistent_across_cluster.js b/jstests/libs/override_methods/check_uuids_consistent_across_cluster.js
index ec9894303b4..4e3b6ae8599 100644
--- a/jstests/libs/override_methods/check_uuids_consistent_across_cluster.js
+++ b/jstests/libs/override_methods/check_uuids_consistent_across_cluster.js
@@ -123,7 +123,7 @@ ShardingTest.prototype.checkUUIDsConsistentAcrossCluster = function() {
for (let authoritativeCollMetadata of authoritativeCollMetadataArr) {
const ns = authoritativeCollMetadata._id;
- const[dbName, collName] = parseNs(ns);
+ const [dbName, collName] = parseNs(ns);
for (let shardConnString of authoritativeCollMetadata.shardConnStrings) {
// A connection the shard may not be cached in ShardingTest if the shard was added
diff --git a/jstests/libs/override_methods/continuous_stepdown.js b/jstests/libs/override_methods/continuous_stepdown.js
index cbd5687dbda..9c7881f9d4a 100644
--- a/jstests/libs/override_methods/continuous_stepdown.js
+++ b/jstests/libs/override_methods/continuous_stepdown.js
@@ -28,392 +28,387 @@
let ContinuousStepdown;
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/parallelTester.js"); // ScopedThread and CountDownLatch
- load("jstests/replsets/rslib.js"); // reconfig
+load("jstests/libs/parallelTester.js"); // ScopedThread and CountDownLatch
+load("jstests/replsets/rslib.js"); // reconfig
+
+/**
+ * Helper class to manage the ScopedThread instance that will continuously step down the primary
+ * node.
+ */
+const StepdownThread = function() {
+ let _counter = null;
+ let _thread = null;
/**
- * Helper class to manage the ScopedThread instance that will continuously step down the primary
- * node.
+ * This function is intended to be called in a separate thread and it continuously
+ * steps down the current primary for a number of attempts.
+ *
+ * @param {CountDownLatch} stopCounter Object, which can be used to stop the thread.
+ *
+ * @param {string} seedNode The connection string of a node from which to discover
+ * the primary of the replica set.
+ *
+ * @param {Object} options Configuration object with the following fields:
+ * stepdownDurationSecs {integer}: The number of seconds after stepping down the
+ * primary for which the node is not re-electable.
+ * stepdownIntervalMS {integer}: The number of milliseconds to wait after
+ * issuing a step down command.
+ *
+ * @return Object with the following fields:
+ * ok {integer}: 0 if it failed, 1 if it succeeded.
+ * error {string}: Only present if ok == 0. Contains the cause for the error.
+ * stack {string}: Only present if ok == 0. Contains the stack at the time of
+ * the error.
*/
- const StepdownThread = function() {
- let _counter = null;
- let _thread = null;
-
- /**
- * This function is intended to be called in a separate thread and it continuously
- * steps down the current primary for a number of attempts.
- *
- * @param {CountDownLatch} stopCounter Object, which can be used to stop the thread.
- *
- * @param {string} seedNode The connection string of a node from which to discover
- * the primary of the replica set.
- *
- * @param {Object} options Configuration object with the following fields:
- * stepdownDurationSecs {integer}: The number of seconds after stepping down the
- * primary for which the node is not re-electable.
- * stepdownIntervalMS {integer}: The number of milliseconds to wait after
- * issuing a step down command.
- *
- * @return Object with the following fields:
- * ok {integer}: 0 if it failed, 1 if it succeeded.
- * error {string}: Only present if ok == 0. Contains the cause for the error.
- * stack {string}: Only present if ok == 0. Contains the stack at the time of
- * the error.
- */
- function _continuousPrimaryStepdownFn(stopCounter, seedNode, options) {
- "use strict";
+ function _continuousPrimaryStepdownFn(stopCounter, seedNode, options) {
+ "use strict";
- print("*** Continuous stepdown thread running with seed node " + seedNode);
+ print("*** Continuous stepdown thread running with seed node " + seedNode);
- try {
- // The config primary may unexpectedly step down during startup if under heavy
- // load and too slowly processing heartbeats.
- const replSet = new ReplSetTest(seedNode);
+ try {
+ // The config primary may unexpectedly step down during startup if under heavy
+ // load and too slowly processing heartbeats.
+ const replSet = new ReplSetTest(seedNode);
- let primary = replSet.getPrimary();
+ let primary = replSet.getPrimary();
- while (stopCounter.getCount() > 0) {
- print("*** Stepping down " + primary);
+ while (stopCounter.getCount() > 0) {
+ print("*** Stepping down " + primary);
- // The command may fail if the node is no longer primary or is in the process of
- // stepping down.
- assert.commandWorkedOrFailedWithCode(
- primary.adminCommand(
- {replSetStepDown: options.stepdownDurationSecs, force: true}),
- [ErrorCodes.NotMaster, ErrorCodes.ConflictingOperationInProgress]);
+ // The command may fail if the node is no longer primary or is in the process of
+ // stepping down.
+ assert.commandWorkedOrFailedWithCode(
+ primary.adminCommand(
+ {replSetStepDown: options.stepdownDurationSecs, force: true}),
+ [ErrorCodes.NotMaster, ErrorCodes.ConflictingOperationInProgress]);
- // Wait for primary to get elected and allow the test to make some progress
- // before attempting another stepdown.
- if (stopCounter.getCount() > 0) {
- primary = replSet.getPrimary();
- }
-
- if (stopCounter.getCount() > 0) {
- sleep(options.stepdownIntervalMS);
- }
+ // Wait for primary to get elected and allow the test to make some progress
+ // before attempting another stepdown.
+ if (stopCounter.getCount() > 0) {
+ primary = replSet.getPrimary();
}
- print("*** Continuous stepdown thread completed successfully");
- return {ok: 1};
- } catch (e) {
- print("*** Continuous stepdown thread caught exception: " + tojson(e));
- return {ok: 0, error: e.toString(), stack: e.stack};
+ if (stopCounter.getCount() > 0) {
+ sleep(options.stepdownIntervalMS);
+ }
}
+
+ print("*** Continuous stepdown thread completed successfully");
+ return {ok: 1};
+ } catch (e) {
+ print("*** Continuous stepdown thread caught exception: " + tojson(e));
+ return {ok: 0, error: e.toString(), stack: e.stack};
+ }
+ }
+
+ /**
+ * Returns true if the stepdown thread has been created and started.
+ */
+ this.hasStarted = function() {
+ return !!_thread;
+ };
+
+ /**
+ * Spawns a ScopedThread using the given seedNode to discover the replica set.
+ */
+ this.start = function(seedNode, options) {
+ if (_thread) {
+ throw new Error("Continuous stepdown thread is already active");
+ }
+
+ _counter = new CountDownLatch(1);
+ _thread = new ScopedThread(_continuousPrimaryStepdownFn, _counter, seedNode, options);
+ _thread.start();
+ };
+
+ /**
+ * Sets the stepdown thread's counter to 0, and waits for it to finish. Throws if the
+ * stepdown thread did not exit successfully.
+ */
+ this.stop = function() {
+ if (!_thread) {
+ throw new Error("Continuous stepdown thread is not active");
}
+ _counter.countDown();
+ _counter = null;
+
+ _thread.join();
+
+ const retVal = _thread.returnData();
+ _thread = null;
+
+ assert.commandWorked(retVal);
+ };
+};
+
+ContinuousStepdown = {};
+
+/**
+ * Defines two methods on ReplSetTest, startContinuousFailover and stopContinuousFailover, that
+ * allow starting and stopping a separate thread that will periodically step down the replica
+ * set's primary node. Also defines these methods on ShardingTest, which allow starting and
+ * stopping a stepdown thread for the test's config server replica set and each of the shard
+ * replica sets, as specified by the given stepdownOptions object.
+ */
+ContinuousStepdown.configure = function(stepdownOptions,
+ {verbositySetting: verbositySetting = {}} = {}) {
+ const defaultOptions = {
+ configStepdown: true,
+ electionTimeoutMS: 5 * 1000,
+ shardStepdown: true,
+ stepdownDurationSecs: 10,
+ stepdownIntervalMS: 8 * 1000,
+ catchUpTimeoutMS: 0,
+ };
+ stepdownOptions = Object.merge(defaultOptions, stepdownOptions);
+
+ verbositySetting = tojson(verbositySetting);
+
+ // Preserve the original ReplSetTest and ShardingTest constructors, because they are being
+ // overriden.
+ const originalReplSetTest = ReplSetTest;
+ const originalShardingTest = ShardingTest;
+
+ /**
+ * Overrides the ReplSetTest constructor to start the continuous primary stepdown thread.
+ */
+ ReplSetTest = function ReplSetTestWithContinuousPrimaryStepdown() {
+ // Construct the original object
+ originalReplSetTest.apply(this, arguments);
+
+ // Preserve the original versions of functions that are overrided below.
+ const _originalStartSetFn = this.startSet;
+ const _originalStopSetFn = this.stopSet;
+ const _originalAwaitLastOpCommitted = this.awaitLastOpCommitted;
+
/**
- * Returns true if the stepdown thread has been created and started.
+ * Overrides startSet call to increase logging verbosity.
*/
- this.hasStarted = function() {
- return !!_thread;
+ this.startSet = function() {
+ let options = arguments[0] || {};
+
+ if (typeof (options.setParameter) === "string") {
+ var eqIdx = options.setParameter.indexOf("=");
+ if (eqIdx != -1) {
+ var param = options.setParameter.substring(0, eqIdx);
+ var value = options.setParameter.substring(eqIdx + 1);
+ options.setParameter = {};
+ options.setParameter[param] = value;
+ }
+ }
+ arguments[0] = options;
+
+ options.setParameter = options.setParameter || {};
+ options.setParameter.logComponentVerbosity = verbositySetting;
+ return _originalStartSetFn.apply(this, arguments);
};
/**
- * Spawns a ScopedThread using the given seedNode to discover the replica set.
+ * Overrides stopSet to terminate the failover thread.
*/
- this.start = function(seedNode, options) {
- if (_thread) {
- throw new Error("Continuous stepdown thread is already active");
- }
+ this.stopSet = function() {
+ this.stopContinuousFailover({waitForPrimary: false});
+ _originalStopSetFn.apply(this, arguments);
+ };
- _counter = new CountDownLatch(1);
- _thread = new ScopedThread(_continuousPrimaryStepdownFn, _counter, seedNode, options);
- _thread.start();
+ /**
+ * Overrides awaitLastOpCommitted to retry on network errors.
+ */
+ this.awaitLastOpCommitted = function() {
+ return retryOnNetworkError(_originalAwaitLastOpCommitted.bind(this));
};
+ // Handle for the continuous stepdown thread.
+ const _stepdownThread = new StepdownThread();
+
/**
- * Sets the stepdown thread's counter to 0, and waits for it to finish. Throws if the
- * stepdown thread did not exit successfully.
+ * Reconfigures the replica set, then starts the stepdown thread. As part of the new
+ * config, this sets:
+ * - electionTimeoutMillis to stepdownOptions.electionTimeoutMS so a new primary can
+ * get elected before the stepdownOptions.stepdownIntervalMS period would cause one
+ * to step down again.
+ * - catchUpTimeoutMillis to stepdownOptions.catchUpTimeoutMS. Lower values increase
+ * the likelihood and volume of rollbacks.
*/
- this.stop = function() {
- if (!_thread) {
- throw new Error("Continuous stepdown thread is not active");
+ this.startContinuousFailover = function() {
+ if (_stepdownThread.hasStarted()) {
+ throw new Error("Continuous failover thread is already active");
}
- _counter.countDown();
- _counter = null;
+ const rsconfig = this.getReplSetConfigFromNode();
- _thread.join();
+ const shouldUpdateElectionTimeout =
+ (rsconfig.settings.electionTimeoutMillis !== stepdownOptions.electionTimeoutMS);
+ const shouldUpdateCatchUpTimeout =
+ (rsconfig.settings.catchUpTimeoutMillis !== stepdownOptions.catchUpTimeoutMS);
- const retVal = _thread.returnData();
- _thread = null;
+ if (shouldUpdateElectionTimeout || shouldUpdateCatchUpTimeout) {
+ rsconfig.settings.electionTimeoutMillis = stepdownOptions.electionTimeoutMS;
+ rsconfig.settings.catchUpTimeoutMillis = stepdownOptions.catchUpTimeoutMS;
- assert.commandWorked(retVal);
- };
- };
+ rsconfig.version += 1;
+ reconfig(this, rsconfig);
- ContinuousStepdown = {};
+ const newSettings = this.getReplSetConfigFromNode().settings;
- /**
- * Defines two methods on ReplSetTest, startContinuousFailover and stopContinuousFailover, that
- * allow starting and stopping a separate thread that will periodically step down the replica
- * set's primary node. Also defines these methods on ShardingTest, which allow starting and
- * stopping a stepdown thread for the test's config server replica set and each of the shard
- * replica sets, as specified by the given stepdownOptions object.
- */
- ContinuousStepdown.configure = function(stepdownOptions,
- {verbositySetting: verbositySetting = {}} = {}) {
- const defaultOptions = {
- configStepdown: true,
- electionTimeoutMS: 5 * 1000,
- shardStepdown: true,
- stepdownDurationSecs: 10,
- stepdownIntervalMS: 8 * 1000,
- catchUpTimeoutMS: 0,
- };
- stepdownOptions = Object.merge(defaultOptions, stepdownOptions);
-
- verbositySetting = tojson(verbositySetting);
+ assert.eq(newSettings.electionTimeoutMillis,
+ stepdownOptions.electionTimeoutMS,
+ "Failed to set the electionTimeoutMillis to " +
+ stepdownOptions.electionTimeoutMS + " milliseconds.");
+ assert.eq(newSettings.catchUpTimeoutMillis,
+ stepdownOptions.catchUpTimeoutMS,
+ "Failed to set the catchUpTimeoutMillis to " +
+ stepdownOptions.catchUpTimeoutMS + " milliseconds.");
+ }
- // Preserve the original ReplSetTest and ShardingTest constructors, because they are being
- // overriden.
- const originalReplSetTest = ReplSetTest;
- const originalShardingTest = ShardingTest;
+ _stepdownThread.start(this.nodes[0].host, stepdownOptions);
+ };
/**
- * Overrides the ReplSetTest constructor to start the continuous primary stepdown thread.
+ * Blocking method, which tells the thread running continuousPrimaryStepdownFn to stop
+ * and waits for it to terminate.
+ *
+ * If waitForPrimary is true, blocks until a new primary has been elected.
*/
- ReplSetTest = function ReplSetTestWithContinuousPrimaryStepdown() {
- // Construct the original object
- originalReplSetTest.apply(this, arguments);
-
- // Preserve the original versions of functions that are overrided below.
- const _originalStartSetFn = this.startSet;
- const _originalStopSetFn = this.stopSet;
- const _originalAwaitLastOpCommitted = this.awaitLastOpCommitted;
-
- /**
- * Overrides startSet call to increase logging verbosity.
- */
- this.startSet = function() {
- let options = arguments[0] || {};
-
- if (typeof(options.setParameter) === "string") {
- var eqIdx = options.setParameter.indexOf("=");
- if (eqIdx != -1) {
- var param = options.setParameter.substring(0, eqIdx);
- var value = options.setParameter.substring(eqIdx + 1);
- options.setParameter = {};
- options.setParameter[param] = value;
- }
- }
- arguments[0] = options;
-
- options.setParameter = options.setParameter || {};
- options.setParameter.logComponentVerbosity = verbositySetting;
- return _originalStartSetFn.apply(this, arguments);
- };
-
- /**
- * Overrides stopSet to terminate the failover thread.
- */
- this.stopSet = function() {
- this.stopContinuousFailover({waitForPrimary: false});
- _originalStopSetFn.apply(this, arguments);
- };
-
- /**
- * Overrides awaitLastOpCommitted to retry on network errors.
- */
- this.awaitLastOpCommitted = function() {
- return retryOnNetworkError(_originalAwaitLastOpCommitted.bind(this));
- };
-
- // Handle for the continuous stepdown thread.
- const _stepdownThread = new StepdownThread();
-
- /**
- * Reconfigures the replica set, then starts the stepdown thread. As part of the new
- * config, this sets:
- * - electionTimeoutMillis to stepdownOptions.electionTimeoutMS so a new primary can
- * get elected before the stepdownOptions.stepdownIntervalMS period would cause one
- * to step down again.
- * - catchUpTimeoutMillis to stepdownOptions.catchUpTimeoutMS. Lower values increase
- * the likelihood and volume of rollbacks.
- */
- this.startContinuousFailover = function() {
- if (_stepdownThread.hasStarted()) {
- throw new Error("Continuous failover thread is already active");
- }
-
- const rsconfig = this.getReplSetConfigFromNode();
+ this.stopContinuousFailover = function({waitForPrimary: waitForPrimary = false} = {}) {
+ if (!_stepdownThread.hasStarted()) {
+ return;
+ }
- const shouldUpdateElectionTimeout =
- (rsconfig.settings.electionTimeoutMillis !== stepdownOptions.electionTimeoutMS);
- const shouldUpdateCatchUpTimeout =
- (rsconfig.settings.catchUpTimeoutMillis !== stepdownOptions.catchUpTimeoutMS);
+ _stepdownThread.stop();
- if (shouldUpdateElectionTimeout || shouldUpdateCatchUpTimeout) {
- rsconfig.settings.electionTimeoutMillis = stepdownOptions.electionTimeoutMS;
- rsconfig.settings.catchUpTimeoutMillis = stepdownOptions.catchUpTimeoutMS;
+ if (waitForPrimary) {
+ this.getPrimary();
+ }
+ };
+ };
- rsconfig.version += 1;
- reconfig(this, rsconfig);
+ Object.extend(ReplSetTest, originalReplSetTest);
- const newSettings = this.getReplSetConfigFromNode().settings;
+ /**
+ * Overrides the ShardingTest constructor to start the continuous primary stepdown thread.
+ */
+ ShardingTest = function ShardingTestWithContinuousPrimaryStepdown(params) {
+ params.other = params.other || {};
- assert.eq(newSettings.electionTimeoutMillis,
- stepdownOptions.electionTimeoutMS,
- "Failed to set the electionTimeoutMillis to " +
- stepdownOptions.electionTimeoutMS + " milliseconds.");
- assert.eq(newSettings.catchUpTimeoutMillis,
- stepdownOptions.catchUpTimeoutMS,
- "Failed to set the catchUpTimeoutMillis to " +
- stepdownOptions.catchUpTimeoutMS + " milliseconds.");
- }
+ if (stepdownOptions.configStepdown) {
+ params.other.configOptions = params.other.configOptions || {};
+ params.other.configOptions.setParameter = params.other.configOptions.setParameter || {};
+ params.other.configOptions.setParameter.logComponentVerbosity = verbositySetting;
+ }
- _stepdownThread.start(this.nodes[0].host, stepdownOptions);
- };
-
- /**
- * Blocking method, which tells the thread running continuousPrimaryStepdownFn to stop
- * and waits for it to terminate.
- *
- * If waitForPrimary is true, blocks until a new primary has been elected.
- */
- this.stopContinuousFailover = function({waitForPrimary: waitForPrimary = false} = {}) {
- if (!_stepdownThread.hasStarted()) {
- return;
- }
+ if (stepdownOptions.shardStepdown) {
+ params.other.shardOptions = params.other.shardOptions || {};
+ params.other.shardOptions.setParameter = params.other.shardOptions.setParameter || {};
+ params.other.shardOptions.setParameter.logComponentVerbosity = verbositySetting;
+ }
- _stepdownThread.stop();
+ // Construct the original object.
+ originalShardingTest.apply(this, arguments);
- if (waitForPrimary) {
- this.getPrimary();
- }
- };
- };
+ // Validate the stepdown options.
+ if (stepdownOptions.configStepdown && !this.configRS) {
+ throw new Error("Continuous config server primary step down only available with CSRS");
+ }
- Object.extend(ReplSetTest, originalReplSetTest);
+ if (stepdownOptions.shardStepdown && this._rs.some(rst => !rst)) {
+ throw new Error(
+ "Continuous shard primary step down only available with replica set shards");
+ }
/**
- * Overrides the ShardingTest constructor to start the continuous primary stepdown thread.
+ * Calls startContinuousFailover on the config server and/or each shard replica set as
+ * specifed by the stepdownOptions object.
*/
- ShardingTest = function ShardingTestWithContinuousPrimaryStepdown(params) {
- params.other = params.other || {};
-
+ this.startContinuousFailover = function() {
if (stepdownOptions.configStepdown) {
- params.other.configOptions = params.other.configOptions || {};
- params.other.configOptions.setParameter =
- params.other.configOptions.setParameter || {};
- params.other.configOptions.setParameter.logComponentVerbosity = verbositySetting;
+ this.configRS.startContinuousFailover();
}
if (stepdownOptions.shardStepdown) {
- params.other.shardOptions = params.other.shardOptions || {};
- params.other.shardOptions.setParameter =
- params.other.shardOptions.setParameter || {};
- params.other.shardOptions.setParameter.logComponentVerbosity = verbositySetting;
+ this._rs.forEach(function(rst) {
+ rst.test.startContinuousFailover();
+ });
}
+ };
- // Construct the original object.
- originalShardingTest.apply(this, arguments);
-
- // Validate the stepdown options.
- if (stepdownOptions.configStepdown && !this.configRS) {
- throw new Error(
- "Continuous config server primary step down only available with CSRS");
+ /**
+ * Calls stopContinuousFailover on the config server and each shard replica set as
+ * specified by the stepdownOptions object.
+ *
+ * If waitForPrimary is true, blocks until each replica set has elected a primary.
+ * If waitForMongosRetarget is true, blocks until each mongos has an up to date view of
+ * the cluster.
+ */
+ this.stopContinuousFailover = function({
+ waitForPrimary: waitForPrimary = false,
+ waitForMongosRetarget: waitForMongosRetarget = false
+ } = {}) {
+ if (stepdownOptions.configStepdown) {
+ this.configRS.stopContinuousFailover({waitForPrimary: waitForPrimary});
}
- if (stepdownOptions.shardStepdown && this._rs.some(rst => !rst)) {
- throw new Error(
- "Continuous shard primary step down only available with replica set shards");
+ if (stepdownOptions.shardStepdown) {
+ this._rs.forEach(function(rst) {
+ rst.test.stopContinuousFailover({waitForPrimary: waitForPrimary});
+ });
}
- /**
- * Calls startContinuousFailover on the config server and/or each shard replica set as
- * specifed by the stepdownOptions object.
- */
- this.startContinuousFailover = function() {
- if (stepdownOptions.configStepdown) {
- this.configRS.startContinuousFailover();
- }
-
- if (stepdownOptions.shardStepdown) {
- this._rs.forEach(function(rst) {
- rst.test.startContinuousFailover();
- });
- }
- };
-
- /**
- * Calls stopContinuousFailover on the config server and each shard replica set as
- * specified by the stepdownOptions object.
- *
- * If waitForPrimary is true, blocks until each replica set has elected a primary.
- * If waitForMongosRetarget is true, blocks until each mongos has an up to date view of
- * the cluster.
- */
- this.stopContinuousFailover = function({
- waitForPrimary: waitForPrimary = false,
- waitForMongosRetarget: waitForMongosRetarget = false
- } = {}) {
- if (stepdownOptions.configStepdown) {
- this.configRS.stopContinuousFailover({waitForPrimary: waitForPrimary});
- }
-
- if (stepdownOptions.shardStepdown) {
- this._rs.forEach(function(rst) {
- rst.test.stopContinuousFailover({waitForPrimary: waitForPrimary});
- });
- }
-
- if (waitForMongosRetarget) {
- // Run validate on each collection in each database to ensure mongos can target
- // the primary for each shard with data, including the config servers.
- this._mongos.forEach(s => {
- const res = assert.commandWorked(s.adminCommand({listDatabases: 1}));
- res.databases.forEach(dbInfo => {
- const startTime = Date.now();
- print("Waiting for mongos: " + s.host + " to retarget db: " +
- dbInfo.name);
-
- const db = s.getDB(dbInfo.name);
- assert.soon(() => {
- let collInfo;
- try {
- collInfo = db.getCollectionInfos();
- } catch (e) {
- if (ErrorCodes.isNotMasterError(e.code)) {
- return false;
- }
- throw e;
+ if (waitForMongosRetarget) {
+ // Run validate on each collection in each database to ensure mongos can target
+ // the primary for each shard with data, including the config servers.
+ this._mongos.forEach(s => {
+ const res = assert.commandWorked(s.adminCommand({listDatabases: 1}));
+ res.databases.forEach(dbInfo => {
+ const startTime = Date.now();
+ print("Waiting for mongos: " + s.host + " to retarget db: " + dbInfo.name);
+
+ const db = s.getDB(dbInfo.name);
+ assert.soon(() => {
+ let collInfo;
+ try {
+ collInfo = db.getCollectionInfos();
+ } catch (e) {
+ if (ErrorCodes.isNotMasterError(e.code)) {
+ return false;
}
+ throw e;
+ }
- collInfo.forEach(collDoc => {
- const res = db.runCommand({collStats: collDoc["name"]});
- if (ErrorCodes.isNotMasterError(res.code)) {
- return false;
- }
- assert.commandWorked(res);
- });
-
- return true;
+ collInfo.forEach(collDoc => {
+ const res = db.runCommand({collStats: collDoc["name"]});
+ if (ErrorCodes.isNotMasterError(res.code)) {
+ return false;
+ }
+ assert.commandWorked(res);
});
- const totalTime = Date.now() - startTime;
- print("Finished waiting for mongos: " + s.host + " to retarget db: " +
- dbInfo.name + ", in " + totalTime + " ms");
+
+ return true;
});
+ const totalTime = Date.now() - startTime;
+ print("Finished waiting for mongos: " + s.host +
+ " to retarget db: " + dbInfo.name + ", in " + totalTime + " ms");
});
- }
-
- };
-
- /**
- * This method is disabled because it runs aggregation, which doesn't handle config
- * server stepdown correctly.
- */
- this.printShardingStatus = function() {};
+ });
+ }
};
- Object.extend(ShardingTest, originalShardingTest);
-
- // The checkUUIDsConsistentAcrossCluster() function is defined on ShardingTest's prototype,
- // but ShardingTest's prototype gets reset when ShardingTest is reassigned. We reload the
- // override to redefine checkUUIDsConsistentAcrossCluster() on the new ShardingTest's
- // prototype.
- load('jstests/libs/override_methods/check_uuids_consistent_across_cluster.js');
+ /**
+ * This method is disabled because it runs aggregation, which doesn't handle config
+ * server stepdown correctly.
+ */
+ this.printShardingStatus = function() {};
};
+
+ Object.extend(ShardingTest, originalShardingTest);
+
+ // The checkUUIDsConsistentAcrossCluster() function is defined on ShardingTest's prototype,
+ // but ShardingTest's prototype gets reset when ShardingTest is reassigned. We reload the
+ // override to redefine checkUUIDsConsistentAcrossCluster() on the new ShardingTest's
+ // prototype.
+ load('jstests/libs/override_methods/check_uuids_consistent_across_cluster.js');
+};
})();
diff --git a/jstests/libs/override_methods/detect_spawning_own_mongod.js b/jstests/libs/override_methods/detect_spawning_own_mongod.js
index f741c086cdd..42b95a58d51 100644
--- a/jstests/libs/override_methods/detect_spawning_own_mongod.js
+++ b/jstests/libs/override_methods/detect_spawning_own_mongod.js
@@ -3,40 +3,38 @@
* suites should not contain JS tests that start their own mongod/s.
*/
(function() {
- 'use strict';
+'use strict';
- MongoRunner.runMongod = function() {
- throw new Error(
- "Detected MongoRunner.runMongod() call in js test from passthrough suite. " +
- "Consider moving the test to one of the jstests/noPassthrough/, " +
- "jstests/replsets/, or jstests/sharding/ directories.");
- };
+MongoRunner.runMongod = function() {
+ throw new Error("Detected MongoRunner.runMongod() call in js test from passthrough suite. " +
+ "Consider moving the test to one of the jstests/noPassthrough/, " +
+ "jstests/replsets/, or jstests/sharding/ directories.");
+};
- MongoRunner.runMongos = function() {
- throw new Error(
- "Detected MongoRunner.runMongos() call in js test from passthrough suite. " +
- "Consider moving the test to one of the jstests/noPassthrough/, " +
- "jstests/replsets/, or jstests/sharding/ directories.");
- };
+MongoRunner.runMongos = function() {
+ throw new Error("Detected MongoRunner.runMongos() call in js test from passthrough suite. " +
+ "Consider moving the test to one of the jstests/noPassthrough/, " +
+ "jstests/replsets/, or jstests/sharding/ directories.");
+};
- const STOverrideConstructor = function() {
- throw new Error("Detected ShardingTest() call in js test from passthrough suite. " +
- "Consider moving the test to one of the jstests/noPassthrough/, " +
- "jstests/replsets/, or jstests/sharding/ directories.");
- };
+const STOverrideConstructor = function() {
+ throw new Error("Detected ShardingTest() call in js test from passthrough suite. " +
+ "Consider moving the test to one of the jstests/noPassthrough/, " +
+ "jstests/replsets/, or jstests/sharding/ directories.");
+};
- // This Object.assign() lets us modify ShardingTest to use the new overridden constructor but
- // still keep any static properties it has.
- ShardingTest = Object.assign(STOverrideConstructor, ShardingTest);
+// This Object.assign() lets us modify ShardingTest to use the new overridden constructor but
+// still keep any static properties it has.
+ShardingTest = Object.assign(STOverrideConstructor, ShardingTest);
- const RSTOverrideConstructor = function() {
- throw new Error("Detected ReplSetTest() call in js test from passthrough suite. " +
- "Consider moving the test to one of the jstests/noPassthrough/, " +
- "jstests/replsets/, or jstests/sharding/ directories.");
- };
+const RSTOverrideConstructor = function() {
+ throw new Error("Detected ReplSetTest() call in js test from passthrough suite. " +
+ "Consider moving the test to one of the jstests/noPassthrough/, " +
+ "jstests/replsets/, or jstests/sharding/ directories.");
+};
- // Same as the above Object.assign() call. In particular, we want to preserve the
- // ReplSetTest.kDefaultTimeoutMS property, which should be accessible to tests in the
- // passthrough suite.
- ReplSetTest = Object.assign(RSTOverrideConstructor, ReplSetTest);
+// Same as the above Object.assign() call. In particular, we want to preserve the
+// ReplSetTest.kDefaultTimeoutMS property, which should be accessible to tests in the
+// passthrough suite.
+ReplSetTest = Object.assign(RSTOverrideConstructor, ReplSetTest);
})();
diff --git a/jstests/libs/override_methods/enable_causal_consistency.js b/jstests/libs/override_methods/enable_causal_consistency.js
index 26c861baa9c..cb9eb52db06 100644
--- a/jstests/libs/override_methods/enable_causal_consistency.js
+++ b/jstests/libs/override_methods/enable_causal_consistency.js
@@ -2,14 +2,14 @@
* Enables causal consistency on the connections.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/override_helpers.js");
- load('jstests/libs/override_methods/set_read_preference_secondary.js');
- load('jstests/libs/override_methods/causally_consistent_index_builds.js');
+load("jstests/libs/override_methods/override_helpers.js");
+load('jstests/libs/override_methods/set_read_preference_secondary.js');
+load('jstests/libs/override_methods/causally_consistent_index_builds.js');
- db.getMongo().setCausalConsistency();
+db.getMongo().setCausalConsistency();
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/enable_causal_consistency.js");
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/enable_causal_consistency.js");
})();
diff --git a/jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js b/jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js
index 4da6c596ef6..96860afb3f0 100644
--- a/jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js
+++ b/jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js
@@ -2,12 +2,12 @@
* Enables causal consistency on the connections without setting the read preference to secondary.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/override_methods/override_helpers.js");
- db.getMongo().setCausalConsistency();
+db.getMongo().setCausalConsistency();
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js");
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js");
})();
diff --git a/jstests/libs/override_methods/enable_sessions.js b/jstests/libs/override_methods/enable_sessions.js
index 85bb57d7e94..846143da999 100644
--- a/jstests/libs/override_methods/enable_sessions.js
+++ b/jstests/libs/override_methods/enable_sessions.js
@@ -2,67 +2,65 @@
* Enables sessions on the db object
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/override_methods/override_helpers.js");
- const getDBOriginal = Mongo.prototype.getDB;
+const getDBOriginal = Mongo.prototype.getDB;
- const sessionMap = new WeakMap();
- const sessionOptions = TestData.sessionOptions;
+const sessionMap = new WeakMap();
+const sessionOptions = TestData.sessionOptions;
- // Override the runCommand to check for any command obj that does not contain a logical session
- // and throw an error.
- function runCommandWithLsidCheck(conn, dbName, cmdName, cmdObj, func, makeFuncArgs) {
- if (jsTest.options().disableEnableSessions) {
- return func.apply(conn, makeFuncArgs(cmdObj));
- }
-
- // If the command is in a wrapped form, then we look for the actual command object
- // inside the query/$query object.
- let cmdObjUnwrapped = cmdObj;
- if (cmdName === "query" || cmdName === "$query") {
- cmdObj[cmdName] = Object.assign({}, cmdObj[cmdName]);
- cmdObjUnwrapped = cmdObj[cmdName];
- }
-
- if (!cmdObjUnwrapped.hasOwnProperty("lsid")) {
- // TODO: SERVER-30848 fixes getMore requests to use a session in the mongo shell.
- // Until that happens, we bypass throwing an error for getMore and only throw an error
- // for other requests not using sessions.
- if (cmdName !== "getMore") {
- throw new Error("command object does not have session id: " + tojson(cmdObj));
- }
- }
+// Override the runCommand to check for any command obj that does not contain a logical session
+// and throw an error.
+function runCommandWithLsidCheck(conn, dbName, cmdName, cmdObj, func, makeFuncArgs) {
+ if (jsTest.options().disableEnableSessions) {
return func.apply(conn, makeFuncArgs(cmdObj));
}
- // Override the getDB to return a db object with the correct driverSession. We use a WeakMap
- // to cache the session for each connection instance so we can retrieve the same session on
- // subsequent calls to getDB.
- Mongo.prototype.getDB = function(dbName) {
- if (jsTest.options().disableEnableSessions) {
- return getDBOriginal.apply(this, arguments);
- }
+ // If the command is in a wrapped form, then we look for the actual command object
+ // inside the query/$query object.
+ let cmdObjUnwrapped = cmdObj;
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObj[cmdName] = Object.assign({}, cmdObj[cmdName]);
+ cmdObjUnwrapped = cmdObj[cmdName];
+ }
- if (!sessionMap.has(this)) {
- const session = this.startSession(sessionOptions);
- // Override the endSession function to be a no-op so jstestfuzz doesn't accidentally
- // end the session.
- session.endSession = Function.prototype;
- sessionMap.set(this, session);
+ if (!cmdObjUnwrapped.hasOwnProperty("lsid")) {
+ // TODO: SERVER-30848 fixes getMore requests to use a session in the mongo shell.
+ // Until that happens, we bypass throwing an error for getMore and only throw an error
+ // for other requests not using sessions.
+ if (cmdName !== "getMore") {
+ throw new Error("command object does not have session id: " + tojson(cmdObj));
}
+ }
+ return func.apply(conn, makeFuncArgs(cmdObj));
+}
- const db = getDBOriginal.apply(this, arguments);
- db._session = sessionMap.get(this);
- return db;
- };
+// Override the getDB to return a db object with the correct driverSession. We use a WeakMap
+// to cache the session for each connection instance so we can retrieve the same session on
+// subsequent calls to getDB.
+Mongo.prototype.getDB = function(dbName) {
+ if (jsTest.options().disableEnableSessions) {
+ return getDBOriginal.apply(this, arguments);
+ }
+
+ if (!sessionMap.has(this)) {
+ const session = this.startSession(sessionOptions);
+ // Override the endSession function to be a no-op so jstestfuzz doesn't accidentally
+ // end the session.
+ session.endSession = Function.prototype;
+ sessionMap.set(this, session);
+ }
- // Override the global `db` object to be part of a session.
- db = db.getMongo().getDB(db.getName());
+ const db = getDBOriginal.apply(this, arguments);
+ db._session = sessionMap.get(this);
+ return db;
+};
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/enable_sessions.js");
- OverrideHelpers.overrideRunCommand(runCommandWithLsidCheck);
+// Override the global `db` object to be part of a session.
+db = db.getMongo().getDB(db.getName());
+OverrideHelpers.prependOverrideInParallelShell("jstests/libs/override_methods/enable_sessions.js");
+OverrideHelpers.overrideRunCommand(runCommandWithLsidCheck);
})();
diff --git a/jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js b/jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js
index 2feca955c3d..858a345ae5a 100644
--- a/jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js
+++ b/jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js
@@ -4,46 +4,47 @@
* inaccurate results.
*/
(function() {
- "use strict";
-
- load("jstests/libs/override_methods/override_helpers.js");
-
- function runCommandFailUncleanShutdownIncompatibleCommands(
- conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
-
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- let commandObjUnwrapped = commandObj;
- if (commandName === "query" || commandName === "$query") {
- commandObjUnwrapped = commandObj[commandName];
- commandName = Object.keys(commandObjUnwrapped)[0];
- }
-
- if (commandName === "count" && (!commandObjUnwrapped.hasOwnProperty("query") ||
- Object.keys(commandObjUnwrapped["query"]).length === 0)) {
- throw new Error("Cowardly fail if fastcount is run with a mongod that had an unclean" +
- " shutdown: " + tojson(commandObjUnwrapped));
- }
-
- if (commandName === "dataSize" && !commandObjUnwrapped.hasOwnProperty("min") &&
- !commandObjUnwrapped.hasOwnProperty("max")) {
- throw new Error("Cowardly fail if unbounded dataSize is run with a mongod that had an" +
- " unclean shutdown: " + tojson(commandObjUnwrapped));
- }
-
- if (commandName === "collStats" || commandName === "dbStats") {
- throw new Error("Cowardly fail if " + commandName + " is run with a mongod that had" +
- " an unclean shutdown: " + tojson(commandObjUnwrapped));
- }
+"use strict";
+load("jstests/libs/override_methods/override_helpers.js");
+
+function runCommandFailUncleanShutdownIncompatibleCommands(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
return func.apply(conn, makeFuncArgs(commandObj));
}
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ let commandObjUnwrapped = commandObj;
+ if (commandName === "query" || commandName === "$query") {
+ commandObjUnwrapped = commandObj[commandName];
+ commandName = Object.keys(commandObjUnwrapped)[0];
+ }
+
+ if (commandName === "count" &&
+ (!commandObjUnwrapped.hasOwnProperty("query") ||
+ Object.keys(commandObjUnwrapped["query"]).length === 0)) {
+ throw new Error("Cowardly fail if fastcount is run with a mongod that had an unclean" +
+ " shutdown: " + tojson(commandObjUnwrapped));
+ }
+
+ if (commandName === "dataSize" && !commandObjUnwrapped.hasOwnProperty("min") &&
+ !commandObjUnwrapped.hasOwnProperty("max")) {
+ throw new Error("Cowardly fail if unbounded dataSize is run with a mongod that had an" +
+ " unclean shutdown: " + tojson(commandObjUnwrapped));
+ }
+
+ if (commandName === "collStats" || commandName === "dbStats") {
+ throw new Error("Cowardly fail if " + commandName + " is run with a mongod that had" +
+ " an unclean shutdown: " + tojson(commandObjUnwrapped));
+ }
+
+ return func.apply(conn, makeFuncArgs(commandObj));
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
- OverrideHelpers.overrideRunCommand(runCommandFailUncleanShutdownIncompatibleCommands);
+OverrideHelpers.overrideRunCommand(runCommandFailUncleanShutdownIncompatibleCommands);
})();
diff --git a/jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js b/jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js
index fabeff4915f..5a6b04a308b 100644
--- a/jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js
+++ b/jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js
@@ -3,10 +3,10 @@
* unclean shutdown and won't be restarted when the node is restarted.
*/
(function() {
- "use strict";
+"use strict";
- startParallelShell = function(jsCode, port, noConnect) {
- throw new Error("Cowardly fail if startParallelShell is run with a mongod that had" +
- " an unclean shutdown.");
- };
+startParallelShell = function(jsCode, port, noConnect) {
+ throw new Error("Cowardly fail if startParallelShell is run with a mongod that had" +
+ " an unclean shutdown.");
+};
})();
diff --git a/jstests/libs/override_methods/find_batch_size.js b/jstests/libs/override_methods/find_batch_size.js
index ab773ded7ed..9636be036fd 100644
--- a/jstests/libs/override_methods/find_batch_size.js
+++ b/jstests/libs/override_methods/find_batch_size.js
@@ -10,14 +10,14 @@
// TODO: Add support for overriding batch sizes in the bulk API.
(function() {
- 'use strict';
+'use strict';
- // Save a reference to the original find method in the IIFE's scope.
- // This scoping allows the original method to be called by the find override below.
- var originalFind = DBCollection.prototype.find;
+// Save a reference to the original find method in the IIFE's scope.
+// This scoping allows the original method to be called by the find override below.
+var originalFind = DBCollection.prototype.find;
- DBCollection.prototype.find = function(query, fields, limit, skip, batchSize, options) {
- var batchSizeDefault = batchSize || (TestData && TestData.batchSize);
- return originalFind.call(this, query, fields, limit, skip, batchSizeDefault, options);
- };
+DBCollection.prototype.find = function(query, fields, limit, skip, batchSize, options) {
+ var batchSizeDefault = batchSize || (TestData && TestData.batchSize);
+ return originalFind.call(this, query, fields, limit, skip, batchSizeDefault, options);
+};
}());
diff --git a/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js b/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js
index c7ba66763a2..9e13e0d0847 100644
--- a/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js
+++ b/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js
@@ -24,14 +24,14 @@ ChangeStreamPassthroughHelpers.nsMatchFilter = function(db, collName) {
$match: {
$or: [
{
- "ns.db": db.getName(),
- "ns.coll": (isSingleCollectionStream ? collName : {$exists: true})
+ "ns.db": db.getName(),
+ "ns.coll": (isSingleCollectionStream ? collName : {$exists: true})
},
// Add a clause to detect if the collection being watched is the target of a
// renameCollection command, since that is expected to return a "rename" entry.
{
- "to.db": db.getName(),
- "to.coll": (isSingleCollectionStream ? collName : {$exists: true})
+ "to.db": db.getName(),
+ "to.coll": (isSingleCollectionStream ? collName : {$exists: true})
},
{operationType: "invalidate"}
]
diff --git a/jstests/libs/override_methods/implicit_whole_db_changestreams.js b/jstests/libs/override_methods/implicit_whole_db_changestreams.js
index 93f485e4564..e5fe87c5287 100644
--- a/jstests/libs/override_methods/implicit_whole_db_changestreams.js
+++ b/jstests/libs/override_methods/implicit_whole_db_changestreams.js
@@ -106,56 +106,55 @@ const ChangeStreamPassthroughHelpers = {
};
(function() {
- 'use strict';
+'use strict';
- const originalRunCommandImpl = DB.prototype._runCommandImpl;
- const originalRunCommand = DB.prototype.runCommand;
+const originalRunCommandImpl = DB.prototype._runCommandImpl;
+const originalRunCommand = DB.prototype.runCommand;
- const upconvertedCursors = new Set();
+const upconvertedCursors = new Set();
- const db = null;
+const db = null;
- const passthroughRunCommandImpl = function(dbName, cmdObj, options) {
- // Check whether this command is an upconvertable $changeStream request.
- const upconvertCursor =
- ChangeStreamPassthroughHelpers.isUpconvertableChangeStreamRequest(this, cmdObj);
- if (upconvertCursor) {
- [dbName, cmdObj] =
- ChangeStreamPassthroughHelpers.upconvertChangeStreamRequest(this, cmdObj);
- }
+const passthroughRunCommandImpl = function(dbName, cmdObj, options) {
+ // Check whether this command is an upconvertable $changeStream request.
+ const upconvertCursor =
+ ChangeStreamPassthroughHelpers.isUpconvertableChangeStreamRequest(this, cmdObj);
+ if (upconvertCursor) {
+ [dbName, cmdObj] =
+ ChangeStreamPassthroughHelpers.upconvertChangeStreamRequest(this, cmdObj);
+ }
- // If the command is a getMore, it may be a $changeStream that we upconverted to run
- // whole-db. Ensure that we update the 'collection' field to be the collectionless
- // namespace.
- if (cmdObj && cmdObj.getMore && upconvertedCursors.has(cmdObj.getMore.toString())) {
- [dbName, cmdObj] = ChangeStreamPassthroughHelpers.upconvertGetMoreRequest(this, cmdObj);
- }
+ // If the command is a getMore, it may be a $changeStream that we upconverted to run
+ // whole-db. Ensure that we update the 'collection' field to be the collectionless
+ // namespace.
+ if (cmdObj && cmdObj.getMore && upconvertedCursors.has(cmdObj.getMore.toString())) {
+ [dbName, cmdObj] = ChangeStreamPassthroughHelpers.upconvertGetMoreRequest(this, cmdObj);
+ }
- // Pass the modified command to the original runCommand implementation.
- const res = originalRunCommandImpl.apply(this, [dbName, cmdObj, options]);
+ // Pass the modified command to the original runCommand implementation.
+ const res = originalRunCommandImpl.apply(this, [dbName, cmdObj, options]);
- // Record the upconverted cursor ID so that we can adjust subsequent getMores.
- if (upconvertCursor && res.cursor && res.cursor.id > 0) {
- upconvertedCursors.add(res.cursor.id.toString());
- }
+ // Record the upconverted cursor ID so that we can adjust subsequent getMores.
+ if (upconvertCursor && res.cursor && res.cursor.id > 0) {
+ upconvertedCursors.add(res.cursor.id.toString());
+ }
- return res;
- };
-
- // Redirect the Collection's 'watch' function to use the whole-DB version. Although calls to the
- // shell helpers will ultimately resolve to the overridden runCommand anyway, we need to
- // override the helpers to ensure that the DB.watch function itself is exercised by the
- // passthrough wherever Collection.watch is called.
- DBCollection.prototype.watch = function(pipeline, options) {
- pipeline = Object.assign([], pipeline);
- pipeline.unshift(
- ChangeStreamPassthroughHelpers.nsMatchFilter(this.getDB(), this.getName()));
- return this.getDB().watch(pipeline, options);
- };
-
- // Override DB.runCommand to use the custom or original _runCommandImpl.
- DB.prototype.runCommand = function(cmdObj, extra, queryOptions, noPassthrough) {
- this._runCommandImpl = (noPassthrough ? originalRunCommandImpl : passthroughRunCommandImpl);
- return originalRunCommand.apply(this, [cmdObj, extra, queryOptions]);
- };
+ return res;
+};
+
+// Redirect the Collection's 'watch' function to use the whole-DB version. Although calls to the
+// shell helpers will ultimately resolve to the overridden runCommand anyway, we need to
+// override the helpers to ensure that the DB.watch function itself is exercised by the
+// passthrough wherever Collection.watch is called.
+DBCollection.prototype.watch = function(pipeline, options) {
+ pipeline = Object.assign([], pipeline);
+ pipeline.unshift(ChangeStreamPassthroughHelpers.nsMatchFilter(this.getDB(), this.getName()));
+ return this.getDB().watch(pipeline, options);
+};
+
+// Override DB.runCommand to use the custom or original _runCommandImpl.
+DB.prototype.runCommand = function(cmdObj, extra, queryOptions, noPassthrough) {
+ this._runCommandImpl = (noPassthrough ? originalRunCommandImpl : passthroughRunCommandImpl);
+ return originalRunCommand.apply(this, [cmdObj, extra, queryOptions]);
+};
}());
diff --git a/jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js b/jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js
index 7ec99e0fd14..c8ca76eb08a 100644
--- a/jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js
+++ b/jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js
@@ -3,135 +3,134 @@
* codes automatically retry.
*/
(function() {
- "use strict";
-
- load("jstests/libs/override_methods/override_helpers.js");
-
- // These are all commands that can return BackgroundOperationInProgress error codes.
- const commandWhitelist = new Set([
- "cloneCollectionAsCapped",
- "collMod",
- "compact",
- "convertToCapped",
- "createIndexes",
- "drop",
- "dropDatabase",
- "dropIndexes",
- "renameCollection",
- ]);
-
- // Whitelisted errors commands may encounter when retried on a sharded cluster. Shards may
- // return different responses, so errors associated with repeated executions of a command may be
- // ignored.
- const acceptableCommandErrors = {
- "drop": [ErrorCodes.NamespaceNotFound],
- "dropIndexes": [ErrorCodes.IndexNotFound],
- "renameCollection": [ErrorCodes.NamespaceNotFound],
- };
-
- const kTimeout = 10 * 60 * 1000;
- const kInterval = 200;
-
- // Make it easier to understand whether or not returns from the assert.soon are being retried.
- const kNoRetry = true;
- const kRetry = false;
-
- function hasBackgroundOpInProgress(res) {
- // Only these are retryable.
- return res.code === ErrorCodes.BackgroundOperationInProgressForNamespace ||
- res.code === ErrorCodes.BackgroundOperationInProgressForDatabase;
+"use strict";
+
+load("jstests/libs/override_methods/override_helpers.js");
+
+// These are all commands that can return BackgroundOperationInProgress error codes.
+const commandWhitelist = new Set([
+ "cloneCollectionAsCapped",
+ "collMod",
+ "compact",
+ "convertToCapped",
+ "createIndexes",
+ "drop",
+ "dropDatabase",
+ "dropIndexes",
+ "renameCollection",
+]);
+
+// Whitelisted errors commands may encounter when retried on a sharded cluster. Shards may
+// return different responses, so errors associated with repeated executions of a command may be
+// ignored.
+const acceptableCommandErrors = {
+ "drop": [ErrorCodes.NamespaceNotFound],
+ "dropIndexes": [ErrorCodes.IndexNotFound],
+ "renameCollection": [ErrorCodes.NamespaceNotFound],
+};
+
+const kTimeout = 10 * 60 * 1000;
+const kInterval = 200;
+
+// Make it easier to understand whether or not returns from the assert.soon are being retried.
+const kNoRetry = true;
+const kRetry = false;
+
+function hasBackgroundOpInProgress(res) {
+ // Only these are retryable.
+ return res.code === ErrorCodes.BackgroundOperationInProgressForNamespace ||
+ res.code === ErrorCodes.BackgroundOperationInProgressForDatabase;
+}
+
+function runCommandWithRetries(conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
}
- function runCommandWithRetries(conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
+ let res;
+ let attempt = 0;
- let res;
- let attempt = 0;
+ assert.soon(
+ () => {
+ attempt++;
- assert.soon(
- () => {
- attempt++;
+ res = func.apply(conn, makeFuncArgs(commandObj));
+ if (res.ok === 1) {
+ return kNoRetry;
+ }
- res = func.apply(conn, makeFuncArgs(commandObj));
- if (res.ok === 1) {
- return kNoRetry;
- }
+ // Commands that are not in the whitelist should never fail with this error code.
+ if (!commandWhitelist.has(commandName)) {
+ return kNoRetry;
+ }
- // Commands that are not in the whitelist should never fail with this error code.
- if (!commandWhitelist.has(commandName)) {
- return kNoRetry;
- }
+ let message = "Retrying the " + commandName +
+ " command because a background operation is in progress (attempt " + attempt + ")";
- let message = "Retrying the " + commandName +
- " command because a background operation is in progress (attempt " + attempt +
- ")";
-
- // This handles the retry case when run against a standalone, replica set, or mongos
- // where both shards returned the same response.
- if (hasBackgroundOpInProgress(res)) {
- print(message);
- return kRetry;
+ // This handles the retry case when run against a standalone, replica set, or mongos
+ // where both shards returned the same response.
+ if (hasBackgroundOpInProgress(res)) {
+ print(message);
+ return kRetry;
+ }
+
+ // The following logic only applies to sharded clusters.
+ if (!conn.isMongos() || !res.raw) {
+ // We don't attempt to retry commands for which mongos doesn't expose the raw
+ // responses from the shards.
+ return kNoRetry;
+ }
+
+ // In certain cases, retrying a command on a sharded cluster may result in a
+ // scenario where one shard has executed the command and another still has a
+ // background operation in progress. Retry, ignoring whitelisted errors on a
+ // command-by-command basis.
+ let shardsWithBackgroundOps = [];
+
+ // If any shard has a background operation in progress and the other shards sent
+ // whitelisted errors after a first attempt, retry the entire command.
+ for (let shard in res.raw) {
+ let shardRes = res.raw[shard];
+ if (shardRes.ok) {
+ continue;
}
- // The following logic only applies to sharded clusters.
- if (!conn.isMongos() || !res.raw) {
- // We don't attempt to retry commands for which mongos doesn't expose the raw
- // responses from the shards.
- return kNoRetry;
+ if (hasBackgroundOpInProgress(shardRes)) {
+ shardsWithBackgroundOps.push(shard);
+ continue;
}
- // In certain cases, retrying a command on a sharded cluster may result in a
- // scenario where one shard has executed the command and another still has a
- // background operation in progress. Retry, ignoring whitelisted errors on a
- // command-by-command basis.
- let shardsWithBackgroundOps = [];
-
- // If any shard has a background operation in progress and the other shards sent
- // whitelisted errors after a first attempt, retry the entire command.
- for (let shard in res.raw) {
- let shardRes = res.raw[shard];
- if (shardRes.ok) {
- continue;
- }
-
- if (hasBackgroundOpInProgress(shardRes)) {
- shardsWithBackgroundOps.push(shard);
- continue;
- }
-
- // If any of the shards return an error that is not whitelisted or even if a
- // whitelisted error is received on the first attempt, do not retry.
- let acceptableErrors = acceptableCommandErrors[commandName] || [];
- if (!acceptableErrors.includes(shardRes.code)) {
- return kNoRetry;
- }
- // Whitelisted errors can only occur from running a command more than once, so
- // it would be unexpected to receive an error on the first attempt.
- if (attempt === 1) {
- return kNoRetry;
- }
+ // If any of the shards return an error that is not whitelisted or even if a
+ // whitelisted error is received on the first attempt, do not retry.
+ let acceptableErrors = acceptableCommandErrors[commandName] || [];
+ if (!acceptableErrors.includes(shardRes.code)) {
+ return kNoRetry;
}
-
- // At this point, all shards have resulted in whitelisted errors resulting in
- // retrying whitelisted commands. Fake a successful response.
- if (shardsWithBackgroundOps.length === 0) {
- print("done retrying " + commandName +
- " command because all shards have responded with acceptable errors");
- res.ok = 1;
+ // Whitelisted errors can only occur from running a command more than once, so
+ // it would be unexpected to receive an error on the first attempt.
+ if (attempt === 1) {
return kNoRetry;
}
-
- print(message + " on shards: " + tojson(shardsWithBackgroundOps));
- return kRetry;
- },
- () => "Timed out while retrying command '" + tojson(commandObj) + "', response: " +
- tojson(res),
- kTimeout,
- kInterval);
- return res;
- }
-
- OverrideHelpers.overrideRunCommand(runCommandWithRetries);
+ }
+
+ // At this point, all shards have resulted in whitelisted errors resulting in
+ // retrying whitelisted commands. Fake a successful response.
+ if (shardsWithBackgroundOps.length === 0) {
+ print("done retrying " + commandName +
+ " command because all shards have responded with acceptable errors");
+ res.ok = 1;
+ return kNoRetry;
+ }
+
+ print(message + " on shards: " + tojson(shardsWithBackgroundOps));
+ return kRetry;
+ },
+ () => "Timed out while retrying command '" + tojson(commandObj) +
+ "', response: " + tojson(res),
+ kTimeout,
+ kInterval);
+ return res;
+}
+
+OverrideHelpers.overrideRunCommand(runCommandWithRetries);
})();
diff --git a/jstests/libs/override_methods/implicitly_retry_on_database_drop_pending.js b/jstests/libs/override_methods/implicitly_retry_on_database_drop_pending.js
index c605f9336d6..534d52a76f3 100644
--- a/jstests/libs/override_methods/implicitly_retry_on_database_drop_pending.js
+++ b/jstests/libs/override_methods/implicitly_retry_on_database_drop_pending.js
@@ -3,178 +3,175 @@
* "DatabaseDropPending" error response are automatically retried until they succeed.
*/
(function() {
- "use strict";
-
- const defaultTimeout = 10 * 60 * 1000;
-
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- const mongoRunCommandWithMetadataOriginal = Mongo.prototype.runCommandWithMetadata;
-
- function awaitLatestOperationMajorityConfirmed(primary) {
- // Get the latest optime from the primary.
- const replSetStatus = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}),
- "error getting replication status from primary");
- const primaryInfo = replSetStatus.members.find(memberInfo => memberInfo.self);
- assert(primaryInfo !== undefined,
- "failed to find self in replication status: " + tojson(replSetStatus));
-
- // Wait for all operations until 'primaryInfo.optime' to be applied by a majority of the
- // replica set.
- assert.commandWorked( //
- primary.adminCommand({
- getLastError: 1,
- w: "majority",
- wtimeout: defaultTimeout,
- wOpTime: primaryInfo.optime,
- }),
- "error awaiting replication");
+"use strict";
+
+const defaultTimeout = 10 * 60 * 1000;
+
+const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+const mongoRunCommandWithMetadataOriginal = Mongo.prototype.runCommandWithMetadata;
+
+function awaitLatestOperationMajorityConfirmed(primary) {
+ // Get the latest optime from the primary.
+ const replSetStatus = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}),
+ "error getting replication status from primary");
+ const primaryInfo = replSetStatus.members.find(memberInfo => memberInfo.self);
+ assert(primaryInfo !== undefined,
+ "failed to find self in replication status: " + tojson(replSetStatus));
+
+ // Wait for all operations until 'primaryInfo.optime' to be applied by a majority of the
+ // replica set.
+ assert.commandWorked( //
+ primary.adminCommand({
+ getLastError: 1,
+ w: "majority",
+ wtimeout: defaultTimeout,
+ wOpTime: primaryInfo.optime,
+ }),
+ "error awaiting replication");
+}
+
+function runCommandWithRetries(conn, dbName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
}
- function runCommandWithRetries(conn, dbName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
-
- // We create a copy of 'commandObj' to avoid mutating the parameter the caller specified.
- // Instead, we use the makeFuncArgs() function to build the array of arguments to 'func' by
- // giving it the 'commandObj' that should be used. This is done to work around the
- // difference in the order of parameters for the Mongo.prototype.runCommand() and
- // Mongo.prototype.runCommandWithMetadata() functions.
- commandObj = Object.assign({}, commandObj);
- const commandName = Object.keys(commandObj)[0];
- let resPrevious;
- let res;
-
- assert.soon(
- () => {
- resPrevious = res;
- res = func.apply(conn, makeFuncArgs(commandObj));
-
- if (commandName === "insert" || commandName === "update") {
- let opsExecuted;
- const opsToRetry = [];
-
- // We merge ths statistics returned by the server about the number of documents
- // inserted and updated.
- if (commandName === "insert") {
- // We make 'commandObj.documents' refer to 'opsToRetry' to consolidate the
- // logic for how we retry insert and update operations.
- opsExecuted = commandObj.documents;
- commandObj.documents = opsToRetry;
-
- if (resPrevious !== undefined) {
- res.n += resPrevious.n;
- }
- } else if (commandName === "update") {
- // We make 'commandObj.updates' refer to 'opsToRetry' to consolidate the
- // logic for how we retry insert and update operations.
- opsExecuted = commandObj.updates;
- commandObj.updates = opsToRetry;
-
- // The 'upserted' property isn't defined in the response if there weren't
- // any documents upserted, but we define it as an empty array for
- // convenience when merging results from 'resPrevious'.
- res.upserted = res.upserted || [];
-
- if (resPrevious !== undefined) {
- res.n += resPrevious.n;
- res.nModified += resPrevious.nModified;
-
- // We translate the 'upsertInfo.index' back to its index in the original
- // operation that were sent to the server by finding the object's
- // reference (i.e. using strict-equality) in 'originalOps'.
- for (let upsertInfo of res.upserted) {
- upsertInfo.index =
- originalOps.indexOf(opsToRetry[upsertInfo.index]);
- }
-
- res.upserted.push(...resPrevious.upserted);
- }
- }
-
- if (res.ok !== 1 || !res.hasOwnProperty("writeErrors")) {
- // If the operation succeeded or failed for another reason, then we simply
- // return and let the caller deal with the response.
- return true;
+ // We create a copy of 'commandObj' to avoid mutating the parameter the caller specified.
+ // Instead, we use the makeFuncArgs() function to build the array of arguments to 'func' by
+ // giving it the 'commandObj' that should be used. This is done to work around the
+ // difference in the order of parameters for the Mongo.prototype.runCommand() and
+ // Mongo.prototype.runCommandWithMetadata() functions.
+ commandObj = Object.assign({}, commandObj);
+ const commandName = Object.keys(commandObj)[0];
+ let resPrevious;
+ let res;
+
+ assert.soon(
+ () => {
+ resPrevious = res;
+ res = func.apply(conn, makeFuncArgs(commandObj));
+
+ if (commandName === "insert" || commandName === "update") {
+ let opsExecuted;
+ const opsToRetry = [];
+
+ // We merge ths statistics returned by the server about the number of documents
+ // inserted and updated.
+ if (commandName === "insert") {
+ // We make 'commandObj.documents' refer to 'opsToRetry' to consolidate the
+ // logic for how we retry insert and update operations.
+ opsExecuted = commandObj.documents;
+ commandObj.documents = opsToRetry;
+
+ if (resPrevious !== undefined) {
+ res.n += resPrevious.n;
}
-
- for (let writeError of res.writeErrors) {
- if (writeError.code !== ErrorCodes.DatabaseDropPending) {
- // If the operation failed for a reason other than a
- // "DatabaseDropPending" error response, then we simply return and let
- // the caller deal with the response.
- return true;
+ } else if (commandName === "update") {
+ // We make 'commandObj.updates' refer to 'opsToRetry' to consolidate the
+ // logic for how we retry insert and update operations.
+ opsExecuted = commandObj.updates;
+ commandObj.updates = opsToRetry;
+
+ // The 'upserted' property isn't defined in the response if there weren't
+ // any documents upserted, but we define it as an empty array for
+ // convenience when merging results from 'resPrevious'.
+ res.upserted = res.upserted || [];
+
+ if (resPrevious !== undefined) {
+ res.n += resPrevious.n;
+ res.nModified += resPrevious.nModified;
+
+ // We translate the 'upsertInfo.index' back to its index in the original
+ // operation that were sent to the server by finding the object's
+ // reference (i.e. using strict-equality) in 'originalOps'.
+ for (let upsertInfo of res.upserted) {
+ upsertInfo.index = originalOps.indexOf(opsToRetry[upsertInfo.index]);
}
- }
- // We filter out operations that didn't produce a write error to avoid causing a
- // duplicate key error when retrying the operations. We cache the error message
- // for the assertion below to avoid the expense of serializing the server's
- // response as a JSON string repeatedly. (There may be up to 1000 write errors
- // in the server's response.)
- const errorMsg =
- "A write error was returned for an operation outside the list of" +
- " operations executed: " + tojson(res);
-
- for (let writeError of res.writeErrors) {
- assert.lt(writeError.index, opsExecuted.length, errorMsg);
- opsToRetry.push(opsExecuted[writeError.index]);
+ res.upserted.push(...resPrevious.upserted);
}
- } else if (res.ok === 1 || res.code !== ErrorCodes.DatabaseDropPending) {
- return true;
}
- let msg = commandName + " command";
- if (commandName !== "insert" && commandName !== "update") {
- // We intentionally omit the command object in the diagnostic message for
- // "insert" and "update" commands being retried to avoid printing a large blob
- // and hurting readability of the logs.
- msg += " " + tojsononeline(commandObj);
- }
-
- msg += " failed due to the " + dbName + " database being marked as drop-pending." +
- " Waiting for the latest operation to become majority confirmed before trying" +
- " again.";
- print(msg);
-
- // We wait for the primary's latest operation to become majority confirmed.
- // However, we may still need to retry more than once because the primary may not
- // yet have generated the oplog entry for the "dropDatabase" operation while it is
- // dropping each intermediate collection.
- awaitLatestOperationMajorityConfirmed(conn);
-
- if (TestData.skipDropDatabaseOnDatabaseDropPending &&
- commandName === "dropDatabase") {
- // We avoid retrying the "dropDatabase" command when another "dropDatabase"
- // command was already in progress for the database. This reduces the likelihood
- // that other clients would observe another DatabaseDropPending error response
- // when they go to retry, and therefore reduces the risk that repeatedly
- // retrying an individual operation would take longer than the 'defaultTimeout'
- // period.
- res = {ok: 1, dropped: dbName};
+ if (res.ok !== 1 || !res.hasOwnProperty("writeErrors")) {
+ // If the operation succeeded or failed for another reason, then we simply
+ // return and let the caller deal with the response.
return true;
}
- },
- "timed out while retrying '" + commandName +
- "' operation on DatabaseDropPending error response for '" + dbName + "' database",
- defaultTimeout);
- return res;
- }
+ for (let writeError of res.writeErrors) {
+ if (writeError.code !== ErrorCodes.DatabaseDropPending) {
+ // If the operation failed for a reason other than a
+ // "DatabaseDropPending" error response, then we simply return and let
+ // the caller deal with the response.
+ return true;
+ }
+ }
- Mongo.prototype.runCommand = function(dbName, commandObj, options) {
- return runCommandWithRetries(this,
- dbName,
- commandObj,
- mongoRunCommandOriginal,
- (commandObj) => [dbName, commandObj, options]);
- };
-
- Mongo.prototype.runCommandWithMetadata = function(dbName, metadata, commandArgs) {
- return runCommandWithRetries(this,
- dbName,
- commandArgs,
- mongoRunCommandWithMetadataOriginal,
- (commandArgs) => [dbName, metadata, commandArgs]);
- };
+ // We filter out operations that didn't produce a write error to avoid causing a
+ // duplicate key error when retrying the operations. We cache the error message
+ // for the assertion below to avoid the expense of serializing the server's
+ // response as a JSON string repeatedly. (There may be up to 1000 write errors
+ // in the server's response.)
+ const errorMsg = "A write error was returned for an operation outside the list of" +
+ " operations executed: " + tojson(res);
+
+ for (let writeError of res.writeErrors) {
+ assert.lt(writeError.index, opsExecuted.length, errorMsg);
+ opsToRetry.push(opsExecuted[writeError.index]);
+ }
+ } else if (res.ok === 1 || res.code !== ErrorCodes.DatabaseDropPending) {
+ return true;
+ }
+
+ let msg = commandName + " command";
+ if (commandName !== "insert" && commandName !== "update") {
+ // We intentionally omit the command object in the diagnostic message for
+ // "insert" and "update" commands being retried to avoid printing a large blob
+ // and hurting readability of the logs.
+ msg += " " + tojsononeline(commandObj);
+ }
+
+ msg += " failed due to the " + dbName + " database being marked as drop-pending." +
+ " Waiting for the latest operation to become majority confirmed before trying" +
+ " again.";
+ print(msg);
+
+ // We wait for the primary's latest operation to become majority confirmed.
+ // However, we may still need to retry more than once because the primary may not
+ // yet have generated the oplog entry for the "dropDatabase" operation while it is
+ // dropping each intermediate collection.
+ awaitLatestOperationMajorityConfirmed(conn);
+
+ if (TestData.skipDropDatabaseOnDatabaseDropPending && commandName === "dropDatabase") {
+ // We avoid retrying the "dropDatabase" command when another "dropDatabase"
+ // command was already in progress for the database. This reduces the likelihood
+ // that other clients would observe another DatabaseDropPending error response
+ // when they go to retry, and therefore reduces the risk that repeatedly
+ // retrying an individual operation would take longer than the 'defaultTimeout'
+ // period.
+ res = {ok: 1, dropped: dbName};
+ return true;
+ }
+ },
+ "timed out while retrying '" + commandName +
+ "' operation on DatabaseDropPending error response for '" + dbName + "' database",
+ defaultTimeout);
+
+ return res;
+}
+
+Mongo.prototype.runCommand = function(dbName, commandObj, options) {
+ return runCommandWithRetries(this,
+ dbName,
+ commandObj,
+ mongoRunCommandOriginal,
+ (commandObj) => [dbName, commandObj, options]);
+};
+
+Mongo.prototype.runCommandWithMetadata = function(dbName, metadata, commandArgs) {
+ return runCommandWithRetries(this,
+ dbName,
+ commandArgs,
+ mongoRunCommandWithMetadataOriginal,
+ (commandArgs) => [dbName, metadata, commandArgs]);
+};
})();
diff --git a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
index a6cb5a6c2a0..fc83df394cb 100644
--- a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
+++ b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
@@ -34,190 +34,189 @@ const ImplicitlyShardAccessCollSettings = (function() {
})();
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/override_methods/override_helpers.js"); // For 'OverrideHelpers'.
+load("jstests/libs/override_methods/override_helpers.js"); // For 'OverrideHelpers'.
- // Save a reference to the original methods in the IIFE's scope.
- // This scoping allows the original methods to be called by the overrides below.
- var originalGetCollection = DB.prototype.getCollection;
- var originalDBCollectionDrop = DBCollection.prototype.drop;
- var originalStartParallelShell = startParallelShell;
- var originalRunCommand = Mongo.prototype.runCommand;
+// Save a reference to the original methods in the IIFE's scope.
+// This scoping allows the original methods to be called by the overrides below.
+var originalGetCollection = DB.prototype.getCollection;
+var originalDBCollectionDrop = DBCollection.prototype.drop;
+var originalStartParallelShell = startParallelShell;
+var originalRunCommand = Mongo.prototype.runCommand;
- var testMayRunDropInParallel = false;
+var testMayRunDropInParallel = false;
- // Blacklisted namespaces that should not be sharded.
- var blacklistedNamespaces = [
- /\$cmd/,
- /^admin\./,
- /^config\./,
- /\.system\./,
- ];
+// Blacklisted namespaces that should not be sharded.
+var blacklistedNamespaces = [
+ /\$cmd/,
+ /^admin\./,
+ /^config\./,
+ /\.system\./,
+];
- const kZoneName = 'moveToHereForMigrationPassthrough';
+const kZoneName = 'moveToHereForMigrationPassthrough';
- function shardCollection(collection) {
- var db = collection.getDB();
- var dbName = db.getName();
- var fullName = collection.getFullName();
+function shardCollection(collection) {
+ var db = collection.getDB();
+ var dbName = db.getName();
+ var fullName = collection.getFullName();
- for (var ns of blacklistedNamespaces) {
- if (fullName.match(ns)) {
- return;
- }
+ for (var ns of blacklistedNamespaces) {
+ if (fullName.match(ns)) {
+ return;
}
+ }
- var res = db.adminCommand({enableSharding: dbName});
-
- // enableSharding may only be called once for a database.
- if (res.code !== ErrorCodes.AlreadyInitialized) {
- assert.commandWorked(res, "enabling sharding on the '" + dbName + "' db failed");
- }
+ var res = db.adminCommand({enableSharding: dbName});
- res = db.adminCommand(
- {shardCollection: fullName, key: {_id: 'hashed'}, collation: {locale: "simple"}});
-
- let checkResult = function(res, opDescription) {
- if (res.ok === 0 && testMayRunDropInParallel) {
- // We ignore ConflictingOperationInProgress error responses from the
- // "shardCollection" command if it's possible the test was running a "drop" command
- // concurrently. We could retry running the "shardCollection" command, but tests
- // that are likely to trigger this case are also likely running the "drop" command
- // in a loop. We therefore just let the test continue with the collection being
- // unsharded.
- assert.commandFailedWithCode(res, ErrorCodes.ConflictingOperationInProgress);
- jsTest.log("Ignoring failure while " + opDescription +
- " due to a concurrent drop operation: " + tojson(res));
- } else {
- assert.commandWorked(res, opDescription + " failed");
- }
- };
-
- checkResult(res, 'shard ' + fullName);
-
- // Set the entire chunk range to a single zone, so balancer will be forced to move the
- // evenly distributed chunks to a shard (selected at random).
- if (res.ok === 1 &&
- ImplicitlyShardAccessCollSettings.getMode() ===
- ImplicitlyShardAccessCollSettings.Modes.kHashedMoveToSingleShard) {
- let shardName =
- db.getSiblingDB('config').shards.aggregate([{$sample: {size: 1}}]).toArray()[0]._id;
-
- checkResult(db.adminCommand({addShardToZone: shardName, zone: kZoneName}),
- 'add ' + shardName + ' to zone ' + kZoneName);
- checkResult(db.adminCommand({
- updateZoneKeyRange: fullName,
- min: {_id: MinKey},
- max: {_id: MaxKey},
- zone: kZoneName
- }),
- 'set zone for ' + fullName);
-
- // Wake up the balancer.
- checkResult(db.adminCommand({balancerStart: 1}), 'turn on balancer');
- }
+ // enableSharding may only be called once for a database.
+ if (res.code !== ErrorCodes.AlreadyInitialized) {
+ assert.commandWorked(res, "enabling sharding on the '" + dbName + "' db failed");
}
- DB.prototype.getCollection = function() {
- var collection = originalGetCollection.apply(this, arguments);
-
- // The following "collStats" command can behave unexpectedly when running in a causal
- // consistency suite with secondary read preference. "collStats" does not support causal
- // consistency, making it possible to see a stale view of the collection if run on a
- // secondary, potentially causing shardCollection() to be called when it shouldn't.
- // E.g. if the collection has just been sharded but not yet visible on the
- // secondary, we could end up calling shardCollection on it again, which would fail.
- //
- // The workaround is to use a TestData flag to temporarily bypass the read preference
- // override.
- const testDataDoNotOverrideReadPreferenceOriginal = TestData.doNotOverrideReadPreference;
- let collStats;
-
- try {
- TestData.doNotOverrideReadPreference = true;
- collStats = this.runCommand({collStats: collection.getName()});
- } finally {
- TestData.doNotOverrideReadPreference = testDataDoNotOverrideReadPreferenceOriginal;
- }
-
- // If the collection is already sharded or is non-empty, do not attempt to shard.
- if (collStats.sharded || collStats.count > 0) {
- return collection;
+ res = db.adminCommand(
+ {shardCollection: fullName, key: {_id: 'hashed'}, collation: {locale: "simple"}});
+
+ let checkResult = function(res, opDescription) {
+ if (res.ok === 0 && testMayRunDropInParallel) {
+ // We ignore ConflictingOperationInProgress error responses from the
+ // "shardCollection" command if it's possible the test was running a "drop" command
+ // concurrently. We could retry running the "shardCollection" command, but tests
+ // that are likely to trigger this case are also likely running the "drop" command
+ // in a loop. We therefore just let the test continue with the collection being
+ // unsharded.
+ assert.commandFailedWithCode(res, ErrorCodes.ConflictingOperationInProgress);
+ jsTest.log("Ignoring failure while " + opDescription +
+ " due to a concurrent drop operation: " + tojson(res));
+ } else {
+ assert.commandWorked(res, opDescription + " failed");
}
+ };
- // Attempt to enable sharding on database and collection if not already done.
- shardCollection(collection);
+ checkResult(res, 'shard ' + fullName);
+
+ // Set the entire chunk range to a single zone, so balancer will be forced to move the
+ // evenly distributed chunks to a shard (selected at random).
+ if (res.ok === 1 &&
+ ImplicitlyShardAccessCollSettings.getMode() ===
+ ImplicitlyShardAccessCollSettings.Modes.kHashedMoveToSingleShard) {
+ let shardName =
+ db.getSiblingDB('config').shards.aggregate([{$sample: {size: 1}}]).toArray()[0]._id;
+
+ checkResult(db.adminCommand({addShardToZone: shardName, zone: kZoneName}),
+ 'add ' + shardName + ' to zone ' + kZoneName);
+ checkResult(db.adminCommand({
+ updateZoneKeyRange: fullName,
+ min: {_id: MinKey},
+ max: {_id: MaxKey},
+ zone: kZoneName
+ }),
+ 'set zone for ' + fullName);
+
+ // Wake up the balancer.
+ checkResult(db.adminCommand({balancerStart: 1}), 'turn on balancer');
+ }
+}
+
+DB.prototype.getCollection = function() {
+ var collection = originalGetCollection.apply(this, arguments);
+
+ // The following "collStats" command can behave unexpectedly when running in a causal
+ // consistency suite with secondary read preference. "collStats" does not support causal
+ // consistency, making it possible to see a stale view of the collection if run on a
+ // secondary, potentially causing shardCollection() to be called when it shouldn't.
+ // E.g. if the collection has just been sharded but not yet visible on the
+ // secondary, we could end up calling shardCollection on it again, which would fail.
+ //
+ // The workaround is to use a TestData flag to temporarily bypass the read preference
+ // override.
+ const testDataDoNotOverrideReadPreferenceOriginal = TestData.doNotOverrideReadPreference;
+ let collStats;
+
+ try {
+ TestData.doNotOverrideReadPreference = true;
+ collStats = this.runCommand({collStats: collection.getName()});
+ } finally {
+ TestData.doNotOverrideReadPreference = testDataDoNotOverrideReadPreferenceOriginal;
+ }
+ // If the collection is already sharded or is non-empty, do not attempt to shard.
+ if (collStats.sharded || collStats.count > 0) {
return collection;
- };
+ }
- DBCollection.prototype.drop = function() {
- var dropResult = originalDBCollectionDrop.apply(this, arguments);
+ // Attempt to enable sharding on database and collection if not already done.
+ shardCollection(collection);
- // Attempt to enable sharding on database and collection if not already done.
- shardCollection(this);
+ return collection;
+};
- return dropResult;
- };
+DBCollection.prototype.drop = function() {
+ var dropResult = originalDBCollectionDrop.apply(this, arguments);
- // The mapReduce command has a special requirement where the command must indicate the output
- // collection is sharded, so we must be sure to add this information in this passthrough.
- Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
- // Skip any commands that are not mapReduce or do not have an 'out' option.
- if (typeof cmdObj !== 'object' || cmdObj === null ||
- (!cmdObj.hasOwnProperty('mapreduce') && !cmdObj.hasOwnProperty('mapReduce')) ||
- !cmdObj.hasOwnProperty('out')) {
- return originalRunCommand.apply(this, arguments);
- }
+ // Attempt to enable sharding on database and collection if not already done.
+ shardCollection(this);
- const originalCmdObj = Object.merge({}, cmdObj);
+ return dropResult;
+};
- // SERVER-5448 'jsMode' is not supported through mongos. The 'jsMode' should not impact the
- // results at all, so can be safely deleted in the sharded environment.
- delete cmdObj.jsMode;
+// The mapReduce command has a special requirement where the command must indicate the output
+// collection is sharded, so we must be sure to add this information in this passthrough.
+Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
+ // Skip any commands that are not mapReduce or do not have an 'out' option.
+ if (typeof cmdObj !== 'object' || cmdObj === null ||
+ (!cmdObj.hasOwnProperty('mapreduce') && !cmdObj.hasOwnProperty('mapReduce')) ||
+ !cmdObj.hasOwnProperty('out')) {
+ return originalRunCommand.apply(this, arguments);
+ }
- // Modify the output options to specify that the collection is sharded.
- let outputSpec = cmdObj.out;
- if (typeof(outputSpec) === "string") {
- this.getDB(dbName)[outputSpec].drop(); // This will implicitly shard it.
- outputSpec = {replace: outputSpec, sharded: true};
- } else if (typeof(outputSpec) !== "object") {
- // This is a malformed command, just send it along.
- return originalRunCommand.apply(this, arguments);
- } else if (!outputSpec.hasOwnProperty("sharded")) {
- let outputColl = null;
- if (outputSpec.hasOwnProperty("replace")) {
- outputColl = outputSpec.replace;
- } else if (outputSpec.hasOwnProperty("merge")) {
- outputColl = outputSpec.merge;
- } else if (outputSpec.hasOwnProperty("reduce")) {
- outputColl = outputSpec.reduce;
- }
+ const originalCmdObj = Object.merge({}, cmdObj);
- if (outputColl === null) {
- // This is a malformed command, just send it along.
- return originalRunCommand.apply(this, arguments);
- }
- this.getDB(dbName)[outputColl].drop(); // This will implicitly shard it.
- outputSpec.sharded = true;
- }
+ // SERVER-5448 'jsMode' is not supported through mongos. The 'jsMode' should not impact the
+ // results at all, so can be safely deleted in the sharded environment.
+ delete cmdObj.jsMode;
- cmdObj.out = outputSpec;
- jsTestLog('Overriding mapReduce command. Original command: ' + tojson(originalCmdObj) +
- ' New command: ' + tojson(cmdObj));
+ // Modify the output options to specify that the collection is sharded.
+ let outputSpec = cmdObj.out;
+ if (typeof (outputSpec) === "string") {
+ this.getDB(dbName)[outputSpec].drop(); // This will implicitly shard it.
+ outputSpec = {replace: outputSpec, sharded: true};
+ } else if (typeof (outputSpec) !== "object") {
+ // This is a malformed command, just send it along.
return originalRunCommand.apply(this, arguments);
- };
-
- // Tests may use a parallel shell to run the "drop" command concurrently with other
- // operations. This can cause the "shardCollection" command to return a
- // ConflictingOperationInProgress error response.
- startParallelShell = function() {
- testMayRunDropInParallel = true;
- return originalStartParallelShell.apply(this, arguments);
- };
+ } else if (!outputSpec.hasOwnProperty("sharded")) {
+ let outputColl = null;
+ if (outputSpec.hasOwnProperty("replace")) {
+ outputColl = outputSpec.replace;
+ } else if (outputSpec.hasOwnProperty("merge")) {
+ outputColl = outputSpec.merge;
+ } else if (outputSpec.hasOwnProperty("reduce")) {
+ outputColl = outputSpec.reduce;
+ }
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/implicitly_shard_accessed_collections.js");
+ if (outputColl === null) {
+ // This is a malformed command, just send it along.
+ return originalRunCommand.apply(this, arguments);
+ }
+ this.getDB(dbName)[outputColl].drop(); // This will implicitly shard it.
+ outputSpec.sharded = true;
+ }
+ cmdObj.out = outputSpec;
+ jsTestLog('Overriding mapReduce command. Original command: ' + tojson(originalCmdObj) +
+ ' New command: ' + tojson(cmdObj));
+ return originalRunCommand.apply(this, arguments);
+};
+
+// Tests may use a parallel shell to run the "drop" command concurrently with other
+// operations. This can cause the "shardCollection" command to return a
+// ConflictingOperationInProgress error response.
+startParallelShell = function() {
+ testMayRunDropInParallel = true;
+ return originalStartParallelShell.apply(this, arguments);
+};
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/implicitly_shard_accessed_collections.js");
}());
diff --git a/jstests/libs/override_methods/implicitly_wrap_pipelines_in_facets.js b/jstests/libs/override_methods/implicitly_wrap_pipelines_in_facets.js
index 84da15b1b8f..55bc6f36f06 100644
--- a/jstests/libs/override_methods/implicitly_wrap_pipelines_in_facets.js
+++ b/jstests/libs/override_methods/implicitly_wrap_pipelines_in_facets.js
@@ -4,73 +4,72 @@
* yield the same results, but stress the logic of the $facet stage.
*/
(function() {
- 'use strict';
+'use strict';
- // Set the batch size of the $facet stage's buffer to be lower. This will further stress the
- // batching logic, since most pipelines will fall below the default size of 100MB.
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalQueryFacetBufferSizeBytes: 1000}));
+// Set the batch size of the $facet stage's buffer to be lower. This will further stress the
+// batching logic, since most pipelines will fall below the default size of 100MB.
+assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryFacetBufferSizeBytes: 1000}));
- // Save a reference to the original runCommand method in the IIFE's scope.
- // This scoping allows the original method to be called by the override below.
- var originalRunCommand = Mongo.prototype.runCommand;
+// Save a reference to the original runCommand method in the IIFE's scope.
+// This scoping allows the original method to be called by the override below.
+var originalRunCommand = Mongo.prototype.runCommand;
- Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
- // Skip wrapping the pipeline in a $facet stage if it's not an aggregation, or if it's
- // possibly an invalid one without a pipeline.
- if (typeof cmdObj !== 'object' || cmdObj === null || !cmdObj.hasOwnProperty('aggregate') ||
- !cmdObj.hasOwnProperty('pipeline') || !Array.isArray(cmdObj.pipeline)) {
- return originalRunCommand.apply(this, arguments);
- }
+Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
+ // Skip wrapping the pipeline in a $facet stage if it's not an aggregation, or if it's
+ // possibly an invalid one without a pipeline.
+ if (typeof cmdObj !== 'object' || cmdObj === null || !cmdObj.hasOwnProperty('aggregate') ||
+ !cmdObj.hasOwnProperty('pipeline') || !Array.isArray(cmdObj.pipeline)) {
+ return originalRunCommand.apply(this, arguments);
+ }
- var originalPipeline = cmdObj.pipeline;
+ var originalPipeline = cmdObj.pipeline;
+
+ if (originalPipeline.length === 0) {
+ // Empty pipelines are disallowed within a $facet stage.
+ print('Not wrapping empty pipeline in a $facet stage');
+ return originalRunCommand.apply(this, arguments);
+ }
- if (originalPipeline.length === 0) {
- // Empty pipelines are disallowed within a $facet stage.
- print('Not wrapping empty pipeline in a $facet stage');
+ const stagesDisallowedInsideFacet =
+ ['$changeStream', '$collStats', '$facet', '$geoNear', '$indexStats', '$merge', '$out'];
+ for (let stageSpec of originalPipeline) {
+ // Skip wrapping the pipeline in a $facet stage if it has an invalid stage
+ // specification.
+ if (typeof stageSpec !== 'object' || stageSpec === null) {
+ print('Not wrapping invalid pipeline in a $facet stage');
return originalRunCommand.apply(this, arguments);
}
- const stagesDisallowedInsideFacet =
- ['$changeStream', '$collStats', '$facet', '$geoNear', '$indexStats', '$merge', '$out'];
- for (let stageSpec of originalPipeline) {
- // Skip wrapping the pipeline in a $facet stage if it has an invalid stage
- // specification.
- if (typeof stageSpec !== 'object' || stageSpec === null) {
- print('Not wrapping invalid pipeline in a $facet stage');
+ if (stageSpec.hasOwnProperty('$match') && typeof stageSpec.$match === 'object' &&
+ stageSpec.$match !== null) {
+ if (stageSpec.$match.hasOwnProperty('$text')) {
+ // A $text search is disallowed within a $facet stage.
+ print('Not wrapping $text in a $facet stage');
return originalRunCommand.apply(this, arguments);
}
-
- if (stageSpec.hasOwnProperty('$match') && typeof stageSpec.$match === 'object' &&
- stageSpec.$match !== null) {
- if (stageSpec.$match.hasOwnProperty('$text')) {
- // A $text search is disallowed within a $facet stage.
- print('Not wrapping $text in a $facet stage');
- return originalRunCommand.apply(this, arguments);
- }
- if (Object.keys(stageSpec.$match).length === 0) {
- // Skip wrapping an empty $match stage, since it can be optimized out, resulting
- // in an empty pipeline which is disallowed within a $facet stage.
- print('Not wrapping empty $match in a $facet stage');
- return originalRunCommand.apply(this, arguments);
- }
+ if (Object.keys(stageSpec.$match).length === 0) {
+ // Skip wrapping an empty $match stage, since it can be optimized out, resulting
+ // in an empty pipeline which is disallowed within a $facet stage.
+ print('Not wrapping empty $match in a $facet stage');
+ return originalRunCommand.apply(this, arguments);
}
+ }
- // Skip wrapping the pipeline in a $facet stage if it contains a stage disallowed inside
- // a $facet.
- for (let disallowedStage of stagesDisallowedInsideFacet) {
- if (stageSpec.hasOwnProperty(disallowedStage)) {
- print('Not wrapping ' + disallowedStage + ' in a $facet stage');
- return originalRunCommand.apply(this, arguments);
- }
+ // Skip wrapping the pipeline in a $facet stage if it contains a stage disallowed inside
+ // a $facet.
+ for (let disallowedStage of stagesDisallowedInsideFacet) {
+ if (stageSpec.hasOwnProperty(disallowedStage)) {
+ print('Not wrapping ' + disallowedStage + ' in a $facet stage');
+ return originalRunCommand.apply(this, arguments);
}
}
+ }
- cmdObj.pipeline = [
- {$facet: {originalPipeline: originalPipeline}},
- {$unwind: '$originalPipeline'},
- {$replaceRoot: {newRoot: '$originalPipeline'}},
- ];
- return originalRunCommand.apply(this, arguments);
- };
+ cmdObj.pipeline = [
+ {$facet: {originalPipeline: originalPipeline}},
+ {$unwind: '$originalPipeline'},
+ {$replaceRoot: {newRoot: '$originalPipeline'}},
+ ];
+ return originalRunCommand.apply(this, arguments);
+};
}());
diff --git a/jstests/libs/override_methods/mongos_manual_intervention_actions.js b/jstests/libs/override_methods/mongos_manual_intervention_actions.js
index 802778b6ec1..fb0a7080585 100644
--- a/jstests/libs/override_methods/mongos_manual_intervention_actions.js
+++ b/jstests/libs/override_methods/mongos_manual_intervention_actions.js
@@ -41,7 +41,7 @@ var ManualInterventionActions = (function() {
", dropping the collection, and retrying the command.");
removeChunks(mongosConn, ns);
- const[dbName, collName] = ns.split(".");
+ const [dbName, collName] = ns.split(".");
assert.commandWorked(
mongosConn.getDB(dbName).runCommand({"drop": collName, writeConcern: {w: "majority"}}));
};
@@ -51,64 +51,63 @@ var ManualInterventionActions = (function() {
(function() {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- Mongo.prototype.runCommand = function runCommand(dbName, cmdObj, options) {
- const cmdName = Object.keys(cmdObj)[0];
- const commandsToRetry =
- new Set(["mapReduce", "mapreduce", "shardCollection", "shardcollection"]);
+Mongo.prototype.runCommand = function runCommand(dbName, cmdObj, options) {
+ const cmdName = Object.keys(cmdObj)[0];
+ const commandsToRetry =
+ new Set(["mapReduce", "mapreduce", "shardCollection", "shardcollection"]);
- if (!commandsToRetry.has(cmdName)) {
- return mongoRunCommandOriginal.apply(this, arguments);
- }
+ if (!commandsToRetry.has(cmdName)) {
+ return mongoRunCommandOriginal.apply(this, arguments);
+ }
+
+ const maxAttempts = 10;
+ let numAttempts = 0;
+ let res;
- const maxAttempts = 10;
- let numAttempts = 0;
- let res;
+ while (numAttempts < maxAttempts) {
+ res = mongoRunCommandOriginal.apply(this, arguments);
+ ++numAttempts;
- while (numAttempts < maxAttempts) {
- res = mongoRunCommandOriginal.apply(this, arguments);
- ++numAttempts;
+ if (res.ok === 1 || res.code !== ErrorCodes.ManualInterventionRequired ||
+ numAttempts === maxAttempts) {
+ break;
+ }
- if (res.ok === 1 || res.code !== ErrorCodes.ManualInterventionRequired ||
- numAttempts === maxAttempts) {
+ print("Manual intervention retry attempt# " + numAttempts +
+ " because of error: " + tojson(res));
+
+ if (cmdName === "shardCollection" || cmdName === "shardcollection") {
+ const ns = cmdObj[cmdName];
+ ManualInterventionActions.removePartiallyWrittenChunks(this, ns, cmdObj, numAttempts);
+ } else if (cmdName === "mapReduce" || cmdName === "mapreduce") {
+ const out = cmdObj.out;
+
+ // The output collection can be specified as a string argument to the mapReduce
+ // command's 'out' option, or nested under 'out.replace', 'out.merge', or
+ // 'out.reduce'.
+ let outCollName;
+ if (typeof out === "string") {
+ outCollName = out;
+ } else if (typeof out === "object") {
+ outCollName = out.replace || out.merge || out.reduce;
+ } else {
+ print("Could not parse the output collection's name from 'out' option in " +
+ tojson(cmdObj) + "; not retrying on ManualInterventionRequired error " +
+ tojson(res));
break;
}
- print("Manual intervention retry attempt# " + numAttempts + " because of error: " +
- tojson(res));
-
- if (cmdName === "shardCollection" || cmdName === "shardcollection") {
- const ns = cmdObj[cmdName];
- ManualInterventionActions.removePartiallyWrittenChunks(
- this, ns, cmdObj, numAttempts);
- } else if (cmdName === "mapReduce" || cmdName === "mapreduce") {
- const out = cmdObj.out;
-
- // The output collection can be specified as a string argument to the mapReduce
- // command's 'out' option, or nested under 'out.replace', 'out.merge', or
- // 'out.reduce'.
- let outCollName;
- if (typeof out === "string") {
- outCollName = out;
- } else if (typeof out === "object") {
- outCollName = out.replace || out.merge || out.reduce;
- } else {
- print("Could not parse the output collection's name from 'out' option in " +
- tojson(cmdObj) + "; not retrying on ManualInterventionRequired error " +
- tojson(res));
- break;
- }
-
- // The output collection's database can optionally be specified under 'out.db',
- // else it defaults to the input collection's database.
- const outDbName = out.db || dbName;
-
- const ns = outDbName + "." + outCollName;
- ManualInterventionActions.removePartiallyWrittenChunksAndDropCollection(
- this, ns, cmdObj, numAttempts);
- }
+ // The output collection's database can optionally be specified under 'out.db',
+ // else it defaults to the input collection's database.
+ const outDbName = out.db || dbName;
+
+ const ns = outDbName + "." + outCollName;
+ ManualInterventionActions.removePartiallyWrittenChunksAndDropCollection(
+ this, ns, cmdObj, numAttempts);
}
- return res;
- };
+ }
+ return res;
+};
})();
diff --git a/jstests/libs/override_methods/network_error_and_txn_override.js b/jstests/libs/override_methods/network_error_and_txn_override.js
index 440f10d3c50..56cea366daa 100644
--- a/jstests/libs/override_methods/network_error_and_txn_override.js
+++ b/jstests/libs/override_methods/network_error_and_txn_override.js
@@ -26,1092 +26,1078 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/error_code_utils.js");
- load('jstests/libs/override_methods/override_helpers.js');
- load("jstests/libs/override_methods/read_and_write_concern_helpers.js");
- load("jstests/libs/retryable_writes_util.js");
- load("jstests/libs/transactions_util.js");
-
- // Truncates the 'print' output if it's too long to print.
- const kMaxPrintLength = 5000;
- const kNumPrintEndChars = kMaxPrintLength / 2;
- const originalPrint = print;
- print = function(msg) {
- if (typeof msg !== "string") {
- originalPrint(msg);
- return;
- }
-
- const len = msg.length;
- if (len <= kMaxPrintLength) {
- originalPrint(msg);
- return;
- }
-
- originalPrint(
- `${msg.substr(0, kNumPrintEndChars)}...${msg.substr(len - kNumPrintEndChars)}`);
- };
-
- function configuredForNetworkRetry() {
- assert(TestData.networkErrorAndTxnOverrideConfig, TestData);
- return TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors &&
- !jsTest.options().skipRetryOnNetworkError;
- }
-
- function configuredForTxnOverride() {
- assert(TestData.networkErrorAndTxnOverrideConfig, TestData);
- return TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions;
+"use strict";
+
+load("jstests/libs/error_code_utils.js");
+load('jstests/libs/override_methods/override_helpers.js');
+load("jstests/libs/override_methods/read_and_write_concern_helpers.js");
+load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/transactions_util.js");
+
+// Truncates the 'print' output if it's too long to print.
+const kMaxPrintLength = 5000;
+const kNumPrintEndChars = kMaxPrintLength / 2;
+const originalPrint = print;
+print = function(msg) {
+ if (typeof msg !== "string") {
+ originalPrint(msg);
+ return;
}
- // Commands assumed to not be blindly retryable.
- const kNonRetryableCommands = new Set([
- // Commands that take write concern and do not support txnNumbers.
- "_configsvrAddShard",
- "_configsvrAddShardToZone",
- "_configsvrCommitChunkMerge",
- "_configsvrCommitChunkMigration",
- "_configsvrCommitChunkSplit",
- "_configsvrCreateDatabase",
- "_configsvrEnableSharding",
- "_configsvrMoveChunk",
- "_configsvrMovePrimary",
- "_configsvrRemoveShard",
- "_configsvrRemoveShardFromZone",
- "_configsvrShardCollection",
- "_configsvrUpdateZoneKeyRange",
- "_mergeAuthzCollections",
- "_recvChunkStart",
- "appendOplogNote",
- "applyOps",
- "captrunc",
- "cleanupOrphaned",
- "clone",
- "cloneCollection",
- "cloneCollectionAsCapped",
- "collMod",
- "convertToCapped",
- "create",
- "createIndexes",
- "createRole",
- "createUser",
- "deleteIndexes",
- "drop",
- "dropAllRolesFromDatabase",
- "dropAllUsersFromDatabase",
- "dropDatabase",
- "dropIndexes",
- "dropRole",
- "dropUser",
- "emptycapped",
- "godinsert",
- "grantPrivilegesToRole",
- "grantRolesToRole",
- "grantRolesToUser",
- "mapreduce.shardedfinish",
- "moveChunk",
- "renameCollection",
- "revokePrivilegesFromRole",
- "revokeRolesFromRole",
- "revokeRolesFromUser",
- "updateRole",
- "updateUser",
- ]);
-
- // These commands are not idempotent because they return errors if retried after successfully
- // completing (like IndexNotFound, NamespaceExists, etc.), but because they only take effect
- // once, and many tests use them to set up state, their errors on retries are handled specially.
- const kAcceptableNonRetryableCommands = new Set([
- "create",
- "createIndexes",
- "deleteIndexes",
- "drop",
- "dropDatabase", // Already ignores NamespaceNotFound errors, so not handled below.
- "dropIndexes",
- ]);
-
- // Returns if the given failed response is a safe response to ignore when retrying the
- // given command type.
- function isAcceptableRetryFailedResponse(cmdName, res) {
- assert(!res.ok, res);
- return ((cmdName === "create" && res.code === ErrorCodes.NamespaceExists) ||
- (cmdName === "createIndexes" && res.code === ErrorCodes.IndexAlreadyExists) ||
- (cmdName === "drop" && res.code === ErrorCodes.NamespaceNotFound) ||
- ((cmdName === "dropIndexes" || cmdName === "deleteIndexes") &&
- res.code === ErrorCodes.IndexNotFound));
+ const len = msg.length;
+ if (len <= kMaxPrintLength) {
+ originalPrint(msg);
+ return;
}
- const kCmdsThatInsert = new Set([
- 'insert',
- 'update',
- 'findAndModify',
- 'findandmodify',
- ]);
-
- // Commands that may return different values or fail if retried on a new primary after a
- // failover.
- const kNonFailoverTolerantCommands = new Set([
- "currentOp", // Failovers can change currentOp output.
- "getLog", // The log is different on different servers.
- "killOp", // Failovers may interrupt operations intended to be killed later in the test.
- "logRotate",
- "planCacheClear", // The plan cache isn't replicated.
- "planCacheClearFilters",
- "planCacheListFilters",
- "planCacheListPlans",
- "planCacheListQueryShapes",
- "planCacheSetFilter",
- "profile", // Not replicated, so can't tolerate failovers.
- "setParameter", // Not replicated, so can't tolerate failovers.
- "stageDebug",
- "startSession", // Sessions are flushed to disk asynchronously.
- ]);
-
- function isCommitOrAbort(cmdName) {
- return cmdName === "commitTransaction" || cmdName === "abortTransaction";
+ originalPrint(`${msg.substr(0, kNumPrintEndChars)}...${msg.substr(len - kNumPrintEndChars)}`);
+};
+
+function configuredForNetworkRetry() {
+ assert(TestData.networkErrorAndTxnOverrideConfig, TestData);
+ return TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors &&
+ !jsTest.options().skipRetryOnNetworkError;
+}
+
+function configuredForTxnOverride() {
+ assert(TestData.networkErrorAndTxnOverrideConfig, TestData);
+ return TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions;
+}
+
+// Commands assumed to not be blindly retryable.
+const kNonRetryableCommands = new Set([
+ // Commands that take write concern and do not support txnNumbers.
+ "_configsvrAddShard",
+ "_configsvrAddShardToZone",
+ "_configsvrCommitChunkMerge",
+ "_configsvrCommitChunkMigration",
+ "_configsvrCommitChunkSplit",
+ "_configsvrCreateDatabase",
+ "_configsvrEnableSharding",
+ "_configsvrMoveChunk",
+ "_configsvrMovePrimary",
+ "_configsvrRemoveShard",
+ "_configsvrRemoveShardFromZone",
+ "_configsvrShardCollection",
+ "_configsvrUpdateZoneKeyRange",
+ "_mergeAuthzCollections",
+ "_recvChunkStart",
+ "appendOplogNote",
+ "applyOps",
+ "captrunc",
+ "cleanupOrphaned",
+ "clone",
+ "cloneCollection",
+ "cloneCollectionAsCapped",
+ "collMod",
+ "convertToCapped",
+ "create",
+ "createIndexes",
+ "createRole",
+ "createUser",
+ "deleteIndexes",
+ "drop",
+ "dropAllRolesFromDatabase",
+ "dropAllUsersFromDatabase",
+ "dropDatabase",
+ "dropIndexes",
+ "dropRole",
+ "dropUser",
+ "emptycapped",
+ "godinsert",
+ "grantPrivilegesToRole",
+ "grantRolesToRole",
+ "grantRolesToUser",
+ "mapreduce.shardedfinish",
+ "moveChunk",
+ "renameCollection",
+ "revokePrivilegesFromRole",
+ "revokeRolesFromRole",
+ "revokeRolesFromUser",
+ "updateRole",
+ "updateUser",
+]);
+
+// These commands are not idempotent because they return errors if retried after successfully
+// completing (like IndexNotFound, NamespaceExists, etc.), but because they only take effect
+// once, and many tests use them to set up state, their errors on retries are handled specially.
+const kAcceptableNonRetryableCommands = new Set([
+ "create",
+ "createIndexes",
+ "deleteIndexes",
+ "drop",
+ "dropDatabase", // Already ignores NamespaceNotFound errors, so not handled below.
+ "dropIndexes",
+]);
+
+// Returns if the given failed response is a safe response to ignore when retrying the
+// given command type.
+function isAcceptableRetryFailedResponse(cmdName, res) {
+ assert(!res.ok, res);
+ return ((cmdName === "create" && res.code === ErrorCodes.NamespaceExists) ||
+ (cmdName === "createIndexes" && res.code === ErrorCodes.IndexAlreadyExists) ||
+ (cmdName === "drop" && res.code === ErrorCodes.NamespaceNotFound) ||
+ ((cmdName === "dropIndexes" || cmdName === "deleteIndexes") &&
+ res.code === ErrorCodes.IndexNotFound));
+}
+
+const kCmdsThatInsert = new Set([
+ 'insert',
+ 'update',
+ 'findAndModify',
+ 'findandmodify',
+]);
+
+// Commands that may return different values or fail if retried on a new primary after a
+// failover.
+const kNonFailoverTolerantCommands = new Set([
+ "currentOp", // Failovers can change currentOp output.
+ "getLog", // The log is different on different servers.
+ "killOp", // Failovers may interrupt operations intended to be killed later in the test.
+ "logRotate",
+ "planCacheClear", // The plan cache isn't replicated.
+ "planCacheClearFilters",
+ "planCacheListFilters",
+ "planCacheListPlans",
+ "planCacheListQueryShapes",
+ "planCacheSetFilter",
+ "profile", // Not replicated, so can't tolerate failovers.
+ "setParameter", // Not replicated, so can't tolerate failovers.
+ "stageDebug",
+ "startSession", // Sessions are flushed to disk asynchronously.
+]);
+
+function isCommitOrAbort(cmdName) {
+ return cmdName === "commitTransaction" || cmdName === "abortTransaction";
+}
+
+function isCmdInTransaction(cmdObj) {
+ return cmdObj.hasOwnProperty("autocommit");
+}
+
+// Returns if the given command on the given database can retry network errors.
+function canRetryNetworkErrorForCommand(cmdName, cmdObj) {
+ if (!configuredForNetworkRetry()) {
+ return false;
}
- function isCmdInTransaction(cmdObj) {
- return cmdObj.hasOwnProperty("autocommit");
+ if (isCmdInTransaction(cmdObj)) {
+ // Commands in transactions cannot be retried at the statement level, except for the
+ // commit and abort.
+ return isCommitOrAbort(cmdName);
}
- // Returns if the given command on the given database can retry network errors.
- function canRetryNetworkErrorForCommand(cmdName, cmdObj) {
- if (!configuredForNetworkRetry()) {
- return false;
+ return true;
+}
+
+// Several commands that use the plan executor swallow the actual error code from a failed plan
+// into their error message and instead return OperationFailed.
+//
+// TODO SERVER-32208: Remove this function once it is no longer needed.
+function isRetryableExecutorCodeAndMessage(code, msg) {
+ return code === ErrorCodes.OperationFailed && typeof msg !== "undefined" &&
+ msg.indexOf("InterruptedDueToReplStateChange") >= 0;
+}
+
+// Returns true if the given response could have come from shardCollection being interrupted by
+// a failover.
+function isRetryableShardCollectionResponse(res) {
+ // shardCollection can bury the original error code in the error message.
+ return RetryableWritesUtil.errmsgContainsRetryableCodeName(res.errmsg) ||
+ // shardCollection creates collections on each shard that will receive a chunk using
+ // _cloneCollectionsOptionsFromPrimaryShard, which may fail with either of the following
+ // codes if interupted by a failover.
+ res.code === ErrorCodes.CallbackCanceled || res.code === 17405;
+}
+
+function hasError(res) {
+ return res.ok !== 1 || res.writeErrors;
+}
+
+function hasWriteConcernError(res) {
+ return res.hasOwnProperty("writeConcernError");
+}
+
+// Tracks if the current command is being run in a network retry. This is specifically for
+// retries that this file initiates, not ones that retryable writes initiates.
+let inCommandNetworkErrorRetry = false;
+
+// "Command ID" is an identifier for a given command being overridden. This is to track what log
+// messages come from what commands. This override is highly recursive and this is helpful for
+// debugging that recursion and following what commands initiated other commands.
+let currentCommandID = [];
+let newestCommandID = 0;
+
+// The "nesting level" specifies if this is a top level command or a command being recursively
+// run by the override itself.
+let nestingLevel = 0;
+function isNested() {
+ assert.gt(nestingLevel, 0);
+ return nestingLevel !== 1;
+}
+
+// An object that tracks the current stmtId and txnNumber of the most recently run transaction.
+let txnOptions = {
+ stmtId: new NumberInt(0),
+ txnNumber: new NumberLong(-1),
+};
+
+// Array to hold pairs of (dbName, cmdObj) that will be iterated over when retrying an entire
+// transaction.
+let ops = [];
+function clearOpsList() {
+ ops = [];
+}
+
+// The (initially empty) set of cursors belonging to aggregation operations that executed
+// outside of a transaction. Any getMore operations on these cursors must also execute outside
+// of a transaction. The set stores key/value pairs where the key is a cursor id and the value
+// is the true boolean value.
+let nonTxnAggCursorSet = {};
+
+// Set the max number of operations to run in a transaction. Once we've hit this number of
+// operations, we will commit the transaction. This is to prevent having to retry an extremely
+// long running transaction.
+const maxOpsInTransaction = 10;
+
+const kLogPrefix = "=-=-=-=";
+
+function logErrorFull(msg, cmdName, cmdObj, res) {
+ print(`${kLogPrefix} ${msg} :: ${cmdName}, CommandID: ${currentCommandID},` +
+ ` error: ${tojsononeline(res)}, command: ${tojsononeline(cmdObj)}`);
+ assert.eq(nestingLevel, currentCommandID.length);
+}
+
+function logMsgFull(msgHeader, msgFooter) {
+ print(`${kLogPrefix} ${msgHeader} :: CommandID: ${currentCommandID}, msg: ${msgFooter}`);
+ assert.eq(nestingLevel, currentCommandID.length);
+}
+
+// Validate the command before running it, to prevent tests with non-retryable commands
+// from being run.
+function validateCmdNetworkErrorCompatibility(cmdName, cmdObj) {
+ assert(!inCommandNetworkErrorRetry);
+ assert(!isNested());
+
+ const isRetryableWriteCmd = RetryableWritesUtil.isRetryableWriteCmdName(cmdName);
+ const canRetryWrites = _ServerSession.canRetryWrites(cmdObj);
+ const logSuffix = " CmdName: " + cmdName + ", CmdObj: " + tojson(cmdObj);
+
+ if (isRetryableWriteCmd && !canRetryWrites) {
+ throw new Error("Refusing to run a test that issues non-retryable write operations" +
+ " since the test likely makes assertions on the write results and" +
+ " can lead to spurious failures if a network error occurs." + logSuffix);
+ } else if (cmdName === "getMore") {
+ throw new Error(
+ "Refusing to run a test that issues a getMore command since if a network error" +
+ " occurs during it then we won't know whether the cursor was advanced or not." +
+ logSuffix);
+ } else if (kNonRetryableCommands.has(cmdName) &&
+ !kAcceptableNonRetryableCommands.has(cmdName)) {
+ throw new Error(
+ "Refusing to run a test that issues commands that are not blindly retryable, " +
+ logSuffix);
+ } else if (kNonFailoverTolerantCommands.has(cmdName)) {
+ throw new Error(
+ "Refusing to run a test that issues commands that may return different values" +
+ " after a failover, " + logSuffix);
+ } else if (cmdName === "aggregate") {
+ var stages = cmdObj.pipeline;
+
+ // $listLocalSessions must be the first stage in the pipeline.
+ const firstStage =
+ stages && Array.isArray(stages) && (stages.length > 0) ? stages[0] : undefined;
+ const hasListLocalStage = firstStage && (typeof firstStage === "object") &&
+ firstStage.hasOwnProperty("$listLocalSessions");
+ if (hasListLocalStage) {
+ throw new Error("Refusing to run a test that issues an aggregation command with" +
+ " $listLocalSessions because it relies on in-memory" +
+ " state that may not survive failovers." + logSuffix);
}
- if (isCmdInTransaction(cmdObj)) {
- // Commands in transactions cannot be retried at the statement level, except for the
- // commit and abort.
- return isCommitOrAbort(cmdName);
+ // Aggregate can be either a read or a write depending on whether it has a $out stage.
+ // $out is required to be the last stage of the pipeline.
+ const lastStage = stages && Array.isArray(stages) && (stages.length !== 0)
+ ? stages[stages.length - 1]
+ : undefined;
+ const hasOut =
+ lastStage && (typeof lastStage === "object") && lastStage.hasOwnProperty("$out");
+ if (hasOut) {
+ throw new Error("Refusing to run a test that issues an aggregation command" +
+ " with $out because it is not retryable." + logSuffix);
}
- return true;
- }
-
- // Several commands that use the plan executor swallow the actual error code from a failed plan
- // into their error message and instead return OperationFailed.
- //
- // TODO SERVER-32208: Remove this function once it is no longer needed.
- function isRetryableExecutorCodeAndMessage(code, msg) {
- return code === ErrorCodes.OperationFailed && typeof msg !== "undefined" &&
- msg.indexOf("InterruptedDueToReplStateChange") >= 0;
- }
-
- // Returns true if the given response could have come from shardCollection being interrupted by
- // a failover.
- function isRetryableShardCollectionResponse(res) {
- // shardCollection can bury the original error code in the error message.
- return RetryableWritesUtil.errmsgContainsRetryableCodeName(res.errmsg) ||
- // shardCollection creates collections on each shard that will receive a chunk using
- // _cloneCollectionsOptionsFromPrimaryShard, which may fail with either of the following
- // codes if interupted by a failover.
- res.code === ErrorCodes.CallbackCanceled || res.code === 17405;
- }
-
- function hasError(res) {
- return res.ok !== 1 || res.writeErrors;
- }
-
- function hasWriteConcernError(res) {
- return res.hasOwnProperty("writeConcernError");
- }
-
- // Tracks if the current command is being run in a network retry. This is specifically for
- // retries that this file initiates, not ones that retryable writes initiates.
- let inCommandNetworkErrorRetry = false;
-
- // "Command ID" is an identifier for a given command being overridden. This is to track what log
- // messages come from what commands. This override is highly recursive and this is helpful for
- // debugging that recursion and following what commands initiated other commands.
- let currentCommandID = [];
- let newestCommandID = 0;
-
- // The "nesting level" specifies if this is a top level command or a command being recursively
- // run by the override itself.
- let nestingLevel = 0;
- function isNested() {
- assert.gt(nestingLevel, 0);
- return nestingLevel !== 1;
- }
-
- // An object that tracks the current stmtId and txnNumber of the most recently run transaction.
- let txnOptions = {
- stmtId: new NumberInt(0),
- txnNumber: new NumberLong(-1),
- };
-
- // Array to hold pairs of (dbName, cmdObj) that will be iterated over when retrying an entire
- // transaction.
- let ops = [];
- function clearOpsList() {
- ops = [];
- }
-
- // The (initially empty) set of cursors belonging to aggregation operations that executed
- // outside of a transaction. Any getMore operations on these cursors must also execute outside
- // of a transaction. The set stores key/value pairs where the key is a cursor id and the value
- // is the true boolean value.
- let nonTxnAggCursorSet = {};
-
- // Set the max number of operations to run in a transaction. Once we've hit this number of
- // operations, we will commit the transaction. This is to prevent having to retry an extremely
- // long running transaction.
- const maxOpsInTransaction = 10;
-
- const kLogPrefix = "=-=-=-=";
-
- function logErrorFull(msg, cmdName, cmdObj, res) {
- print(`${kLogPrefix} ${msg} :: ${cmdName}, CommandID: ${currentCommandID},` +
- ` error: ${tojsononeline(res)}, command: ${tojsononeline(cmdObj)}`);
- assert.eq(nestingLevel, currentCommandID.length);
- }
-
- function logMsgFull(msgHeader, msgFooter) {
- print(`${kLogPrefix} ${msgHeader} :: CommandID: ${currentCommandID}, msg: ${msgFooter}`);
- assert.eq(nestingLevel, currentCommandID.length);
- }
-
- // Validate the command before running it, to prevent tests with non-retryable commands
- // from being run.
- function validateCmdNetworkErrorCompatibility(cmdName, cmdObj) {
- assert(!inCommandNetworkErrorRetry);
- assert(!isNested());
-
- const isRetryableWriteCmd = RetryableWritesUtil.isRetryableWriteCmdName(cmdName);
- const canRetryWrites = _ServerSession.canRetryWrites(cmdObj);
- const logSuffix = " CmdName: " + cmdName + ", CmdObj: " + tojson(cmdObj);
-
- if (isRetryableWriteCmd && !canRetryWrites) {
- throw new Error("Refusing to run a test that issues non-retryable write operations" +
- " since the test likely makes assertions on the write results and" +
- " can lead to spurious failures if a network error occurs." +
- logSuffix);
- } else if (cmdName === "getMore") {
- throw new Error(
- "Refusing to run a test that issues a getMore command since if a network error" +
- " occurs during it then we won't know whether the cursor was advanced or not." +
- logSuffix);
- } else if (kNonRetryableCommands.has(cmdName) &&
- !kAcceptableNonRetryableCommands.has(cmdName)) {
+ const hasExplain = cmdObj.hasOwnProperty("explain");
+ if (hasExplain) {
throw new Error(
- "Refusing to run a test that issues commands that are not blindly retryable, " +
+ "Refusing to run a test that issues an aggregation command with explain" +
+ " because it may return incomplete results if interrupted by a stepdown." +
logSuffix);
- } else if (kNonFailoverTolerantCommands.has(cmdName)) {
- throw new Error(
- "Refusing to run a test that issues commands that may return different values" +
- " after a failover, " + logSuffix);
- } else if (cmdName === "aggregate") {
- var stages = cmdObj.pipeline;
-
- // $listLocalSessions must be the first stage in the pipeline.
- const firstStage =
- stages && Array.isArray(stages) && (stages.length > 0) ? stages[0] : undefined;
- const hasListLocalStage = firstStage && (typeof firstStage === "object") &&
- firstStage.hasOwnProperty("$listLocalSessions");
- if (hasListLocalStage) {
- throw new Error("Refusing to run a test that issues an aggregation command with" +
- " $listLocalSessions because it relies on in-memory" +
- " state that may not survive failovers." + logSuffix);
- }
-
- // Aggregate can be either a read or a write depending on whether it has a $out stage.
- // $out is required to be the last stage of the pipeline.
- const lastStage = stages && Array.isArray(stages) && (stages.length !== 0)
- ? stages[stages.length - 1]
- : undefined;
- const hasOut =
- lastStage && (typeof lastStage === "object") && lastStage.hasOwnProperty("$out");
- if (hasOut) {
- throw new Error("Refusing to run a test that issues an aggregation command" +
- " with $out because it is not retryable." + logSuffix);
- }
-
- const hasExplain = cmdObj.hasOwnProperty("explain");
- if (hasExplain) {
- throw new Error(
- "Refusing to run a test that issues an aggregation command with explain" +
- " because it may return incomplete results if interrupted by a stepdown." +
- logSuffix);
- }
- } else if (cmdName === "mapReduce" || cmdName === "mapreduce") {
- throw new Error(
- "Refusing to run a test that issues a mapReduce command, because it calls " +
- " std::terminate() if interrupted by a stepdown." + logSuffix);
}
+ } else if (cmdName === "mapReduce" || cmdName === "mapreduce") {
+ throw new Error(
+ "Refusing to run a test that issues a mapReduce command, because it calls " +
+ " std::terminate() if interrupted by a stepdown." + logSuffix);
+ }
+}
+
+// Default read concern level to use for transactions. Snapshot read concern is not supported in
+// sharded transactions when majority reads are disabled.
+const kDefaultTransactionReadConcernLevel =
+ TestData.hasOwnProperty("defaultTransactionReadConcernLevel")
+ ? TestData.defaultTransactionReadConcernLevel
+ : (TestData.enableMajorityReadConcern !== false ? "snapshot" : "local");
+
+const kDefaultTransactionWriteConcernW = TestData.hasOwnProperty("defaultTransactionWriteConcernW")
+ ? TestData.defaultTransactionWriteConcernW
+ : "majority";
+
+// Default read concern level to use for commands that are not transactions.
+const kDefaultReadConcernLevel = (function() {
+ if (TestData.hasOwnProperty("defaultReadConcernLevel")) {
+ return TestData.defaultReadConcernLevel;
}
- // Default read concern level to use for transactions. Snapshot read concern is not supported in
- // sharded transactions when majority reads are disabled.
- const kDefaultTransactionReadConcernLevel =
- TestData.hasOwnProperty("defaultTransactionReadConcernLevel")
- ? TestData.defaultTransactionReadConcernLevel
- : (TestData.enableMajorityReadConcern !== false ? "snapshot" : "local");
-
- const kDefaultTransactionWriteConcernW =
- TestData.hasOwnProperty("defaultTransactionWriteConcernW")
- ? TestData.defaultTransactionWriteConcernW
- : "majority";
-
- // Default read concern level to use for commands that are not transactions.
- const kDefaultReadConcernLevel = (function() {
- if (TestData.hasOwnProperty("defaultReadConcernLevel")) {
- return TestData.defaultReadConcernLevel;
- }
-
- // Use majority if the suite didn't specify a level, unless the variant doesn't support it.
- return TestData.enableMajorityReadConcern !== false ? "majority" : "local";
- })();
+ // Use majority if the suite didn't specify a level, unless the variant doesn't support it.
+ return TestData.enableMajorityReadConcern !== false ? "majority" : "local";
+})();
- // Default write concern w to use for both transactions and non-transactions.
- const kDefaultWriteConcernW = TestData.hasOwnProperty("defaultWriteConcernW")
- ? TestData.defaultWriteConcernW
- : "majority";
+// Default write concern w to use for both transactions and non-transactions.
+const kDefaultWriteConcernW =
+ TestData.hasOwnProperty("defaultWriteConcernW") ? TestData.defaultWriteConcernW : "majority";
- // Use a "signature" value that won't typically match a value assigned in normal use. This way
- // the wtimeout set by this override is distinguishable in the server logs.
- const kDefaultWtimeout = 5 * 60 * 1000 + 567;
+// Use a "signature" value that won't typically match a value assigned in normal use. This way
+// the wtimeout set by this override is distinguishable in the server logs.
+const kDefaultWtimeout = 5 * 60 * 1000 + 567;
- function appendReadAndWriteConcern(conn, dbName, cmdName, cmdObj) {
- let shouldForceReadConcern = kCommandsSupportingReadConcern.has(cmdName);
- let shouldForceWriteConcern = kCommandsSupportingWriteConcern.has(cmdName);
+function appendReadAndWriteConcern(conn, dbName, cmdName, cmdObj) {
+ let shouldForceReadConcern = kCommandsSupportingReadConcern.has(cmdName);
+ let shouldForceWriteConcern = kCommandsSupportingWriteConcern.has(cmdName);
- if (isCmdInTransaction(cmdObj)) {
- shouldForceReadConcern = false;
- if (cmdObj.startTransaction === true) {
- shouldForceReadConcern = true;
- }
- if (!kCommandsSupportingWriteConcernInTransaction.has(cmdName)) {
- shouldForceWriteConcern = false;
- }
- } else if (cmdName === "aggregate") {
- if (OverrideHelpers.isAggregationWithListLocalSessionsStage(cmdName, cmdObj) ||
- OverrideHelpers.isAggregationWithChangeStreamStage(cmdName, cmdObj)) {
- // The $listLocalSessions stage can only be used with readConcern={level: "local"},
- // and the $changeStream stage can only be used with
- // readConcern={level: "majority"}.
- shouldForceReadConcern = false;
- }
-
- if (OverrideHelpers.isAggregationWithOutOrMergeStage(cmdName, cmdObj)) {
- // The $out stage can only be used with readConcern={level: "local"}.
- shouldForceReadConcern = false;
- } else {
- // A writeConcern can only be used with a $out stage.
- shouldForceWriteConcern = false;
- }
-
- if (cmdObj.explain) {
- // Attempting to specify a readConcern while explaining an aggregation would always
- // return an error prior to SERVER-30582 and it is otherwise only compatible with
- // readConcern={level: "local"}.
- shouldForceReadConcern = false;
- }
- } else if (OverrideHelpers.isMapReduceWithInlineOutput(cmdName, cmdObj)) {
- // A writeConcern can only be used with non-inline output.
+ if (isCmdInTransaction(cmdObj)) {
+ shouldForceReadConcern = false;
+ if (cmdObj.startTransaction === true) {
+ shouldForceReadConcern = true;
+ }
+ if (!kCommandsSupportingWriteConcernInTransaction.has(cmdName)) {
shouldForceWriteConcern = false;
}
+ } else if (cmdName === "aggregate") {
+ if (OverrideHelpers.isAggregationWithListLocalSessionsStage(cmdName, cmdObj) ||
+ OverrideHelpers.isAggregationWithChangeStreamStage(cmdName, cmdObj)) {
+ // The $listLocalSessions stage can only be used with readConcern={level: "local"},
+ // and the $changeStream stage can only be used with
+ // readConcern={level: "majority"}.
+ shouldForceReadConcern = false;
+ }
- // If we're retrying on network errors the write concern should already be majority.
- if ((cmdName === 'drop' || cmdName === 'convertToCapped') && configuredForTxnOverride() &&
- !configuredForNetworkRetry()) {
- // Convert all collection drops to w:majority so they won't prevent subsequent
- // operations in transactions from failing when failing to acquire collection locks.
- cmdObj.writeConcern =
- cmdObj.writeConcern || {w: "majority", wtimeout: kDefaultWtimeout};
+ if (OverrideHelpers.isAggregationWithOutOrMergeStage(cmdName, cmdObj)) {
+ // The $out stage can only be used with readConcern={level: "local"}.
+ shouldForceReadConcern = false;
+ } else {
+ // A writeConcern can only be used with a $out stage.
shouldForceWriteConcern = false;
}
- if (shouldForceReadConcern) {
- let readConcernLevel;
- if (cmdObj.startTransaction === true) {
- readConcernLevel = kDefaultTransactionReadConcernLevel;
- } else {
- readConcernLevel = kDefaultReadConcernLevel;
- }
+ if (cmdObj.explain) {
+ // Attempting to specify a readConcern while explaining an aggregation would always
+ // return an error prior to SERVER-30582 and it is otherwise only compatible with
+ // readConcern={level: "local"}.
+ shouldForceReadConcern = false;
+ }
+ } else if (OverrideHelpers.isMapReduceWithInlineOutput(cmdName, cmdObj)) {
+ // A writeConcern can only be used with non-inline output.
+ shouldForceWriteConcern = false;
+ }
- if (cmdObj.hasOwnProperty("readConcern") &&
- cmdObj.readConcern.hasOwnProperty("level") &&
- cmdObj.readConcern.level !== readConcernLevel) {
- throw new Error("refusing to override existing readConcern " +
- cmdObj.readConcern.level + " with readConcern " + readConcernLevel);
- } else {
- cmdObj.readConcern = {level: readConcernLevel};
- }
+ // If we're retrying on network errors the write concern should already be majority.
+ if ((cmdName === 'drop' || cmdName === 'convertToCapped') && configuredForTxnOverride() &&
+ !configuredForNetworkRetry()) {
+ // Convert all collection drops to w:majority so they won't prevent subsequent
+ // operations in transactions from failing when failing to acquire collection locks.
+ cmdObj.writeConcern = cmdObj.writeConcern || {w: "majority", wtimeout: kDefaultWtimeout};
+ shouldForceWriteConcern = false;
+ }
- // Only attach afterClusterTime if causal consistency is explicitly enabled. Note, it is
- // OK to send a readConcern with only afterClusterTime, which is interpreted as local
- // read concern by the server.
- if (TestData.hasOwnProperty("sessionOptions") &&
- TestData.sessionOptions.causalConsistency === true) {
- const driverSession = conn.getDB(dbName).getSession();
- const operationTime = driverSession.getOperationTime();
- if (operationTime !== undefined) {
- // The command object should always have a readConcern by this point.
- cmdObj.readConcern.afterClusterTime = operationTime;
- }
- }
+ if (shouldForceReadConcern) {
+ let readConcernLevel;
+ if (cmdObj.startTransaction === true) {
+ readConcernLevel = kDefaultTransactionReadConcernLevel;
+ } else {
+ readConcernLevel = kDefaultReadConcernLevel;
}
- if (shouldForceWriteConcern) {
- if (cmdObj.hasOwnProperty("writeConcern")) {
- let writeConcern = cmdObj.writeConcern;
- if (typeof writeConcern !== "object" || writeConcern === null ||
- (writeConcern.hasOwnProperty("w") &&
- bsonWoCompare({_: writeConcern.w}, {_: kDefaultWriteConcernW}) !== 0)) {
- throw new Error("Cowardly refusing to override write concern of command: " +
- tojson(cmdObj));
- }
- }
+ if (cmdObj.hasOwnProperty("readConcern") && cmdObj.readConcern.hasOwnProperty("level") &&
+ cmdObj.readConcern.level !== readConcernLevel) {
+ throw new Error("refusing to override existing readConcern " +
+ cmdObj.readConcern.level + " with readConcern " + readConcernLevel);
+ } else {
+ cmdObj.readConcern = {level: readConcernLevel};
+ }
- if (kCommandsSupportingWriteConcernInTransaction.has(cmdName)) {
- cmdObj.writeConcern = {
- w: kDefaultTransactionWriteConcernW,
- wtimeout: kDefaultWtimeout
- };
- } else {
- cmdObj.writeConcern = {w: kDefaultWriteConcernW, wtimeout: kDefaultWtimeout};
+ // Only attach afterClusterTime if causal consistency is explicitly enabled. Note, it is
+ // OK to send a readConcern with only afterClusterTime, which is interpreted as local
+ // read concern by the server.
+ if (TestData.hasOwnProperty("sessionOptions") &&
+ TestData.sessionOptions.causalConsistency === true) {
+ const driverSession = conn.getDB(dbName).getSession();
+ const operationTime = driverSession.getOperationTime();
+ if (operationTime !== undefined) {
+ // The command object should always have a readConcern by this point.
+ cmdObj.readConcern.afterClusterTime = operationTime;
}
}
}
- // Commits the given transaction. Throws on failure to commit.
- function commitTransaction(conn, lsid, txnNumber) {
- assert(configuredForTxnOverride());
- assert.gte(txnNumber, 0);
-
- logMsgFull('commitTransaction',
- `Committing transaction ${txnNumber} on session ${tojsononeline(lsid)}`);
-
- // Running the command on conn will reenter from the top of `runCommandOverride`, retrying
- // as needed.
- assert.commandWorked(conn.adminCommand({
- commitTransaction: 1,
- autocommit: false,
- lsid: lsid,
- txnNumber: txnNumber,
- }));
-
- // We've successfully committed the transaction, so we can forget the ops we've successfully
- // run.
- clearOpsList();
- }
-
- function abortTransaction(conn, lsid, txnNumber) {
- assert(configuredForTxnOverride());
- assert.gte(txnNumber, 0);
-
- logMsgFull('abortTransaction',
- `Aborting transaction ${txnNumber} on session ${tojsononeline(lsid)}`);
-
- // Running the command on conn will reenter from the top of `runCommandOverride`, retrying
- // as needed.
- const res = conn.adminCommand({
- abortTransaction: 1,
- autocommit: false,
- lsid: lsid,
- txnNumber: txnNumber,
- });
+ if (shouldForceWriteConcern) {
+ if (cmdObj.hasOwnProperty("writeConcern")) {
+ let writeConcern = cmdObj.writeConcern;
+ if (typeof writeConcern !== "object" || writeConcern === null ||
+ (writeConcern.hasOwnProperty("w") &&
+ bsonWoCompare({_: writeConcern.w}, {_: kDefaultWriteConcernW}) !== 0)) {
+ throw new Error("Cowardly refusing to override write concern of command: " +
+ tojson(cmdObj));
+ }
+ }
- // Transient transaction errors mean the transaction has aborted, so consider it a success.
- if (TransactionsUtil.isTransientTransactionError(res)) {
- return;
+ if (kCommandsSupportingWriteConcernInTransaction.has(cmdName)) {
+ cmdObj.writeConcern = {w: kDefaultTransactionWriteConcernW, wtimeout: kDefaultWtimeout};
+ } else {
+ cmdObj.writeConcern = {w: kDefaultWriteConcernW, wtimeout: kDefaultWtimeout};
}
- assert.commandWorked(res);
}
-
- function startNewTransaction(conn, cmdObj) {
- // Bump the txnNumber and reset the stmtId.
- txnOptions.txnNumber = new NumberLong(txnOptions.txnNumber + 1);
- txnOptions.stmtId = new NumberInt(1);
-
- // Used to communicate the txnNumber to unittests.
- TestData.currentTxnOverrideTxnNumber = txnOptions.txnNumber;
-
- cmdObj.startTransaction = true;
- return txnOptions.txnNumber;
+}
+
+// Commits the given transaction. Throws on failure to commit.
+function commitTransaction(conn, lsid, txnNumber) {
+ assert(configuredForTxnOverride());
+ assert.gte(txnNumber, 0);
+
+ logMsgFull('commitTransaction',
+ `Committing transaction ${txnNumber} on session ${tojsononeline(lsid)}`);
+
+ // Running the command on conn will reenter from the top of `runCommandOverride`, retrying
+ // as needed.
+ assert.commandWorked(conn.adminCommand({
+ commitTransaction: 1,
+ autocommit: false,
+ lsid: lsid,
+ txnNumber: txnNumber,
+ }));
+
+ // We've successfully committed the transaction, so we can forget the ops we've successfully
+ // run.
+ clearOpsList();
+}
+
+function abortTransaction(conn, lsid, txnNumber) {
+ assert(configuredForTxnOverride());
+ assert.gte(txnNumber, 0);
+
+ logMsgFull('abortTransaction',
+ `Aborting transaction ${txnNumber} on session ${tojsononeline(lsid)}`);
+
+ // Running the command on conn will reenter from the top of `runCommandOverride`, retrying
+ // as needed.
+ const res = conn.adminCommand({
+ abortTransaction: 1,
+ autocommit: false,
+ lsid: lsid,
+ txnNumber: txnNumber,
+ });
+
+ // Transient transaction errors mean the transaction has aborted, so consider it a success.
+ if (TransactionsUtil.isTransientTransactionError(res)) {
+ return;
}
-
- function calculateStmtIdInc(cmdName, cmdObj) {
- // Reserve the statement ids for batch writes.
- try {
- switch (cmdName) {
- case "insert":
- return cmdObj.documents.length;
- case "update":
- return cmdObj.updates.length;
- case "delete":
- return cmdObj.deletes.length;
- default:
- return 1;
- }
- } catch (e) {
- // Malformed command objects can cause errors to be thrown.
- return 1;
+ assert.commandWorked(res);
+}
+
+function startNewTransaction(conn, cmdObj) {
+ // Bump the txnNumber and reset the stmtId.
+ txnOptions.txnNumber = new NumberLong(txnOptions.txnNumber + 1);
+ txnOptions.stmtId = new NumberInt(1);
+
+ // Used to communicate the txnNumber to unittests.
+ TestData.currentTxnOverrideTxnNumber = txnOptions.txnNumber;
+
+ cmdObj.startTransaction = true;
+ return txnOptions.txnNumber;
+}
+
+function calculateStmtIdInc(cmdName, cmdObj) {
+ // Reserve the statement ids for batch writes.
+ try {
+ switch (cmdName) {
+ case "insert":
+ return cmdObj.documents.length;
+ case "update":
+ return cmdObj.updates.length;
+ case "delete":
+ return cmdObj.deletes.length;
+ default:
+ return 1;
}
+ } catch (e) {
+ // Malformed command objects can cause errors to be thrown.
+ return 1;
}
+}
- function continueTransaction(conn, dbName, cmdName, cmdObj) {
- cmdObj.txnNumber = txnOptions.txnNumber;
- cmdObj.stmtId = txnOptions.stmtId;
- cmdObj.autocommit = false;
+function continueTransaction(conn, dbName, cmdName, cmdObj) {
+ cmdObj.txnNumber = txnOptions.txnNumber;
+ cmdObj.stmtId = txnOptions.stmtId;
+ cmdObj.autocommit = false;
- // Bump the stmtId for the next statement. We do this after so that the stmtIds start at 1.
- txnOptions.stmtId = new NumberInt(txnOptions.stmtId + calculateStmtIdInc(cmdName, cmdObj));
+ // Bump the stmtId for the next statement. We do this after so that the stmtIds start at 1.
+ txnOptions.stmtId = new NumberInt(txnOptions.stmtId + calculateStmtIdInc(cmdName, cmdObj));
- // This function expects to get a command without any read or write concern properties.
- assert(!cmdObj.hasOwnProperty('readConcern'), cmdObj);
- assert(!cmdObj.hasOwnProperty('writeConcern'), cmdObj);
+ // This function expects to get a command without any read or write concern properties.
+ assert(!cmdObj.hasOwnProperty('readConcern'), cmdObj);
+ assert(!cmdObj.hasOwnProperty('writeConcern'), cmdObj);
- // If this is the first time we are running this command, push it to the ops array.
- if (!isNested() && !inCommandNetworkErrorRetry) {
- // Make a copy so the command does not get changed by the test.
- const objCopy = TransactionsUtil.deepCopyObject({}, cmdObj);
+ // If this is the first time we are running this command, push it to the ops array.
+ if (!isNested() && !inCommandNetworkErrorRetry) {
+ // Make a copy so the command does not get changed by the test.
+ const objCopy = TransactionsUtil.deepCopyObject({}, cmdObj);
- // Empty transaction state that needs to be refreshed. The stmtId and startTransaction
- // fields shouldn't need to be refreshed.
- delete objCopy.txnNumber;
- delete objCopy.$clusterTime;
+ // Empty transaction state that needs to be refreshed. The stmtId and startTransaction
+ // fields shouldn't need to be refreshed.
+ delete objCopy.txnNumber;
+ delete objCopy.$clusterTime;
- ops.push({
- dbName: dbName,
- cmdObj: objCopy,
- });
- }
+ ops.push({
+ dbName: dbName,
+ cmdObj: objCopy,
+ });
}
-
- // Returns true iff a command is a "getMore" on a cursor that is in the `nonTxnAggCursorSet`
- // dictionary of cursors that were created outside of any transaction.
- function isCommandNonTxnGetMore(cmdName, cmdObj) {
- return cmdName === "getMore" && nonTxnAggCursorSet[cmdObj.getMore];
+}
+
+// Returns true iff a command is a "getMore" on a cursor that is in the `nonTxnAggCursorSet`
+// dictionary of cursors that were created outside of any transaction.
+function isCommandNonTxnGetMore(cmdName, cmdObj) {
+ return cmdName === "getMore" && nonTxnAggCursorSet[cmdObj.getMore];
+}
+
+function setupTransactionCommand(conn, dbName, cmdName, cmdObj, lsid) {
+ // We want to overwrite whatever read and write concern is already set.
+ delete cmdObj.readConcern;
+ delete cmdObj.writeConcern;
+
+ // If sessions are explicitly disabled for this command, we skip overriding it to
+ // use transactions.
+ const driverSession = conn.getDB(dbName).getSession();
+ const commandSupportsTransaction = TransactionsUtil.commandSupportsTxn(dbName, cmdName, cmdObj);
+ if (commandSupportsTransaction && driverSession.getSessionId() !== null &&
+ !isCommandNonTxnGetMore(cmdName, cmdObj)) {
+ if (isNested()) {
+ // Nested commands should never start a new transaction.
+ } else if (ops.length === 0) {
+ // We should never end a transaction on a getMore.
+ assert.neq(cmdName, "getMore", cmdObj);
+ startNewTransaction(conn, cmdObj);
+ } else if (cmdName === "getMore") {
+ // If the command is a getMore, we cannot consider ending the transaction.
+ } else if (ops.length >= maxOpsInTransaction) {
+ logMsgFull('setupTransactionCommand',
+ `Committing transaction ${txnOptions.txnNumber} on session` +
+ ` ${tojsononeline(lsid)} because we have hit max ops length`);
+ commitTransaction(conn, lsid, txnOptions.txnNumber);
+ startNewTransaction(conn, cmdObj);
+ }
+ continueTransaction(conn, dbName, cmdName, cmdObj);
+
+ } else {
+ if (ops.length > 0 && !isNested()) {
+ logMsgFull('setupTransactionCommand',
+ `Committing transaction ${txnOptions.txnNumber} on session` +
+ ` ${tojsononeline(lsid)} to run a command that does not support` +
+ ` transactions: ${cmdName}`);
+ commitTransaction(conn, lsid, txnOptions.txnNumber);
+ }
}
+ appendReadAndWriteConcern(conn, dbName, cmdName, cmdObj);
+}
+
+// Retries the entire transaction without committing it. Returns immediately on an error with
+// the response from the failed command. This may recursively retry the entire transaction in
+// which case parent retries are completed early.
+function retryEntireTransaction(conn, lsid) {
+ // Re-run every command in the ops array.
+ assert.gt(ops.length, 0);
+
+ // Keep track of what txnNumber this retry is attempting.
+ const retriedTxnNumber = startNewTransaction(conn, {"ignored object": 1});
+
+ logMsgFull('Retrying entire transaction',
+ `txnNumber: ${retriedTxnNumber}, lsid: ${tojsononeline(lsid)}`);
+ let res;
+ for (let op of ops) {
+ logMsgFull('Retrying op',
+ `txnNumber: ${retriedTxnNumber}, lsid: ${tojsononeline(lsid)},` +
+ ` db: ${op.dbName}, op: ${tojsononeline(op.cmdObj)}`);
+ // Running the command on conn will reenter from the top of `runCommandOverride`,
+ // individual statement retries will be suppressed by tracking nesting level.
+ res = conn.getDB(op.dbName).runCommand(op.cmdObj);
+
+ if (hasError(res) || hasWriteConcernError(res)) {
+ return res;
+ }
+ // Sanity check that we checked for an error correctly.
+ assert.commandWorked(res);
- function setupTransactionCommand(conn, dbName, cmdName, cmdObj, lsid) {
- // We want to overwrite whatever read and write concern is already set.
- delete cmdObj.readConcern;
- delete cmdObj.writeConcern;
-
- // If sessions are explicitly disabled for this command, we skip overriding it to
- // use transactions.
- const driverSession = conn.getDB(dbName).getSession();
- const commandSupportsTransaction =
- TransactionsUtil.commandSupportsTxn(dbName, cmdName, cmdObj);
- if (commandSupportsTransaction && driverSession.getSessionId() !== null &&
- !isCommandNonTxnGetMore(cmdName, cmdObj)) {
- if (isNested()) {
- // Nested commands should never start a new transaction.
- } else if (ops.length === 0) {
- // We should never end a transaction on a getMore.
- assert.neq(cmdName, "getMore", cmdObj);
- startNewTransaction(conn, cmdObj);
- } else if (cmdName === "getMore") {
- // If the command is a getMore, we cannot consider ending the transaction.
- } else if (ops.length >= maxOpsInTransaction) {
- logMsgFull('setupTransactionCommand',
- `Committing transaction ${txnOptions.txnNumber} on session` +
- ` ${tojsononeline(lsid)} because we have hit max ops length`);
- commitTransaction(conn, lsid, txnOptions.txnNumber);
- startNewTransaction(conn, cmdObj);
- }
- continueTransaction(conn, dbName, cmdName, cmdObj);
-
- } else {
- if (ops.length > 0 && !isNested()) {
- logMsgFull('setupTransactionCommand',
- `Committing transaction ${txnOptions.txnNumber} on session` +
- ` ${tojsononeline(lsid)} to run a command that does not support` +
- ` transactions: ${cmdName}`);
- commitTransaction(conn, lsid, txnOptions.txnNumber);
- }
+ // If we recursively retried the entire transaction, we do not want to continue this
+ // retry. We just pass up the response from the retry that completed.
+ if (txnOptions.txnNumber !== retriedTxnNumber) {
+ return res;
}
- appendReadAndWriteConcern(conn, dbName, cmdName, cmdObj);
}
- // Retries the entire transaction without committing it. Returns immediately on an error with
- // the response from the failed command. This may recursively retry the entire transaction in
- // which case parent retries are completed early.
- function retryEntireTransaction(conn, lsid) {
- // Re-run every command in the ops array.
- assert.gt(ops.length, 0);
+ // We do not commit the transaction and let it continue in the next operation.
+ return res;
+}
+
+// Creates the given collection, retrying if needed. Throws on failure.
+function createCollectionExplicitly(conn, dbName, collName, lsid) {
+ logMsgFull(
+ 'create',
+ `Explicitly creating collection ${dbName}.${collName} and then retrying transaction`);
+
+ // Always majority commit the create because this is not expected to roll back once
+ // successful.
+ const createCmdObj = {
+ create: collName,
+ lsid: lsid,
+ writeConcern: {w: 'majority'},
+ };
- // Keep track of what txnNumber this retry is attempting.
- const retriedTxnNumber = startNewTransaction(conn, {"ignored object": 1});
-
- logMsgFull('Retrying entire transaction',
- `txnNumber: ${retriedTxnNumber}, lsid: ${tojsononeline(lsid)}`);
- let res;
- for (let op of ops) {
- logMsgFull('Retrying op',
- `txnNumber: ${retriedTxnNumber}, lsid: ${tojsononeline(lsid)},` +
- ` db: ${op.dbName}, op: ${tojsononeline(op.cmdObj)}`);
- // Running the command on conn will reenter from the top of `runCommandOverride`,
- // individual statement retries will be suppressed by tracking nesting level.
- res = conn.getDB(op.dbName).runCommand(op.cmdObj);
-
- if (hasError(res) || hasWriteConcernError(res)) {
- return res;
- }
- // Sanity check that we checked for an error correctly.
- assert.commandWorked(res);
+ // Running the command on conn will reenter from the top of `runCommandOverride`, retrying
+ // as needed. If an error returned by `create` were tolerable, it would already have been
+ // retried by the time it surfaced here.
+ assert.commandWorked(conn.getDB(dbName).runCommand(createCmdObj));
+}
+
+// Processes the response to the command if we are configured for txn override. Performs retries
+// if necessary for implicit collection creation or transient transaction errors.
+// Returns the last command response received by a command or retry.
+function retryWithTxnOverride(res, conn, dbName, cmdName, cmdObj, lsid, logError) {
+ assert(configuredForTxnOverride());
+
+ const failedOnCRUDStatement =
+ hasError(res) && !isCommitOrAbort(cmdName) && isCmdInTransaction(cmdObj);
+ if (failedOnCRUDStatement) {
+ assert.gt(ops.length, 0);
+ abortTransaction(conn, lsid, txnOptions.txnNumber);
+
+ // If the command inserted data and is not supported in a transaction, we assume it
+ // failed because the collection did not exist. We will create the collection and retry
+ // the entire transaction. We should not receive this error in this override for any
+ // other reason.
+ // Tests that expect collections to not exist will have to be skipped.
+ if (kCmdsThatInsert.has(cmdName) &&
+ includesErrorCode(res, ErrorCodes.OperationNotSupportedInTransaction)) {
+ const collName = cmdObj[cmdName];
+ createCollectionExplicitly(conn, dbName, collName, lsid);
- // If we recursively retried the entire transaction, we do not want to continue this
- // retry. We just pass up the response from the retry that completed.
- if (txnOptions.txnNumber !== retriedTxnNumber) {
- return res;
- }
+ return retryEntireTransaction(conn, lsid);
}
- // We do not commit the transaction and let it continue in the next operation.
- return res;
+ // Transaction statements cannot be retried, but retryable codes are expected to succeed
+ // on full transaction retry.
+ if (configuredForNetworkRetry() && RetryableWritesUtil.isRetryableCode(res.code)) {
+ logError("Retrying on retryable error for transaction statement");
+ return retryEntireTransaction(conn, lsid);
+ }
}
- // Creates the given collection, retrying if needed. Throws on failure.
- function createCollectionExplicitly(conn, dbName, collName, lsid) {
- logMsgFull(
- 'create',
- `Explicitly creating collection ${dbName}.${collName} and then retrying transaction`);
-
- // Always majority commit the create because this is not expected to roll back once
- // successful.
- const createCmdObj = {
- create: collName,
- lsid: lsid,
- writeConcern: {w: 'majority'},
- };
+ // Transient transaction errors should retry the entire transaction. A
+ // TransientTransactionError on "abortTransaction" is considered a success.
+ if (TransactionsUtil.isTransientTransactionError(res) && cmdName !== "abortTransaction") {
+ logError("Retrying on TransientTransactionError response");
+ res = retryEntireTransaction(conn, lsid);
- // Running the command on conn will reenter from the top of `runCommandOverride`, retrying
- // as needed. If an error returned by `create` were tolerable, it would already have been
- // retried by the time it surfaced here.
- assert.commandWorked(conn.getDB(dbName).runCommand(createCmdObj));
+ // If we got a TransientTransactionError on 'commitTransaction' retrying the transaction
+ // will not retry it, so we retry it here.
+ if (!hasError(res) && cmdName === "commitTransaction") {
+ commitTransaction(conn, lsid, txnOptions.txnNumber);
+ }
+ return res;
}
- // Processes the response to the command if we are configured for txn override. Performs retries
- // if necessary for implicit collection creation or transient transaction errors.
- // Returns the last command response received by a command or retry.
- function retryWithTxnOverride(res, conn, dbName, cmdName, cmdObj, lsid, logError) {
- assert(configuredForTxnOverride());
+ return res;
+}
- const failedOnCRUDStatement =
- hasError(res) && !isCommitOrAbort(cmdName) && isCmdInTransaction(cmdObj);
- if (failedOnCRUDStatement) {
- assert.gt(ops.length, 0);
- abortTransaction(conn, lsid, txnOptions.txnNumber);
+// Returns true if any error code in a response's "raw" field is retryable.
+function rawResponseHasRetryableError(rawRes, cmdName, logError) {
+ for (let shard in rawRes) {
+ const shardRes = rawRes[shard];
- // If the command inserted data and is not supported in a transaction, we assume it
- // failed because the collection did not exist. We will create the collection and retry
- // the entire transaction. We should not receive this error in this override for any
- // other reason.
- // Tests that expect collections to not exist will have to be skipped.
- if (kCmdsThatInsert.has(cmdName) &&
- includesErrorCode(res, ErrorCodes.OperationNotSupportedInTransaction)) {
- const collName = cmdObj[cmdName];
- createCollectionExplicitly(conn, dbName, collName, lsid);
-
- return retryEntireTransaction(conn, lsid);
- }
+ const logShardError = (msg) => {
+ const msgWithShardPrefix = `Processing raw response from shard: ${shard} :: ${msg}`;
+ logError(msgWithShardPrefix);
+ };
- // Transaction statements cannot be retried, but retryable codes are expected to succeed
- // on full transaction retry.
- if (configuredForNetworkRetry() && RetryableWritesUtil.isRetryableCode(res.code)) {
- logError("Retrying on retryable error for transaction statement");
- return retryEntireTransaction(conn, lsid);
- }
+ // Don't override the responses from each shard because only the top-level code in a
+ // response is used to determine if a command succeeded or not.
+ const networkRetryShardRes = shouldRetryWithNetworkErrorOverride(
+ shardRes, cmdName, logShardError, false /* shouldOverrideAcceptableError */);
+ if (networkRetryShardRes === kContinue) {
+ return true;
}
-
- // Transient transaction errors should retry the entire transaction. A
- // TransientTransactionError on "abortTransaction" is considered a success.
- if (TransactionsUtil.isTransientTransactionError(res) && cmdName !== "abortTransaction") {
- logError("Retrying on TransientTransactionError response");
- res = retryEntireTransaction(conn, lsid);
-
- // If we got a TransientTransactionError on 'commitTransaction' retrying the transaction
- // will not retry it, so we retry it here.
- if (!hasError(res) && cmdName === "commitTransaction") {
- commitTransaction(conn, lsid, txnOptions.txnNumber);
- }
- return res;
+ }
+ return false;
+}
+
+const kContinue = Object.create(null);
+
+// Processes the command response if we are configured for network error retries. Returns the
+// provided response if we should not retry in this override. Returns kContinue if we should
+// retry the current command without subtracting from our retry allocation. By default sets ok=1
+// for failures with acceptable error codes, unless shouldOverrideAcceptableError is false.
+function shouldRetryWithNetworkErrorOverride(
+ res, cmdName, logError, shouldOverrideAcceptableError = true) {
+ assert(configuredForNetworkRetry());
+
+ if (RetryableWritesUtil.isRetryableWriteCmdName(cmdName)) {
+ if ((cmdName === "findandmodify" || cmdName === "findAndModify") &&
+ isRetryableExecutorCodeAndMessage(res.code, res.errmsg)) {
+ // findAndModify can fail during the find stage and return an executor error.
+ logError("Retrying because of executor interruption");
+ return kContinue;
}
+ // Don't interfere with retryable writes.
return res;
}
- // Returns true if any error code in a response's "raw" field is retryable.
- function rawResponseHasRetryableError(rawRes, cmdName, logError) {
- for (let shard in rawRes) {
- const shardRes = rawRes[shard];
-
- const logShardError = (msg) => {
- const msgWithShardPrefix = `Processing raw response from shard: ${shard} :: ${msg}`;
- logError(msgWithShardPrefix);
- };
-
- // Don't override the responses from each shard because only the top-level code in a
- // response is used to determine if a command succeeded or not.
- const networkRetryShardRes = shouldRetryWithNetworkErrorOverride(
- shardRes, cmdName, logShardError, false /* shouldOverrideAcceptableError */);
- if (networkRetryShardRes === kContinue) {
- return true;
- }
- }
- return false;
+ // commitTransaction should be retried on any write concern error.
+ if (cmdName === "commitTransaction" && hasWriteConcernError(res)) {
+ logError("Retrying write concern error response for commitTransaction");
+ return kContinue;
}
- const kContinue = Object.create(null);
-
- // Processes the command response if we are configured for network error retries. Returns the
- // provided response if we should not retry in this override. Returns kContinue if we should
- // retry the current command without subtracting from our retry allocation. By default sets ok=1
- // for failures with acceptable error codes, unless shouldOverrideAcceptableError is false.
- function shouldRetryWithNetworkErrorOverride(
- res, cmdName, logError, shouldOverrideAcceptableError = true) {
- assert(configuredForNetworkRetry());
-
- if (RetryableWritesUtil.isRetryableWriteCmdName(cmdName)) {
- if ((cmdName === "findandmodify" || cmdName === "findAndModify") &&
- isRetryableExecutorCodeAndMessage(res.code, res.errmsg)) {
- // findAndModify can fail during the find stage and return an executor error.
- logError("Retrying because of executor interruption");
- return kContinue;
- }
-
- // Don't interfere with retryable writes.
- return res;
+ if (cmdName === "explain") {
+ // If an explain is interrupted by a stepdown, and it returns before its connection is
+ // closed, it will return incomplete results. To prevent failing the test, force retries
+ // of interrupted explains.
+ if (res.hasOwnProperty("executionStats") && !res.executionStats.executionSuccess &&
+ (RetryableWritesUtil.isRetryableCode(res.executionStats.errorCode) ||
+ isRetryableExecutorCodeAndMessage(res.executionStats.errorCode,
+ res.executionStats.errorMessage))) {
+ logError("Forcing retry of interrupted explain");
+ return kContinue;
}
- // commitTransaction should be retried on any write concern error.
- if (cmdName === "commitTransaction" && hasWriteConcernError(res)) {
- logError("Retrying write concern error response for commitTransaction");
+ // An explain command can fail if its child command cannot be run on the current server.
+ // This can be hit if a primary only or not explicitly slaveOk command is accepted by a
+ // primary node that then steps down and returns before having its connection closed.
+ if (!res.ok && res.errmsg.indexOf("child command cannot run on this node") >= 0) {
+ logError("Forcing retry of explain likely interrupted by transition to secondary");
return kContinue;
}
+ }
- if (cmdName === "explain") {
- // If an explain is interrupted by a stepdown, and it returns before its connection is
- // closed, it will return incomplete results. To prevent failing the test, force retries
- // of interrupted explains.
- if (res.hasOwnProperty("executionStats") && !res.executionStats.executionSuccess &&
- (RetryableWritesUtil.isRetryableCode(res.executionStats.errorCode) ||
- isRetryableExecutorCodeAndMessage(res.executionStats.errorCode,
- res.executionStats.errorMessage))) {
- logError("Forcing retry of interrupted explain");
- return kContinue;
- }
-
- // An explain command can fail if its child command cannot be run on the current server.
- // This can be hit if a primary only or not explicitly slaveOk command is accepted by a
- // primary node that then steps down and returns before having its connection closed.
- if (!res.ok && res.errmsg.indexOf("child command cannot run on this node") >= 0) {
- logError("Forcing retry of explain likely interrupted by transition to secondary");
- return kContinue;
- }
+ if (!res.ok) {
+ if (RetryableWritesUtil.isRetryableCode(res.code)) {
+ // Don't decrement retries, because the command returned before the connection was
+ // closed, so a subsequent attempt will receive a network error (or NotMaster error)
+ // and need to retry.
+ logError("Retrying failed response with retryable code");
+ return kContinue;
}
- if (!res.ok) {
- if (RetryableWritesUtil.isRetryableCode(res.code)) {
- // Don't decrement retries, because the command returned before the connection was
- // closed, so a subsequent attempt will receive a network error (or NotMaster error)
- // and need to retry.
- logError("Retrying failed response with retryable code");
- return kContinue;
- }
-
- if (isRetryableExecutorCodeAndMessage(res.code, res.errmsg)) {
- logError("Retrying because of executor interruption");
- return kContinue;
- }
+ if (isRetryableExecutorCodeAndMessage(res.code, res.errmsg)) {
+ logError("Retrying because of executor interruption");
+ return kContinue;
+ }
- // listCollections and listIndexes called through mongos may return OperationFailed if
- // the request to establish a cursor on the targeted shard fails with a network error.
- //
- // TODO SERVER-30949: Remove this check once those two commands retry on retryable
- // errors automatically.
- if ((cmdName === "listCollections" || cmdName === "listIndexes") &&
- res.code === ErrorCodes.OperationFailed && res.hasOwnProperty("errmsg") &&
- res.errmsg.indexOf("failed to read command response from shard") >= 0) {
- logError("Retrying failed mongos cursor command");
- return kContinue;
- }
+ // listCollections and listIndexes called through mongos may return OperationFailed if
+ // the request to establish a cursor on the targeted shard fails with a network error.
+ //
+ // TODO SERVER-30949: Remove this check once those two commands retry on retryable
+ // errors automatically.
+ if ((cmdName === "listCollections" || cmdName === "listIndexes") &&
+ res.code === ErrorCodes.OperationFailed && res.hasOwnProperty("errmsg") &&
+ res.errmsg.indexOf("failed to read command response from shard") >= 0) {
+ logError("Retrying failed mongos cursor command");
+ return kContinue;
+ }
- // Thrown when an index build is interrupted during its collection scan.
- if (cmdName === "createIndexes" && res.codeName === "InterruptedDueToReplStateChange") {
- logError("Retrying because of interrupted collection scan");
- return kContinue;
- }
+ // Thrown when an index build is interrupted during its collection scan.
+ if (cmdName === "createIndexes" && res.codeName === "InterruptedDueToReplStateChange") {
+ logError("Retrying because of interrupted collection scan");
+ return kContinue;
+ }
- // Some sharding commands return raw responses from all contacted shards and there won't
- // be a top level code if shards returned more than one error code, in which case retry
- // if any error is retryable.
- if (res.hasOwnProperty("raw") && !res.hasOwnProperty("code") &&
- rawResponseHasRetryableError(res.raw, cmdName, logError)) {
- logError("Retrying because of retryable code in raw response");
- return kContinue;
- }
+ // Some sharding commands return raw responses from all contacted shards and there won't
+ // be a top level code if shards returned more than one error code, in which case retry
+ // if any error is retryable.
+ if (res.hasOwnProperty("raw") && !res.hasOwnProperty("code") &&
+ rawResponseHasRetryableError(res.raw, cmdName, logError)) {
+ logError("Retrying because of retryable code in raw response");
+ return kContinue;
+ }
- // Check for the retryable error codes from an interrupted shardCollection.
- if (cmdName === "shardCollection" && isRetryableShardCollectionResponse(res)) {
- logError("Retrying interrupted shardCollection");
- return kContinue;
- }
+ // Check for the retryable error codes from an interrupted shardCollection.
+ if (cmdName === "shardCollection" && isRetryableShardCollectionResponse(res)) {
+ logError("Retrying interrupted shardCollection");
+ return kContinue;
+ }
- // In a sharded cluster, drop may bury the original error code in the error message if
- // interrupted.
- if (cmdName === "drop" &&
- RetryableWritesUtil.errmsgContainsRetryableCodeName(res.errmsg)) {
- logError("Retrying interrupted drop");
- return kContinue;
- }
+ // In a sharded cluster, drop may bury the original error code in the error message if
+ // interrupted.
+ if (cmdName === "drop" && RetryableWritesUtil.errmsgContainsRetryableCodeName(res.errmsg)) {
+ logError("Retrying interrupted drop");
+ return kContinue;
+ }
- if (!shouldOverrideAcceptableError || !isAcceptableRetryFailedResponse(cmdName, res)) {
- // Pass up unretryable errors.
- return res;
- }
+ if (!shouldOverrideAcceptableError || !isAcceptableRetryFailedResponse(cmdName, res)) {
+ // Pass up unretryable errors.
+ return res;
+ }
- // Swallow safe errors that may come from a retry since the command may have completed
- // before the connection was closed.
- logError("Overriding safe failed response for");
- res.ok = 1;
+ // Swallow safe errors that may come from a retry since the command may have completed
+ // before the connection was closed.
+ logError("Overriding safe failed response for");
+ res.ok = 1;
- // Fall through to retry on write concern errors if needed.
- }
+ // Fall through to retry on write concern errors if needed.
+ }
- // Do not retry on a write concern error at this point if there is an actual error.
- // TransientTransactionErrors would already have been retried at an earlier point.
- if (hasWriteConcernError(res) && !hasError(res)) {
- if (RetryableWritesUtil.isRetryableCode(res.writeConcernError.code)) {
- logError("Retrying write concern error response with retryable code");
- return kContinue;
- }
+ // Do not retry on a write concern error at this point if there is an actual error.
+ // TransientTransactionErrors would already have been retried at an earlier point.
+ if (hasWriteConcernError(res) && !hasError(res)) {
+ if (RetryableWritesUtil.isRetryableCode(res.writeConcernError.code)) {
+ logError("Retrying write concern error response with retryable code");
+ return kContinue;
}
-
- return res;
}
- // Processes exceptions if configured for txn override. Retries the entire transaction on
- // transient transaction errors or network errors if configured for network errors as well.
- // If a retry fails, returns the response, or returns null for further exception processing.
- function retryWithTxnOverrideException(e, conn, cmdName, cmdObj, lsid, logError) {
- assert(configuredForTxnOverride());
+ return res;
+}
- if (TransactionsUtil.isTransientTransactionError(e) && cmdName !== "abortTransaction") {
- logError("Retrying on TransientTransactionError exception for command");
- const res = retryEntireTransaction(conn, lsid);
+// Processes exceptions if configured for txn override. Retries the entire transaction on
+// transient transaction errors or network errors if configured for network errors as well.
+// If a retry fails, returns the response, or returns null for further exception processing.
+function retryWithTxnOverrideException(e, conn, cmdName, cmdObj, lsid, logError) {
+ assert(configuredForTxnOverride());
- // If we got a TransientTransactionError on 'commitTransaction' retrying the transaction
- // will not retry it, so we retry it here.
- if (!hasError(res) && cmdName === "commitTransaction") {
- commitTransaction(conn, lsid, txnOptions.txnNumber);
- }
- return res;
- }
+ if (TransactionsUtil.isTransientTransactionError(e) && cmdName !== "abortTransaction") {
+ logError("Retrying on TransientTransactionError exception for command");
+ const res = retryEntireTransaction(conn, lsid);
- if (configuredForNetworkRetry() && isNetworkError(e) &&
- !canRetryNetworkErrorForCommand(cmdName, cmdObj)) {
- logError("Retrying on network exception for transaction statement");
- return retryEntireTransaction(conn, lsid);
+ // If we got a TransientTransactionError on 'commitTransaction' retrying the transaction
+ // will not retry it, so we retry it here.
+ if (!hasError(res) && cmdName === "commitTransaction") {
+ commitTransaction(conn, lsid, txnOptions.txnNumber);
}
- return null;
+ return res;
}
- // Processes exceptions if configured for network error retry. Returns whether to subtract one
- // from the number of command retries this override counts. Throws if we should not retry.
- function shouldRetryWithNetworkExceptionOverride(
- e, cmdName, cmdObj, startTime, numNetworkErrorRetries, logError) {
- assert(configuredForNetworkRetry());
-
- const kReplicaSetMonitorError =
- /^Could not find host matching read preference.*mode: "primary"/;
- if (numNetworkErrorRetries === 0) {
- logError("No retries, throwing");
- throw e;
- } else if (e.message.match(kReplicaSetMonitorError) &&
- Date.now() - startTime < 5 * 60 * 1000) {
- // ReplicaSetMonitor::getHostOrRefresh() waits up to 15 seconds to find the
- // primary of the replica set. It is possible for the step up attempt of another
- // node in the replica set to take longer than 15 seconds so we allow retrying
- // for up to 5 minutes.
- logError("Failed to find primary when attempting to run command," +
- " will retry for another 15 seconds");
- return false;
- } else if ((e.message.indexOf("writeConcernError") >= 0) && isRetryableError(e)) {
- logError("Retrying write concern error exception with retryable code");
- return false;
- } else if (!isNetworkError(e)) {
- logError("Not a network error, throwing");
+ if (configuredForNetworkRetry() && isNetworkError(e) &&
+ !canRetryNetworkErrorForCommand(cmdName, cmdObj)) {
+ logError("Retrying on network exception for transaction statement");
+ return retryEntireTransaction(conn, lsid);
+ }
+ return null;
+}
+
+// Processes exceptions if configured for network error retry. Returns whether to subtract one
+// from the number of command retries this override counts. Throws if we should not retry.
+function shouldRetryWithNetworkExceptionOverride(
+ e, cmdName, cmdObj, startTime, numNetworkErrorRetries, logError) {
+ assert(configuredForNetworkRetry());
+
+ const kReplicaSetMonitorError =
+ /^Could not find host matching read preference.*mode: "primary"/;
+ if (numNetworkErrorRetries === 0) {
+ logError("No retries, throwing");
+ throw e;
+ } else if (e.message.match(kReplicaSetMonitorError) && Date.now() - startTime < 5 * 60 * 1000) {
+ // ReplicaSetMonitor::getHostOrRefresh() waits up to 15 seconds to find the
+ // primary of the replica set. It is possible for the step up attempt of another
+ // node in the replica set to take longer than 15 seconds so we allow retrying
+ // for up to 5 minutes.
+ logError("Failed to find primary when attempting to run command," +
+ " will retry for another 15 seconds");
+ return false;
+ } else if ((e.message.indexOf("writeConcernError") >= 0) && isRetryableError(e)) {
+ logError("Retrying write concern error exception with retryable code");
+ return false;
+ } else if (!isNetworkError(e)) {
+ logError("Not a network error, throwing");
+ throw e;
+ } else if (RetryableWritesUtil.isRetryableWriteCmdName(cmdName)) {
+ if (_ServerSession.canRetryWrites(cmdObj)) {
+ // If the command is retryable, assume the command has already gone through
+ // or will go through the retry logic in SessionAwareClient, so propagate
+ // the error.
+ logError("Letting retryable writes code retry, throwing");
throw e;
- } else if (RetryableWritesUtil.isRetryableWriteCmdName(cmdName)) {
- if (_ServerSession.canRetryWrites(cmdObj)) {
- // If the command is retryable, assume the command has already gone through
- // or will go through the retry logic in SessionAwareClient, so propagate
- // the error.
- logError("Letting retryable writes code retry, throwing");
- throw e;
- }
}
-
- logError("Retrying on ordinary network error, subtracting from retry count");
- return true;
}
- const kMaxNumRetries = 3;
+ logError("Retrying on ordinary network error, subtracting from retry count");
+ return true;
+}
- // This function is the heart of the override with the main error retry loop.
- function runCommandOverrideBody(
- conn, dbName, cmdName, cmdObj, lsid, clientFunction, makeFuncArgs) {
- const startTime = Date.now();
+const kMaxNumRetries = 3;
- const isTxnStatement = isCmdInTransaction(cmdObj);
+// This function is the heart of the override with the main error retry loop.
+function runCommandOverrideBody(conn, dbName, cmdName, cmdObj, lsid, clientFunction, makeFuncArgs) {
+ const startTime = Date.now();
- if (configuredForNetworkRetry() && !isNested() && !isTxnStatement) {
- // If this is a top level command, make sure that the command supports network error
- // retries. Don't validate transaction statements because their encompassing transaction
- // can be retried at a higher level, even if each statement isn't retryable on its own.
- validateCmdNetworkErrorCompatibility(cmdName, cmdObj);
- }
+ const isTxnStatement = isCmdInTransaction(cmdObj);
- if (configuredForTxnOverride()) {
- setupTransactionCommand(conn, dbName, cmdName, cmdObj, lsid);
- }
+ if (configuredForNetworkRetry() && !isNested() && !isTxnStatement) {
+ // If this is a top level command, make sure that the command supports network error
+ // retries. Don't validate transaction statements because their encompassing transaction
+ // can be retried at a higher level, even if each statement isn't retryable on its own.
+ validateCmdNetworkErrorCompatibility(cmdName, cmdObj);
+ }
- const canRetryNetworkError = canRetryNetworkErrorForCommand(cmdName, cmdObj);
- let numNetworkErrorRetries = canRetryNetworkError ? kMaxNumRetries : 0;
- do {
- try {
- // Actually run the provided command.
- let res = clientFunction.apply(conn, makeFuncArgs(cmdObj));
- if (configuredForTxnOverride()) {
- logMsgFull("Override got response",
- `res: ${tojsononeline(res)}, cmd: ${tojsononeline(cmdObj)}`);
-
- if (!hasError(res) &&
- TransactionsUtil.commandIsNonTxnAggregation(cmdName, cmdObj)) {
- nonTxnAggCursorSet[res.cursor.id] = true;
- }
- }
+ if (configuredForTxnOverride()) {
+ setupTransactionCommand(conn, dbName, cmdName, cmdObj, lsid);
+ }
- const logError = (msg) => logErrorFull(msg, cmdName, cmdObj, res);
+ const canRetryNetworkError = canRetryNetworkErrorForCommand(cmdName, cmdObj);
+ let numNetworkErrorRetries = canRetryNetworkError ? kMaxNumRetries : 0;
+ do {
+ try {
+ // Actually run the provided command.
+ let res = clientFunction.apply(conn, makeFuncArgs(cmdObj));
+ if (configuredForTxnOverride()) {
+ logMsgFull("Override got response",
+ `res: ${tojsononeline(res)}, cmd: ${tojsononeline(cmdObj)}`);
- if (configuredForTxnOverride()) {
- res = retryWithTxnOverride(res, conn, dbName, cmdName, cmdObj, lsid, logError);
+ if (!hasError(res) &&
+ TransactionsUtil.commandIsNonTxnAggregation(cmdName, cmdObj)) {
+ nonTxnAggCursorSet[res.cursor.id] = true;
}
+ }
+
+ const logError = (msg) => logErrorFull(msg, cmdName, cmdObj, res);
+
+ if (configuredForTxnOverride()) {
+ res = retryWithTxnOverride(res, conn, dbName, cmdName, cmdObj, lsid, logError);
+ }
- if (canRetryNetworkError) {
- const networkRetryRes =
- shouldRetryWithNetworkErrorOverride(res, cmdName, logError);
- if (networkRetryRes === kContinue) {
- continue;
- } else {
- res = networkRetryRes;
- }
+ if (canRetryNetworkError) {
+ const networkRetryRes = shouldRetryWithNetworkErrorOverride(res, cmdName, logError);
+ if (networkRetryRes === kContinue) {
+ continue;
+ } else {
+ res = networkRetryRes;
}
+ }
- return res;
+ return res;
- } catch (e) {
- const logError = (msg) => logErrorFull(msg, cmdName, cmdObj, e);
+ } catch (e) {
+ const logError = (msg) => logErrorFull(msg, cmdName, cmdObj, e);
- if (configuredForTxnOverride()) {
- const txnRetryOnException =
- retryWithTxnOverrideException(e, conn, cmdName, cmdObj, lsid, logError);
- if (txnRetryOnException) {
- return txnRetryOnException;
- }
+ if (configuredForTxnOverride()) {
+ const txnRetryOnException =
+ retryWithTxnOverrideException(e, conn, cmdName, cmdObj, lsid, logError);
+ if (txnRetryOnException) {
+ return txnRetryOnException;
}
+ }
- if (canRetryNetworkError) {
- const decrementRetryCount = shouldRetryWithNetworkExceptionOverride(
- e, cmdName, cmdObj, startTime, numNetworkErrorRetries, logError);
- if (decrementRetryCount) {
- --numNetworkErrorRetries;
- logMsgFull("Decrementing command network error retry count",
- `New count: ${numNetworkErrorRetries}`);
- }
-
- logErrorFull("Retrying on network error for command", cmdName, cmdObj, e);
- inCommandNetworkErrorRetry = true;
- continue;
+ if (canRetryNetworkError) {
+ const decrementRetryCount = shouldRetryWithNetworkExceptionOverride(
+ e, cmdName, cmdObj, startTime, numNetworkErrorRetries, logError);
+ if (decrementRetryCount) {
+ --numNetworkErrorRetries;
+ logMsgFull("Decrementing command network error retry count",
+ `New count: ${numNetworkErrorRetries}`);
}
- throw e;
+ logErrorFull("Retrying on network error for command", cmdName, cmdObj, e);
+ inCommandNetworkErrorRetry = true;
+ continue;
}
- } while (numNetworkErrorRetries >= 0);
- throw new Error("MONGO UNREACHABLE");
- }
- // Top level runCommand override function.
- function runCommandOverride(conn, dbName, cmdName, cmdObj, clientFunction, makeFuncArgs) {
- currentCommandID.push(newestCommandID++);
- nestingLevel++;
-
- // If the command is in a wrapped form, then we look for the actual command object
- // inside the query/$query object.
- if (cmdName === "query" || cmdName === "$query") {
- cmdObj = cmdObj[cmdName];
- cmdName = Object.keys(cmdObj)[0];
+ throw e;
}
+ } while (numNetworkErrorRetries >= 0);
+ throw new Error("MONGO UNREACHABLE");
+}
+
+// Top level runCommand override function.
+function runCommandOverride(conn, dbName, cmdName, cmdObj, clientFunction, makeFuncArgs) {
+ currentCommandID.push(newestCommandID++);
+ nestingLevel++;
+
+ // If the command is in a wrapped form, then we look for the actual command object
+ // inside the query/$query object.
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObj = cmdObj[cmdName];
+ cmdName = Object.keys(cmdObj)[0];
+ }
- const lsid = cmdObj.lsid;
- try {
- const res = runCommandOverrideBody(
- conn, dbName, cmdName, cmdObj, lsid, clientFunction, makeFuncArgs);
-
- // Many tests run queries that are expected to fail. In this case, when we wrap CRUD ops
- // in transactions, the transaction including the failed query will not be able to
- // commit. This override expects transactions to be able to commit. Rather than
- // blacklisting all tests containing queries that are expected to fail, we clear the ops
- // list when we return an error to the test so we do not retry the failed query.
- if (configuredForTxnOverride() && !isNested() && hasError(res) && (ops.length > 0)) {
- logMsgFull("Clearing ops on failed command",
- `res: ${tojsononeline(res)}, cmd: ${tojsononeline(cmdObj)}`);
- clearOpsList();
- abortTransaction(conn, lsid, txnOptions.txnNumber);
- }
-
- return res;
- } finally {
- // Reset recursion and retry state tracking.
- nestingLevel--;
- currentCommandID.pop();
- inCommandNetworkErrorRetry = false;
+ const lsid = cmdObj.lsid;
+ try {
+ const res = runCommandOverrideBody(
+ conn, dbName, cmdName, cmdObj, lsid, clientFunction, makeFuncArgs);
+
+ // Many tests run queries that are expected to fail. In this case, when we wrap CRUD ops
+ // in transactions, the transaction including the failed query will not be able to
+ // commit. This override expects transactions to be able to commit. Rather than
+ // blacklisting all tests containing queries that are expected to fail, we clear the ops
+ // list when we return an error to the test so we do not retry the failed query.
+ if (configuredForTxnOverride() && !isNested() && hasError(res) && (ops.length > 0)) {
+ logMsgFull("Clearing ops on failed command",
+ `res: ${tojsononeline(res)}, cmd: ${tojsononeline(cmdObj)}`);
+ clearOpsList();
+ abortTransaction(conn, lsid, txnOptions.txnNumber);
}
- }
- if (configuredForNetworkRetry()) {
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/network_error_and_txn_override.js");
-
- const connectOriginal = connect;
-
- connect = function(url, user, pass) {
- let retVal;
-
- let connectionAttempts = 0;
- assert.soon(
- () => {
- try {
- connectionAttempts += 1;
- retVal = connectOriginal.apply(this, arguments);
- return true;
- } catch (e) {
- print(kLogPrefix + " Retrying connection to: " + url + ", attempts: " +
- connectionAttempts + ", failed with: " + tojson(e));
- }
- },
- "Failed connecting to url: " + tojson(url),
- undefined, // Default timeout.
- 2000); // 2 second interval.
-
- return retVal;
- };
+ return res;
+ } finally {
+ // Reset recursion and retry state tracking.
+ nestingLevel--;
+ currentCommandID.pop();
+ inCommandNetworkErrorRetry = false;
}
+}
+
+if (configuredForNetworkRetry()) {
+ OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/network_error_and_txn_override.js");
+
+ const connectOriginal = connect;
+
+ connect = function(url, user, pass) {
+ let retVal;
+
+ let connectionAttempts = 0;
+ assert.soon(
+ () => {
+ try {
+ connectionAttempts += 1;
+ retVal = connectOriginal.apply(this, arguments);
+ return true;
+ } catch (e) {
+ print(kLogPrefix + " Retrying connection to: " + url +
+ ", attempts: " + connectionAttempts + ", failed with: " + tojson(e));
+ }
+ },
+ "Failed connecting to url: " + tojson(url),
+ undefined, // Default timeout.
+ 2000); // 2 second interval.
- if (configuredForTxnOverride()) {
- startParallelShell = function() {
- throw new Error(
- "Cowardly refusing to run test with transaction override enabled when it uses" +
- "startParalleShell()");
- };
- }
+ return retVal;
+ };
+}
+
+if (configuredForTxnOverride()) {
+ startParallelShell = function() {
+ throw new Error(
+ "Cowardly refusing to run test with transaction override enabled when it uses" +
+ "startParalleShell()");
+ };
+}
- OverrideHelpers.overrideRunCommand(runCommandOverride);
+OverrideHelpers.overrideRunCommand(runCommandOverride);
})();
diff --git a/jstests/libs/override_methods/retry_writes_at_least_once.js b/jstests/libs/override_methods/retry_writes_at_least_once.js
index f122769eadc..cde81b5cc7f 100644
--- a/jstests/libs/override_methods/retry_writes_at_least_once.js
+++ b/jstests/libs/override_methods/retry_writes_at_least_once.js
@@ -4,56 +4,55 @@
* command. Returns the result of the latest attempt.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/override_helpers.js");
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/retryable_writes_util.js");
- Random.setRandomSeed();
+Random.setRandomSeed();
- const kExtraRetryProbability = 0.2;
+const kExtraRetryProbability = 0.2;
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- const mongoRunCommandWithMetadataOriginal = Mongo.prototype.runCommandWithMetadata;
+const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+const mongoRunCommandWithMetadataOriginal = Mongo.prototype.runCommandWithMetadata;
- Mongo.prototype.runCommand = function runCommand(dbName, cmdObj, options) {
- return runWithRetries(this, cmdObj, mongoRunCommandOriginal, arguments);
- };
+Mongo.prototype.runCommand = function runCommand(dbName, cmdObj, options) {
+ return runWithRetries(this, cmdObj, mongoRunCommandOriginal, arguments);
+};
- Mongo.prototype.runCommandWithMetadata = function runCommandWithMetadata(
- dbName, metadata, cmdObj) {
- return runWithRetries(this, cmdObj, mongoRunCommandWithMetadataOriginal, arguments);
- };
+Mongo.prototype.runCommandWithMetadata = function runCommandWithMetadata(dbName, metadata, cmdObj) {
+ return runWithRetries(this, cmdObj, mongoRunCommandWithMetadataOriginal, arguments);
+};
- function runWithRetries(mongo, cmdObj, clientFunction, clientFunctionArguments) {
- let cmdName = Object.keys(cmdObj)[0];
+function runWithRetries(mongo, cmdObj, clientFunction, clientFunctionArguments) {
+ let cmdName = Object.keys(cmdObj)[0];
- // If the command is in a wrapped form, then we look for the actual command object
- // inside the query/$query object.
- if (cmdName === "query" || cmdName === "$query") {
- cmdObj = cmdObj[cmdName];
- cmdName = Object.keys(cmdObj)[0];
- }
-
- const isRetryableWriteCmd = RetryableWritesUtil.isRetryableWriteCmdName(cmdName);
- const canRetryWrites = _ServerSession.canRetryWrites(cmdObj);
+ // If the command is in a wrapped form, then we look for the actual command object
+ // inside the query/$query object.
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObj = cmdObj[cmdName];
+ cmdName = Object.keys(cmdObj)[0];
+ }
- let res = clientFunction.apply(mongo, clientFunctionArguments);
+ const isRetryableWriteCmd = RetryableWritesUtil.isRetryableWriteCmdName(cmdName);
+ const canRetryWrites = _ServerSession.canRetryWrites(cmdObj);
- if (isRetryableWriteCmd && canRetryWrites) {
- let retryAttempt = 1;
- do {
- print("*** Retry attempt: " + retryAttempt + ", for command: " + cmdName +
- " with txnNumber: " + tojson(cmdObj.txnNumber) + ", and lsid: " +
- tojson(cmdObj.lsid));
- ++retryAttempt;
- res = clientFunction.apply(mongo, clientFunctionArguments);
- } while (Random.rand() <= kExtraRetryProbability);
- }
+ let res = clientFunction.apply(mongo, clientFunctionArguments);
- return res;
+ if (isRetryableWriteCmd && canRetryWrites) {
+ let retryAttempt = 1;
+ do {
+ print("*** Retry attempt: " + retryAttempt + ", for command: " + cmdName +
+ " with txnNumber: " + tojson(cmdObj.txnNumber) +
+ ", and lsid: " + tojson(cmdObj.lsid));
+ ++retryAttempt;
+ res = clientFunction.apply(mongo, clientFunctionArguments);
+ } while (Random.rand() <= kExtraRetryProbability);
}
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/retry_writes_at_least_once.js");
+ return res;
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/retry_writes_at_least_once.js");
})();
diff --git a/jstests/libs/override_methods/set_read_and_write_concerns.js b/jstests/libs/override_methods/set_read_and_write_concerns.js
index 4b9adfb23fa..19c799714e7 100644
--- a/jstests/libs/override_methods/set_read_and_write_concerns.js
+++ b/jstests/libs/override_methods/set_read_and_write_concerns.js
@@ -15,153 +15,153 @@
*
*/
(function() {
- "use strict";
-
- load("jstests/libs/override_methods/override_helpers.js");
- load("jstests/libs/override_methods/read_and_write_concern_helpers.js");
+"use strict";
+
+load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/override_methods/read_and_write_concern_helpers.js");
+
+if (typeof TestData === "undefined" || !TestData.hasOwnProperty("defaultReadConcernLevel")) {
+ throw new Error("The readConcern level to use must be set as the 'defaultReadConcernLevel'" +
+ " property on the global TestData object");
+}
+
+// If the default read concern level is null, that indicates that no read concern overrides
+// should be applied.
+const kDefaultReadConcern = {
+ level: TestData.defaultReadConcernLevel
+};
+const kDefaultWriteConcern =
+ (TestData.hasOwnProperty("defaultWriteConcern")) ? TestData.defaultWriteConcern : {
+ w: "majority",
+ // Use a "signature" value that won't typically match a value assigned in normal use.
+ // This way the wtimeout set by this override is distinguishable in the server logs.
+ wtimeout: 5 * 60 * 1000 + 321, // 300321ms
+ };
+
+function runCommandWithReadAndWriteConcerns(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
- if (typeof TestData === "undefined" || !TestData.hasOwnProperty("defaultReadConcernLevel")) {
- throw new Error(
- "The readConcern level to use must be set as the 'defaultReadConcernLevel'" +
- " property on the global TestData object");
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ let commandObjUnwrapped = commandObj;
+ if (commandName === "query" || commandName === "$query") {
+ commandObjUnwrapped = commandObj[commandName];
+ commandName = Object.keys(commandObjUnwrapped)[0];
}
- // If the default read concern level is null, that indicates that no read concern overrides
- // should be applied.
- const kDefaultReadConcern = {level: TestData.defaultReadConcernLevel};
- const kDefaultWriteConcern =
- (TestData.hasOwnProperty("defaultWriteConcern")) ? TestData.defaultWriteConcern : {
- w: "majority",
- // Use a "signature" value that won't typically match a value assigned in normal use.
- // This way the wtimeout set by this override is distinguishable in the server logs.
- wtimeout: 5 * 60 * 1000 + 321, // 300321ms
- };
-
- function runCommandWithReadAndWriteConcerns(
- conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
+ let shouldForceReadConcern = kCommandsSupportingReadConcern.has(commandName);
+ let shouldForceWriteConcern = kCommandsSupportingWriteConcern.has(commandName);
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- let commandObjUnwrapped = commandObj;
- if (commandName === "query" || commandName === "$query") {
- commandObjUnwrapped = commandObj[commandName];
- commandName = Object.keys(commandObjUnwrapped)[0];
+ // All commands in a multi-document transaction have the autocommit property.
+ if (commandObj.hasOwnProperty("autocommit")) {
+ shouldForceReadConcern = false;
+ if (!kCommandsSupportingWriteConcernInTransaction.has(commandName)) {
+ shouldForceWriteConcern = false;
}
-
- let shouldForceReadConcern = kCommandsSupportingReadConcern.has(commandName);
- let shouldForceWriteConcern = kCommandsSupportingWriteConcern.has(commandName);
-
- // All commands in a multi-document transaction have the autocommit property.
- if (commandObj.hasOwnProperty("autocommit")) {
+ }
+ if (commandName === "aggregate") {
+ if (OverrideHelpers.isAggregationWithListLocalSessionsStage(commandName,
+ commandObjUnwrapped)) {
+ // The $listLocalSessions stage can only be used with readConcern={level: "local"}.
shouldForceReadConcern = false;
- if (!kCommandsSupportingWriteConcernInTransaction.has(commandName)) {
- shouldForceWriteConcern = false;
- }
}
- if (commandName === "aggregate") {
- if (OverrideHelpers.isAggregationWithListLocalSessionsStage(commandName,
- commandObjUnwrapped)) {
- // The $listLocalSessions stage can only be used with readConcern={level: "local"}.
- shouldForceReadConcern = false;
- }
- if (OverrideHelpers.isAggregationWithOutOrMergeStage(commandName,
- commandObjUnwrapped)) {
- // The $out stage can only be used with readConcern={level: "local"} or
- // readConcern={level: "majority"}
- if (TestData.defaultReadConcernLevel === "linearizable") {
- shouldForceReadConcern = false;
- }
- } else {
- // A writeConcern can only be used with a $out stage.
- shouldForceWriteConcern = false;
- }
-
- if (commandObjUnwrapped.explain) {
- // Attempting to specify a readConcern while explaining an aggregation would always
- // return an error prior to SERVER-30582 and it otherwise only compatible with
- // readConcern={level: "local"}.
+ if (OverrideHelpers.isAggregationWithOutOrMergeStage(commandName, commandObjUnwrapped)) {
+ // The $out stage can only be used with readConcern={level: "local"} or
+ // readConcern={level: "majority"}
+ if (TestData.defaultReadConcernLevel === "linearizable") {
shouldForceReadConcern = false;
}
- } else if (OverrideHelpers.isMapReduceWithInlineOutput(commandName, commandObjUnwrapped)) {
- // A writeConcern can only be used with non-inline output.
+ } else {
+ // A writeConcern can only be used with a $out stage.
shouldForceWriteConcern = false;
}
- if (kCommandsOnlySupportingReadConcernSnapshot.has(commandName) &&
- kDefaultReadConcern.level === "snapshot") {
- shouldForceReadConcern = true;
+ if (commandObjUnwrapped.explain) {
+ // Attempting to specify a readConcern while explaining an aggregation would always
+            // return an error prior to SERVER-30582 and it is otherwise only compatible with
+ // readConcern={level: "local"}.
+ shouldForceReadConcern = false;
}
+ } else if (OverrideHelpers.isMapReduceWithInlineOutput(commandName, commandObjUnwrapped)) {
+ // A writeConcern can only be used with non-inline output.
+ shouldForceWriteConcern = false;
+ }
- const inWrappedForm = commandObj !== commandObjUnwrapped;
-
- // Only override read concern if an override level was specified.
- if (shouldForceReadConcern && (kDefaultReadConcern.level !== null)) {
- // We create a copy of 'commandObj' to avoid mutating the parameter the caller
- // specified.
- commandObj = Object.assign({}, commandObj);
- if (inWrappedForm) {
- commandObjUnwrapped = Object.assign({}, commandObjUnwrapped);
- commandObj[Object.keys(commandObj)[0]] = commandObjUnwrapped;
- } else {
- commandObjUnwrapped = commandObj;
- }
+ if (kCommandsOnlySupportingReadConcernSnapshot.has(commandName) &&
+ kDefaultReadConcern.level === "snapshot") {
+ shouldForceReadConcern = true;
+ }
- let readConcern;
- if (commandObjUnwrapped.hasOwnProperty("readConcern")) {
- readConcern = commandObjUnwrapped.readConcern;
+ const inWrappedForm = commandObj !== commandObjUnwrapped;
+
+ // Only override read concern if an override level was specified.
+ if (shouldForceReadConcern && (kDefaultReadConcern.level !== null)) {
+ // We create a copy of 'commandObj' to avoid mutating the parameter the caller
+ // specified.
+ commandObj = Object.assign({}, commandObj);
+ if (inWrappedForm) {
+ commandObjUnwrapped = Object.assign({}, commandObjUnwrapped);
+ commandObj[Object.keys(commandObj)[0]] = commandObjUnwrapped;
+ } else {
+ commandObjUnwrapped = commandObj;
+ }
- if (typeof readConcern !== "object" || readConcern === null ||
- (readConcern.hasOwnProperty("level") &&
- bsonWoCompare({_: readConcern.level}, {_: kDefaultReadConcern.level}) !== 0)) {
- throw new Error("Cowardly refusing to override read concern of command: " +
- tojson(commandObj));
- }
- }
+ let readConcern;
+ if (commandObjUnwrapped.hasOwnProperty("readConcern")) {
+ readConcern = commandObjUnwrapped.readConcern;
- // We create a copy of the readConcern object to avoid mutating the parameter the
- // caller specified.
- readConcern = Object.assign({}, readConcern, kDefaultReadConcern);
- commandObjUnwrapped.readConcern = readConcern;
+ if (typeof readConcern !== "object" || readConcern === null ||
+ (readConcern.hasOwnProperty("level") &&
+ bsonWoCompare({_: readConcern.level}, {_: kDefaultReadConcern.level}) !== 0)) {
+ throw new Error("Cowardly refusing to override read concern of command: " +
+ tojson(commandObj));
+ }
}
- if (shouldForceWriteConcern) {
- // We create a copy of 'commandObj' to avoid mutating the parameter the caller
- // specified.
- commandObj = Object.assign({}, commandObj);
- if (inWrappedForm) {
- commandObjUnwrapped = Object.assign({}, commandObjUnwrapped);
- commandObj[Object.keys(commandObj)[0]] = commandObjUnwrapped;
- } else {
- commandObjUnwrapped = commandObj;
- }
+ // We create a copy of the readConcern object to avoid mutating the parameter the
+ // caller specified.
+ readConcern = Object.assign({}, readConcern, kDefaultReadConcern);
+ commandObjUnwrapped.readConcern = readConcern;
+ }
- let writeConcern;
- if (commandObjUnwrapped.hasOwnProperty("writeConcern")) {
- writeConcern = commandObjUnwrapped.writeConcern;
+ if (shouldForceWriteConcern) {
+ // We create a copy of 'commandObj' to avoid mutating the parameter the caller
+ // specified.
+ commandObj = Object.assign({}, commandObj);
+ if (inWrappedForm) {
+ commandObjUnwrapped = Object.assign({}, commandObjUnwrapped);
+ commandObj[Object.keys(commandObj)[0]] = commandObjUnwrapped;
+ } else {
+ commandObjUnwrapped = commandObj;
+ }
- if (typeof writeConcern !== "object" || writeConcern === null ||
- (writeConcern.hasOwnProperty("w") &&
- bsonWoCompare({_: writeConcern.w}, {_: kDefaultWriteConcern.w}) !== 0)) {
- throw new Error("Cowardly refusing to override write concern of command: " +
- tojson(commandObj));
- }
- }
+ let writeConcern;
+ if (commandObjUnwrapped.hasOwnProperty("writeConcern")) {
+ writeConcern = commandObjUnwrapped.writeConcern;
- // We create a copy of the writeConcern object to avoid mutating the parameter the
- // caller specified.
- writeConcern = Object.assign({}, writeConcern, kDefaultWriteConcern);
- commandObjUnwrapped.writeConcern = writeConcern;
+ if (typeof writeConcern !== "object" || writeConcern === null ||
+ (writeConcern.hasOwnProperty("w") &&
+ bsonWoCompare({_: writeConcern.w}, {_: kDefaultWriteConcern.w}) !== 0)) {
+ throw new Error("Cowardly refusing to override write concern of command: " +
+ tojson(commandObj));
+ }
}
- return func.apply(conn, makeFuncArgs(commandObj));
+ // We create a copy of the writeConcern object to avoid mutating the parameter the
+ // caller specified.
+ writeConcern = Object.assign({}, writeConcern, kDefaultWriteConcern);
+ commandObjUnwrapped.writeConcern = writeConcern;
}
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/set_read_and_write_concerns.js");
+ return func.apply(conn, makeFuncArgs(commandObj));
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/set_read_and_write_concerns.js");
- OverrideHelpers.overrideRunCommand(runCommandWithReadAndWriteConcerns);
+OverrideHelpers.overrideRunCommand(runCommandWithReadAndWriteConcerns);
})();
diff --git a/jstests/libs/override_methods/set_read_preference_secondary.js b/jstests/libs/override_methods/set_read_preference_secondary.js
index 562711776f7..e962437140b 100644
--- a/jstests/libs/override_methods/set_read_preference_secondary.js
+++ b/jstests/libs/override_methods/set_read_preference_secondary.js
@@ -2,167 +2,166 @@
* Use prototype overrides to set read preference to "secondary" when running tests.
*/
(function() {
- "use strict";
-
- load("jstests/libs/override_methods/override_helpers.js");
-
- const kReadPreferenceSecondary = {mode: "secondary"};
- const kCommandsSupportingReadPreference = new Set([
- "aggregate",
- "collStats",
- "count",
- "dbStats",
- "distinct",
- "find",
- "geoSearch",
- ]);
- const kDatabasesOnConfigServers = new Set(["config", "admin"]);
-
- // This list of cursor-generating commands is incomplete. For example, "listCollections",
- // "listIndexes", and "repairCursor" are all missing from this list.
- // If we ever add tests that attempt to run getMore or killCursors on cursors generated from
- // those commands, then we should update the contents of this list and also handle any
- // differences in the server's response format.
- const kCursorGeneratingCommands = new Set(["aggregate", "find"]);
-
- const CursorTracker = (function() {
- const kNoCursor = new NumberLong(0);
-
- const connectionsByCursorId = {};
-
- return {
- getConnectionUsedForCursor: function getConnectionUsedForCursor(cursorId) {
- return (cursorId instanceof NumberLong) ? connectionsByCursorId[cursorId]
- : undefined;
- },
-
- setConnectionUsedForCursor: function setConnectionUsedForCursor(cursorId, cursorConn) {
- if (cursorId instanceof NumberLong &&
- !bsonBinaryEqual({_: cursorId}, {_: kNoCursor})) {
- connectionsByCursorId[cursorId] = cursorConn;
- }
- },
- };
- })();
-
- function runCommandWithReadPreferenceSecondary(
- conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
+"use strict";
+
+load("jstests/libs/override_methods/override_helpers.js");
+
+const kReadPreferenceSecondary = {
+ mode: "secondary"
+};
+const kCommandsSupportingReadPreference = new Set([
+ "aggregate",
+ "collStats",
+ "count",
+ "dbStats",
+ "distinct",
+ "find",
+ "geoSearch",
+]);
+const kDatabasesOnConfigServers = new Set(["config", "admin"]);
+
+// This list of cursor-generating commands is incomplete. For example, "listCollections",
+// "listIndexes", and "repairCursor" are all missing from this list.
+// If we ever add tests that attempt to run getMore or killCursors on cursors generated from
+// those commands, then we should update the contents of this list and also handle any
+// differences in the server's response format.
+const kCursorGeneratingCommands = new Set(["aggregate", "find"]);
+
+const CursorTracker = (function() {
+ const kNoCursor = new NumberLong(0);
+
+ const connectionsByCursorId = {};
+
+ return {
+ getConnectionUsedForCursor: function getConnectionUsedForCursor(cursorId) {
+ return (cursorId instanceof NumberLong) ? connectionsByCursorId[cursorId] : undefined;
+ },
+
+ setConnectionUsedForCursor: function setConnectionUsedForCursor(cursorId, cursorConn) {
+ if (cursorId instanceof NumberLong && !bsonBinaryEqual({_: cursorId}, {_: kNoCursor})) {
+ connectionsByCursorId[cursorId] = cursorConn;
+ }
+ },
+ };
+})();
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- let commandObjUnwrapped = commandObj;
- if (commandName === "query" || commandName === "$query") {
- commandObjUnwrapped = commandObj[commandName];
- commandName = Object.keys(commandObjUnwrapped)[0];
- }
+function runCommandWithReadPreferenceSecondary(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
- if (commandObj[commandName] === "system.profile" || commandName === 'profile') {
- throw new Error(
- "Cowardly refusing to run test that interacts with the system profiler as the " +
- "'system.profile' collection is not replicated" + tojson(commandObj));
- }
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ let commandObjUnwrapped = commandObj;
+ if (commandName === "query" || commandName === "$query") {
+ commandObjUnwrapped = commandObj[commandName];
+ commandName = Object.keys(commandObjUnwrapped)[0];
+ }
- if (conn.isReplicaSetConnection()) {
- // When a "getMore" or "killCursors" command is issued on a replica set connection, we
- // attempt to automatically route the command to the server the cursor(s) were
- // originally established on. This makes it possible to use the
- // set_read_preference_secondary.js override without needing to update calls of
- // DB#runCommand() to explicitly track the connection that was used. If the connection
- // is actually a direct connection to a mongod or mongos process, or if the cursor id
- // cannot be found in the CursorTracker, then we'll fall back to using DBClientRS's
- // server selection and send the operation to the current primary. It is possible that
- // the test is trying to exercise the behavior around when an unknown cursor id is sent
- // to the server.
- if (commandName === "getMore") {
- const cursorId = commandObjUnwrapped[commandName];
- const cursorConn = CursorTracker.getConnectionUsedForCursor(cursorId);
- if (cursorConn !== undefined) {
- return func.apply(cursorConn, makeFuncArgs(commandObj));
- }
- } else if (commandName === "killCursors") {
- const cursorIds = commandObjUnwrapped.cursors;
- if (Array.isArray(cursorIds)) {
- let cursorConn;
-
- for (let cursorId of cursorIds) {
- const otherCursorConn = CursorTracker.getConnectionUsedForCursor(cursorId);
- if (cursorConn === undefined) {
- cursorConn = otherCursorConn;
- } else if (otherCursorConn !== undefined) {
- // We set 'cursorConn' back to undefined and break out of the loop so
- // that we don't attempt to automatically route the "killCursors"
- // command when there are cursors from different servers.
- cursorConn = undefined;
- break;
- }
- }
+ if (commandObj[commandName] === "system.profile" || commandName === 'profile') {
+ throw new Error(
+ "Cowardly refusing to run test that interacts with the system profiler as the " +
+ "'system.profile' collection is not replicated" + tojson(commandObj));
+ }
- if (cursorConn !== undefined) {
- return func.apply(cursorConn, makeFuncArgs(commandObj));
+ if (conn.isReplicaSetConnection()) {
+ // When a "getMore" or "killCursors" command is issued on a replica set connection, we
+ // attempt to automatically route the command to the server the cursor(s) were
+ // originally established on. This makes it possible to use the
+ // set_read_preference_secondary.js override without needing to update calls of
+ // DB#runCommand() to explicitly track the connection that was used. If the connection
+ // is actually a direct connection to a mongod or mongos process, or if the cursor id
+ // cannot be found in the CursorTracker, then we'll fall back to using DBClientRS's
+ // server selection and send the operation to the current primary. It is possible that
+ // the test is trying to exercise the behavior around when an unknown cursor id is sent
+ // to the server.
+ if (commandName === "getMore") {
+ const cursorId = commandObjUnwrapped[commandName];
+ const cursorConn = CursorTracker.getConnectionUsedForCursor(cursorId);
+ if (cursorConn !== undefined) {
+ return func.apply(cursorConn, makeFuncArgs(commandObj));
+ }
+ } else if (commandName === "killCursors") {
+ const cursorIds = commandObjUnwrapped.cursors;
+ if (Array.isArray(cursorIds)) {
+ let cursorConn;
+
+ for (let cursorId of cursorIds) {
+ const otherCursorConn = CursorTracker.getConnectionUsedForCursor(cursorId);
+ if (cursorConn === undefined) {
+ cursorConn = otherCursorConn;
+ } else if (otherCursorConn !== undefined) {
+ // We set 'cursorConn' back to undefined and break out of the loop so
+ // that we don't attempt to automatically route the "killCursors"
+ // command when there are cursors from different servers.
+ cursorConn = undefined;
+ break;
}
}
- }
- }
- let shouldForceReadPreference = kCommandsSupportingReadPreference.has(commandName);
- if (OverrideHelpers.isAggregationWithOutOrMergeStage(commandName, commandObjUnwrapped)) {
- // An aggregation with a $out stage must be sent to the primary.
- shouldForceReadPreference = false;
- } else if ((commandName === "mapReduce" || commandName === "mapreduce") &&
- !OverrideHelpers.isMapReduceWithInlineOutput(commandName, commandObjUnwrapped)) {
- // A map-reduce operation with non-inline output must be sent to the primary.
- shouldForceReadPreference = false;
- } else if (conn.isMongos() && kDatabasesOnConfigServers.has(dbName)) {
- // Avoid overriding the read preference for config server since there may only be one
- // of them.
- shouldForceReadPreference = false;
+ if (cursorConn !== undefined) {
+ return func.apply(cursorConn, makeFuncArgs(commandObj));
+ }
+ }
}
+ }
- if (TestData.doNotOverrideReadPreference) {
- // Use this TestData flag to allow certain runCommands to be exempted from
- // setting secondary read preference.
- shouldForceReadPreference = false;
- }
+ let shouldForceReadPreference = kCommandsSupportingReadPreference.has(commandName);
+ if (OverrideHelpers.isAggregationWithOutOrMergeStage(commandName, commandObjUnwrapped)) {
+ // An aggregation with a $out stage must be sent to the primary.
+ shouldForceReadPreference = false;
+ } else if ((commandName === "mapReduce" || commandName === "mapreduce") &&
+ !OverrideHelpers.isMapReduceWithInlineOutput(commandName, commandObjUnwrapped)) {
+ // A map-reduce operation with non-inline output must be sent to the primary.
+ shouldForceReadPreference = false;
+ } else if (conn.isMongos() && kDatabasesOnConfigServers.has(dbName)) {
+ // Avoid overriding the read preference for config server since there may only be one
+ // of them.
+ shouldForceReadPreference = false;
+ }
- if (shouldForceReadPreference) {
- if (commandObj === commandObjUnwrapped) {
- // We wrap the command object using a "query" field rather than a "$query" field to
- // match the implementation of DB.prototype._attachReadPreferenceToCommand().
- commandObj = {query: commandObj};
- } else {
- // We create a copy of 'commandObj' to avoid mutating the parameter the caller
- // specified.
- commandObj = Object.assign({}, commandObj);
- }
+ if (TestData.doNotOverrideReadPreference) {
+ // Use this TestData flag to allow certain runCommands to be exempted from
+ // setting secondary read preference.
+ shouldForceReadPreference = false;
+ }
- if (commandObj.hasOwnProperty("$readPreference") &&
- !bsonBinaryEqual({_: commandObj.$readPreference}, {_: kReadPreferenceSecondary})) {
- throw new Error("Cowardly refusing to override read preference of command: " +
- tojson(commandObj));
- }
+ if (shouldForceReadPreference) {
+ if (commandObj === commandObjUnwrapped) {
+ // We wrap the command object using a "query" field rather than a "$query" field to
+ // match the implementation of DB.prototype._attachReadPreferenceToCommand().
+ commandObj = {query: commandObj};
+ } else {
+ // We create a copy of 'commandObj' to avoid mutating the parameter the caller
+ // specified.
+ commandObj = Object.assign({}, commandObj);
+ }
- commandObj.$readPreference = kReadPreferenceSecondary;
+ if (commandObj.hasOwnProperty("$readPreference") &&
+ !bsonBinaryEqual({_: commandObj.$readPreference}, {_: kReadPreferenceSecondary})) {
+ throw new Error("Cowardly refusing to override read preference of command: " +
+ tojson(commandObj));
}
- const serverResponse = func.apply(conn, makeFuncArgs(commandObj));
+ commandObj.$readPreference = kReadPreferenceSecondary;
+ }
- if (conn.isReplicaSetConnection() && kCursorGeneratingCommands.has(commandName) &&
- serverResponse.ok === 1 && serverResponse.hasOwnProperty("cursor")) {
- // We associate the cursor id returned by the server with the connection that was used
- // to establish it so that we can attempt to automatically route subsequent "getMore"
- // and "killCursors" commands.
- CursorTracker.setConnectionUsedForCursor(serverResponse.cursor.id,
- serverResponse._mongo);
- }
+ const serverResponse = func.apply(conn, makeFuncArgs(commandObj));
- return serverResponse;
+ if (conn.isReplicaSetConnection() && kCursorGeneratingCommands.has(commandName) &&
+ serverResponse.ok === 1 && serverResponse.hasOwnProperty("cursor")) {
+ // We associate the cursor id returned by the server with the connection that was used
+ // to establish it so that we can attempt to automatically route subsequent "getMore"
+ // and "killCursors" commands.
+ CursorTracker.setConnectionUsedForCursor(serverResponse.cursor.id, serverResponse._mongo);
}
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/set_read_preference_secondary.js");
+ return serverResponse;
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/set_read_preference_secondary.js");
- OverrideHelpers.overrideRunCommand(runCommandWithReadPreferenceSecondary);
+OverrideHelpers.overrideRunCommand(runCommandWithReadPreferenceSecondary);
})();
diff --git a/jstests/libs/override_methods/sharding_continuous_config_stepdown.js b/jstests/libs/override_methods/sharding_continuous_config_stepdown.js
index 362310b5248..ad0e8e3d6de 100644
--- a/jstests/libs/override_methods/sharding_continuous_config_stepdown.js
+++ b/jstests/libs/override_methods/sharding_continuous_config_stepdown.js
@@ -1,36 +1,35 @@
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/continuous_stepdown.js");
- load("jstests/libs/override_methods/mongos_manual_intervention_actions.js");
+load("jstests/libs/override_methods/continuous_stepdown.js");
+load("jstests/libs/override_methods/mongos_manual_intervention_actions.js");
- ContinuousStepdown.configure({
- configStepdown: true,
- electionTimeoutMS: 5 * 1000,
- shardStepdown: false,
- stepdownDurationSecs: 10,
- stepdownIntervalMS: 8 * 1000,
- },
- {
- verbositySetting: {
- verbosity: 0,
- command: {verbosity: 1},
- network: {verbosity: 1, asio: {verbosity: 2}},
- tracking: {verbosity: 0}
- }
- });
+ContinuousStepdown.configure({
+ configStepdown: true,
+ electionTimeoutMS: 5 * 1000,
+ shardStepdown: false,
+ stepdownDurationSecs: 10,
+ stepdownIntervalMS: 8 * 1000,
+},
+ {
+ verbositySetting: {
+ verbosity: 0,
+ command: {verbosity: 1},
+ network: {verbosity: 1, asio: {verbosity: 2}},
+ tracking: {verbosity: 0}
+ }
+ });
- const originalShardingTest = ShardingTest;
- ShardingTest = function() {
- originalShardingTest.apply(this, arguments);
+const originalShardingTest = ShardingTest;
+ShardingTest = function() {
+ originalShardingTest.apply(this, arguments);
- // Automatically start the continuous stepdown thread on the config server replica set.
- this.startContinuousFailover();
- };
-
- // The checkUUIDsConsistentAcrossCluster() function is defined on ShardingTest's prototype, but
- // ShardingTest's prototype gets reset when ShardingTest is reassigned. We reload the override
- // to redefine checkUUIDsConsistentAcrossCluster() on the new ShardingTest's prototype.
- load('jstests/libs/override_methods/check_uuids_consistent_across_cluster.js');
+ // Automatically start the continuous stepdown thread on the config server replica set.
+ this.startContinuousFailover();
+};
+// The checkUUIDsConsistentAcrossCluster() function is defined on ShardingTest's prototype, but
+// ShardingTest's prototype gets reset when ShardingTest is reassigned. We reload the override
+// to redefine checkUUIDsConsistentAcrossCluster() on the new ShardingTest's prototype.
+load('jstests/libs/override_methods/check_uuids_consistent_across_cluster.js');
})();
diff --git a/jstests/libs/override_methods/txn_passthrough_cmd_massage.js b/jstests/libs/override_methods/txn_passthrough_cmd_massage.js
index 374578da166..114e26c2b46 100644
--- a/jstests/libs/override_methods/txn_passthrough_cmd_massage.js
+++ b/jstests/libs/override_methods/txn_passthrough_cmd_massage.js
@@ -3,72 +3,72 @@
* statement transaction suites.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/override_methods/override_helpers.js");
+load("jstests/libs/override_methods/override_helpers.js");
- function runCommandInMultiStmtTxnPassthrough(
- conn, dbName, commandName, commandObj, func, makeFuncArgs) {
- if (typeof commandObj !== "object" || commandObj === null) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
+function runCommandInMultiStmtTxnPassthrough(
+ conn, dbName, commandName, commandObj, func, makeFuncArgs) {
+ if (typeof commandObj !== "object" || commandObj === null) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- let commandObjUnwrapped = commandObj;
- if (commandName === "query" || commandName === "$query") {
- commandObjUnwrapped = commandObj[commandName];
- commandName = Object.keys(commandObjUnwrapped)[0];
- }
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ let commandObjUnwrapped = commandObj;
+ if (commandName === "query" || commandName === "$query") {
+ commandObjUnwrapped = commandObj[commandName];
+ commandName = Object.keys(commandObjUnwrapped)[0];
+ }
- // Ignore all commands that are part of multi statement transactions.
- if (commandObj.hasOwnProperty("autocommit")) {
- return func.apply(conn, makeFuncArgs(commandObj));
- }
+ // Ignore all commands that are part of multi statement transactions.
+ if (commandObj.hasOwnProperty("autocommit")) {
+ return func.apply(conn, makeFuncArgs(commandObj));
+ }
- const majority = {w: 'majority'};
- let massagedCmd = Object.extend(commandObjUnwrapped, {});
+ const majority = {w: 'majority'};
+ let massagedCmd = Object.extend(commandObjUnwrapped, {});
- // Adjust mapReduce and drop to use { w: majority } to make sure that all pending drops that
- // occurred while running these commands are finished after the command returns. This
- // is done to make sure that the pending drop of the two phase drop won't try to contest
- // with db/coll locks in the background.
+ // Adjust mapReduce and drop to use { w: majority } to make sure that all pending drops that
+ // occurred while running these commands are finished after the command returns. This
+ // is done to make sure that the pending drop of the two phase drop won't try to contest
+ // with db/coll locks in the background.
- if (commandName === "mapReduce" || commandName === "mapreduce") {
- if (typeof massagedCmd.out === 'string') {
- massagedCmd.out = {replace: commandObjUnwrapped.out, writeConcern: majority};
- } else if (typeof massagedCmd.out === 'object') {
- let outOptions = massagedCmd.out;
- if (!outOptions.hasOwnProperty('inline')) {
- if (outOptions.hasOwnProperty('writeConcern')) {
- if (outOptions.writeConcern.w !== 'majority') {
- throw new Error(
- 'Running mapReduce with non majority write concern: ' +
- tojson(commandObj) + '. Consider blacklisting the test ' +
- 'since the 2 phase drop can interfere with lock acquisitions.');
- }
- } else {
- outOptions.writeConcern = majority;
+ if (commandName === "mapReduce" || commandName === "mapreduce") {
+ if (typeof massagedCmd.out === 'string') {
+ massagedCmd.out = {replace: commandObjUnwrapped.out, writeConcern: majority};
+ } else if (typeof massagedCmd.out === 'object') {
+ let outOptions = massagedCmd.out;
+ if (!outOptions.hasOwnProperty('inline')) {
+ if (outOptions.hasOwnProperty('writeConcern')) {
+ if (outOptions.writeConcern.w !== 'majority') {
+ throw new Error(
+ 'Running mapReduce with non majority write concern: ' +
+ tojson(commandObj) + '. Consider blacklisting the test ' +
+ 'since the 2 phase drop can interfere with lock acquisitions.');
}
+ } else {
+ outOptions.writeConcern = majority;
}
}
- } else if (commandName === 'drop') {
- if (massagedCmd.hasOwnProperty('writeConcern')) {
- if (massagedCmd.writeConcern.w !== 'majority') {
- throw new Error('Running drop with non majority write concern: ' +
- tojson(commandObj) + '. Consider blacklisting the test ' +
- 'since the 2 phase drop can interfere with lock acquisitions.');
- }
- } else {
- massagedCmd.writeConcern = majority;
+ }
+ } else if (commandName === 'drop') {
+ if (massagedCmd.hasOwnProperty('writeConcern')) {
+ if (massagedCmd.writeConcern.w !== 'majority') {
+ throw new Error('Running drop with non majority write concern: ' +
+ tojson(commandObj) + '. Consider blacklisting the test ' +
+ 'since the 2 phase drop can interfere with lock acquisitions.');
}
+ } else {
+ massagedCmd.writeConcern = majority;
}
-
- return func.apply(conn, makeFuncArgs(massagedCmd));
}
- OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/txn_passthrough_cmd_massage.js");
+ return func.apply(conn, makeFuncArgs(massagedCmd));
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/txn_passthrough_cmd_massage.js");
- OverrideHelpers.overrideRunCommand(runCommandInMultiStmtTxnPassthrough);
+OverrideHelpers.overrideRunCommand(runCommandInMultiStmtTxnPassthrough);
})();
diff --git a/jstests/libs/override_methods/validate_collections_on_shutdown.js b/jstests/libs/override_methods/validate_collections_on_shutdown.js
index 49036790739..130404dfe6f 100644
--- a/jstests/libs/override_methods/validate_collections_on_shutdown.js
+++ b/jstests/libs/override_methods/validate_collections_on_shutdown.js
@@ -4,116 +4,110 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/command_sequence_with_retries.js"); // for CommandSequenceWithRetries
+load("jstests/libs/command_sequence_with_retries.js"); // for CommandSequenceWithRetries
- MongoRunner.validateCollectionsCallback = function(port) {
- if (jsTest.options().skipCollectionAndIndexValidation) {
- print("Skipping collection validation during mongod shutdown");
- return;
- }
+MongoRunner.validateCollectionsCallback = function(port) {
+ if (jsTest.options().skipCollectionAndIndexValidation) {
+ print("Skipping collection validation during mongod shutdown");
+ return;
+ }
- let conn;
- try {
- conn = new Mongo("localhost:" + port);
- } catch (e) {
- print(
- "Skipping collection validation because we couldn't establish a connection to the" +
- " server on port " + port);
- return;
- }
+ let conn;
+ try {
+ conn = new Mongo("localhost:" + port);
+ } catch (e) {
+ print("Skipping collection validation because we couldn't establish a connection to the" +
+ " server on port " + port);
+ return;
+ }
- // Set slaveOk=true so that we can run commands against any secondaries.
- conn.setSlaveOk();
+ // Set slaveOk=true so that we can run commands against any secondaries.
+ conn.setSlaveOk();
- let dbNames;
- let result =
- new CommandSequenceWithRetries(conn)
- .then("running the isMaster command",
- function(conn) {
- const res = assert.commandWorked(conn.adminCommand({isMaster: 1}));
- if (res.msg === "isdbgrid") {
- return {
- shouldStop: true,
- reason: "not running validate against mongos"
- };
- } else if (!res.ismaster && !res.secondary) {
- return {
- shouldStop: true,
- reason: "not running validate since mongod isn't in the PRIMARY" +
- " or SECONDARY states"
- };
- }
- })
- .then("authenticating",
- function(conn) {
- if (jsTest.options().keyFile) {
- jsTest.authenticate(conn);
- }
- })
- .then(
- "best effort to step down node forever",
- function(conn) {
- if (conn.isReplicaSetMember()) {
- // This node should never run for election again. If the node has not
- // been initialized yet, then it cannot get elected.
- const kFreezeTimeSecs = 24 * 60 * 60; // 24 hours.
+ let dbNames;
+ let result =
+ new CommandSequenceWithRetries(conn)
+ .then("running the isMaster command",
+ function(conn) {
+ const res = assert.commandWorked(conn.adminCommand({isMaster: 1}));
+ if (res.msg === "isdbgrid") {
+ return {shouldStop: true, reason: "not running validate against mongos"};
+ } else if (!res.ismaster && !res.secondary) {
+ return {
+ shouldStop: true,
+ reason: "not running validate since mongod isn't in the PRIMARY" +
+ " or SECONDARY states"
+ };
+ }
+ })
+ .then("authenticating",
+ function(conn) {
+ if (jsTest.options().keyFile) {
+ jsTest.authenticate(conn);
+ }
+ })
+ .then("best effort to step down node forever",
+ function(conn) {
+ if (conn.isReplicaSetMember()) {
+ // This node should never run for election again. If the node has not
+ // been initialized yet, then it cannot get elected.
+ const kFreezeTimeSecs = 24 * 60 * 60; // 24 hours.
- assert.commandWorkedOrFailedWithCode(
- conn.adminCommand({replSetStepDown: kFreezeTimeSecs, force: true}),
- [
+ assert.commandWorkedOrFailedWithCode(
+ conn.adminCommand({replSetStepDown: kFreezeTimeSecs, force: true}), [
ErrorCodes.NotMaster,
ErrorCodes.NotYetInitialized,
ErrorCodes.Unauthorized
- ]);
+ ]);
- assert.commandWorkedOrFailedWithCode(
- conn.adminCommand({replSetFreeze: kFreezeTimeSecs}), [
- ErrorCodes.NotYetInitialized,
- ErrorCodes.Unauthorized,
- // We include "NotSecondary" because if replSetStepDown receives
- // "NotYetInitialized", then this command will fail with
- // "NotSecondary". This is why this is a "best-effort".
- ErrorCodes.NotSecondary
- ]);
- }
- })
- .then("getting the list of databases",
- function(conn) {
- const res = conn.adminCommand({listDatabases: 1});
- if (!res.ok) {
- // TODO: SERVER-31916 for the KeyNotFound error
- assert.commandFailedWithCode(
- res, [ErrorCodes.Unauthorized, ErrorCodes.KeyNotFound]);
- return {shouldStop: true, reason: "cannot run listDatabases"};
- }
- assert.commandWorked(res);
- dbNames = res.databases.map(dbInfo => dbInfo.name);
- })
- .execute();
+ assert.commandWorkedOrFailedWithCode(
+ conn.adminCommand({replSetFreeze: kFreezeTimeSecs}), [
+ ErrorCodes.NotYetInitialized,
+ ErrorCodes.Unauthorized,
+ // We include "NotSecondary" because if replSetStepDown receives
+ // "NotYetInitialized", then this command will fail with
+ // "NotSecondary". This is why this is a "best-effort".
+ ErrorCodes.NotSecondary
+ ]);
+ }
+ })
+ .then("getting the list of databases",
+ function(conn) {
+ const res = conn.adminCommand({listDatabases: 1});
+ if (!res.ok) {
+ // TODO: SERVER-31916 for the KeyNotFound error
+ assert.commandFailedWithCode(
+ res, [ErrorCodes.Unauthorized, ErrorCodes.KeyNotFound]);
+ return {shouldStop: true, reason: "cannot run listDatabases"};
+ }
+ assert.commandWorked(res);
+ dbNames = res.databases.map(dbInfo => dbInfo.name);
+ })
+ .execute();
- if (!result.ok) {
- print("Skipping collection validation: " + result.msg);
- return;
- }
+ if (!result.ok) {
+ print("Skipping collection validation: " + result.msg);
+ return;
+ }
- load('jstests/hooks/validate_collections.js'); // for validateCollections
+ load('jstests/hooks/validate_collections.js'); // for validateCollections
- const cmds = new CommandSequenceWithRetries(conn);
- for (let i = 0; i < dbNames.length; ++i) {
- const dbName = dbNames[i];
- cmds.then("validating " + dbName, function(conn) {
- const validate_res = validateCollections(conn.getDB(dbName), {full: true});
- if (!validate_res.ok) {
- return {
- shouldStop: true,
- reason: "collection validation failed " + tojson(validate_res)
- };
- }
- });
- }
+ const cmds = new CommandSequenceWithRetries(conn);
+ for (let i = 0; i < dbNames.length; ++i) {
+ const dbName = dbNames[i];
+ cmds.then("validating " + dbName, function(conn) {
+ const validate_res = validateCollections(conn.getDB(dbName), {full: true});
+ if (!validate_res.ok) {
+ return {
+ shouldStop: true,
+ reason: "collection validation failed " + tojson(validate_res)
+ };
+ }
+ });
+ }
- assert.commandWorked(cmds.execute());
- };
+ assert.commandWorked(cmds.execute());
+};
})();
diff --git a/jstests/libs/test_background_ops.js b/jstests/libs/test_background_ops.js
index dd2f75a9da5..1393871e1ab 100644
--- a/jstests/libs/test_background_ops.js
+++ b/jstests/libs/test_background_ops.js
@@ -6,7 +6,6 @@
* Allows synchronization between background ops and the test operations
*/
var waitForLock = function(mongo, name) {
-
var ts = new ObjectId();
var lockColl = mongo.getCollection("config.testLocks");
@@ -32,13 +31,13 @@ var waitForLock = function(mongo, name) {
return gleObj.n == 1 || gleObj.updatedExisting;
}, "could not acquire lock", 30 * 1000, 100);
- print("Acquired lock " + tojson({_id: name, ts: ts}) + " curr : " +
- tojson(lockColl.findOne({_id: name})));
+ print("Acquired lock " + tojson({_id: name, ts: ts}) +
+ " curr : " + tojson(lockColl.findOne({_id: name})));
// Set the state back to 0
var unlock = function() {
- print("Releasing lock " + tojson({_id: name, ts: ts}) + " curr : " +
- tojson(lockColl.findOne({_id: name})));
+ print("Releasing lock " + tojson({_id: name, ts: ts}) +
+ " curr : " + tojson(lockColl.findOne({_id: name})));
lockColl.update({_id: name, ts: ts}, {$set: {state: 0}});
};
@@ -101,7 +100,6 @@ function startParallelShell(jsCode, port) {
}
startParallelOps = function(mongo, proc, args, context) {
-
var procName = proc.name + "-" + new ObjectId();
var seed = new ObjectId(new ObjectId().valueOf().split("").reverse().join(""))
.getTimestamp()
@@ -121,7 +119,6 @@ startParallelOps = function(mongo, proc, args, context) {
setResult: setResult,
setup: function(context, stored) {
-
waitForLock = function() {
return context.waitForLock(db.getMongo(), context.procName);
};
@@ -138,7 +135,6 @@ startParallelOps = function(mongo, proc, args, context) {
};
var bootstrapper = function(stored) {
-
var procContext = stored.procContext;
eval("procContext = " + procContext);
procContext.setup(procContext, stored);
@@ -147,7 +143,7 @@ startParallelOps = function(mongo, proc, args, context) {
eval("contexts = " + contexts);
for (var i = 0; i < contexts.length; i++) {
- if (typeof(contexts[i]) != "undefined") {
+ if (typeof (contexts[i]) != "undefined") {
// Evaluate all contexts
contexts[i](procContext);
}
@@ -188,8 +184,11 @@ startParallelOps = function(mongo, proc, args, context) {
var bootstrapStartup = "{ var procName = '" + procName + "'; " +
"var stored = db.getMongo().getCollection( '" + testDataColl + "' )" +
- ".findOne({ _id : procName }); " + "var bootstrapper = stored.bootstrapper; " +
- "eval( 'bootstrapper = ' + bootstrapper ); " + "bootstrapper( stored ); " + "}";
+ ".findOne({ _id : procName }); " +
+ "var bootstrapper = stored.bootstrapper; " +
+ "eval( 'bootstrapper = ' + bootstrapper ); " +
+ "bootstrapper( stored ); " +
+ "}";
// Save the global db object if it exists, so that we can restore it after starting the parallel
// shell.
@@ -236,7 +235,6 @@ startParallelOps = function(mongo, proc, args, context) {
};
var RandomFunctionContext = function(context) {
-
Random.srand(context.seed);
Random.randBool = function() {
@@ -244,7 +242,6 @@ var RandomFunctionContext = function(context) {
};
Random.randInt = function(min, max) {
-
if (max == undefined) {
max = min;
min = 0;
@@ -254,7 +251,6 @@ var RandomFunctionContext = function(context) {
};
Random.randShardKey = function() {
-
var numFields = 2; // Random.randInt(1, 3)
var key = {};
@@ -267,7 +263,6 @@ var RandomFunctionContext = function(context) {
};
Random.randShardKeyValue = function(shardKey) {
-
var keyValue = {};
for (field in shardKey) {
keyValue[field] = Random.randInt(1, 100);
@@ -277,7 +272,6 @@ var RandomFunctionContext = function(context) {
};
Random.randCluster = function() {
-
var numShards = 2; // Random.randInt( 1, 10 )
var rs = false; // Random.randBool()
var st = new ShardingTest({shards: numShards, mongos: 4, other: {rs: rs}});
diff --git a/jstests/libs/transactions_util.js b/jstests/libs/transactions_util.js
index af9cccb44c0..3ddb09d5c28 100644
--- a/jstests/libs/transactions_util.js
+++ b/jstests/libs/transactions_util.js
@@ -76,7 +76,7 @@ var TransactionsUtil = (function() {
function deepCopyObject(dst, src) {
for (var k in src) {
var v = src[k];
- if (typeof(v) == "object" && v !== null) {
+ if (typeof (v) == "object" && v !== null) {
if (v.constructor === ObjectId) { // convert ObjectId properly
eval("v = " + tojson(v));
} else if (v instanceof NumberLong) { // convert NumberLong properly
diff --git a/jstests/libs/txns/txn_passthrough_runner.js b/jstests/libs/txns/txn_passthrough_runner.js
index 1e2640cd11b..43d1ecf6575 100644
--- a/jstests/libs/txns/txn_passthrough_runner.js
+++ b/jstests/libs/txns/txn_passthrough_runner.js
@@ -1,13 +1,13 @@
(function() {
- 'use strict';
+'use strict';
- const testFile = TestData.multiStmtTxnTestFile;
+const testFile = TestData.multiStmtTxnTestFile;
- try {
- load(testFile);
- } finally {
- // Run a lightweight command to allow the override file to commit the last command.
- // Ensure this command runs even if the test errors.
- assert.commandWorked(db.runCommand({ping: 1}));
- }
+try {
+ load(testFile);
+} finally {
+ // Run a lightweight command to allow the override file to commit the last command.
+ // Ensure this command runs even if the test errors.
+ assert.commandWorked(db.runCommand({ping: 1}));
+}
})();
diff --git a/jstests/libs/txns/txn_passthrough_runner_selftest.js b/jstests/libs/txns/txn_passthrough_runner_selftest.js
index 86308f45188..b7836315b59 100644
--- a/jstests/libs/txns/txn_passthrough_runner_selftest.js
+++ b/jstests/libs/txns/txn_passthrough_runner_selftest.js
@@ -2,32 +2,31 @@
// check that operation is not visible immediately, but is visible after the transaction commits.
(function() {
- 'use strict';
+'use strict';
- const testName = jsTest.name();
+const testName = jsTest.name();
- // Use a unique db for every test so burn_in_tests can run this test multiple times.
- db = db.getSiblingDB('txn_self_test' + Random.srand());
+// Use a unique db for every test so burn_in_tests can run this test multiple times.
+db = db.getSiblingDB('txn_self_test' + Random.srand());
- // Profile all commands.
- db.setProfilingLevel(2);
+// Profile all commands.
+db.setProfilingLevel(2);
- const coll = db[testName];
+const coll = db[testName];
- assert.commandWorked(coll.insert({x: 1}));
- let commands = db.system.profile.find().toArray();
- // Check that the insert is not visible because the txn has not committed.
- assert.eq(commands.length, 1);
- assert.eq(commands[0].command.create, testName);
+assert.commandWorked(coll.insert({x: 1}));
+let commands = db.system.profile.find().toArray();
+// Check that the insert is not visible because the txn has not committed.
+assert.eq(commands.length, 1);
+assert.eq(commands[0].command.create, testName);
- // Use a dummy, unrelated operation to signal the txn runner to commit the transaction.
- assert.commandWorked(db.runCommand({ping: 1}));
-
- commands = db.system.profile.find().toArray();
- // Assert the insert is now visible.
- assert.eq(commands.length, 3);
- assert.eq(commands[0].command.create, testName);
- assert.eq(commands[1].command.insert, testName);
- assert.eq(commands[2].command.find, 'system.profile');
+// Use a dummy, unrelated operation to signal the txn runner to commit the transaction.
+assert.commandWorked(db.runCommand({ping: 1}));
+commands = db.system.profile.find().toArray();
+// Assert the insert is now visible.
+assert.eq(commands.length, 3);
+assert.eq(commands[0].command.create, testName);
+assert.eq(commands[1].command.insert, testName);
+assert.eq(commands[2].command.find, 'system.profile');
})();