summaryrefslogtreecommitdiff
path: root/jstests/multiVersion
diff options
context:
space:
mode:
authorclang-format-7.0.1 <adam.martin@10gen.com>2019-07-26 18:20:35 -0400
committerADAM David Alan Martin <adam.martin@10gen.com>2019-07-27 11:02:23 -0400
commit134a4083953270e8a11430395357fb70a29047ad (patch)
treedd428e1230e31d92b20b393dfdc17ffe7fa79cb6 /jstests/multiVersion
parent1e46b5049003f427047e723ea5fab15b5a9253ca (diff)
downloadmongo-134a4083953270e8a11430395357fb70a29047ad.tar.gz
SERVER-41772 Apply clang-format 7.0.1 to the codebase
Diffstat (limited to 'jstests/multiVersion')
-rw-r--r--jstests/multiVersion/2_test_launching_cluster.js90
-rw-r--r--jstests/multiVersion/add_invalid_shard.js65
-rw-r--r--jstests/multiVersion/change_streams_feature_compatibility_version.js182
-rw-r--r--jstests/multiVersion/change_streams_multi_version_transaction.js224
-rw-r--r--jstests/multiVersion/clone_helper.js101
-rw-r--r--jstests/multiVersion/config_transactions_set_fcv.js761
-rw-r--r--jstests/multiVersion/copydb_helper.js93
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js397
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js87
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js586
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/downgrade_after_rollback_via_refetch.js98
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js73
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js546
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js67
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary.js18
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary_drop_target.js20
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary.js18
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary_drop_target.js20
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/repair_feature_compatibility_version.js139
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js8
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js8
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/setFCV_collmod_transaction_rollback.js52
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js747
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js113
-rw-r--r--jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js339
-rw-r--r--jstests/multiVersion/index_value_empty_string_repl.js42
-rw-r--r--jstests/multiVersion/index_value_empty_string_upgrade.js76
-rw-r--r--jstests/multiVersion/initialize_from_old_node.js36
-rw-r--r--jstests/multiVersion/keystring_index.js419
-rw-r--r--jstests/multiVersion/libs/data_generators.js2
-rw-r--r--jstests/multiVersion/libs/dumprestore_helpers.js7
-rw-r--r--jstests/multiVersion/libs/global_snapshot_reads_helpers.js18
-rw-r--r--jstests/multiVersion/libs/initial_sync.js1
-rw-r--r--jstests/multiVersion/libs/multi_cluster.js1
-rw-r--r--jstests/multiVersion/libs/multi_rs.js3
-rw-r--r--jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js14
-rw-r--r--jstests/multiVersion/libs/verify_collection_data.js2
-rw-r--r--jstests/multiVersion/libs/verify_versions.js59
-rw-r--r--jstests/multiVersion/migration_between_mixed_version_mongods.js192
-rw-r--r--jstests/multiVersion/minor_version_tags_new_old_new.js24
-rw-r--r--jstests/multiVersion/minor_version_tags_old_new_old.js24
-rw-r--r--jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js69
-rw-r--r--jstests/multiVersion/remove_feature_compatibility_version.js21
-rw-r--r--jstests/multiVersion/sharded_txn_downgrade_cluster.js126
-rw-r--r--jstests/multiVersion/sharded_txn_upgrade_cluster.js101
-rw-r--r--jstests/multiVersion/skip_level_upgrade.js121
-rw-r--r--jstests/multiVersion/unique_index_empty_collmod.js62
-rw-r--r--jstests/multiVersion/upgrade_downgrade_cluster.js306
-rw-r--r--jstests/multiVersion/verify_versions_test.js154
49 files changed, 3341 insertions, 3391 deletions
diff --git a/jstests/multiVersion/2_test_launching_cluster.js b/jstests/multiVersion/2_test_launching_cluster.js
index f26d3e78ac0..30da8d39a73 100644
--- a/jstests/multiVersion/2_test_launching_cluster.js
+++ b/jstests/multiVersion/2_test_launching_cluster.js
@@ -13,49 +13,49 @@
load('./jstests/multiVersion/libs/verify_versions.js');
(function() {
- "use strict";
- // Check our latest versions
- var versionsToCheck = ["last-stable", "latest"];
- var versionsToCheckConfig = ["latest"];
- var versionsToCheckMongos = ["last-stable"];
-
- jsTest.log("Testing mixed versions...");
-
- // Set up a multi-version cluster
- var st = new ShardingTest({
- shards: 2,
- mongos: 2,
- other: {
- mongosOptions: {binVersion: versionsToCheckMongos},
- configOptions: {binVersion: versionsToCheckConfig},
- shardOptions: {binVersion: versionsToCheck},
- enableBalancer: true,
- shardAsReplicaSet: false
- }
- });
-
- var shards = [st.shard0, st.shard1];
- var mongoses = [st.s0, st.s1];
- var configs = [st.config0, st.config1, st.config2];
-
- // Make sure we have hosts of all the different versions
- var versionsFound = [];
- for (var j = 0; j < shards.length; j++)
- versionsFound.push(shards[j].getBinVersion());
-
- assert.allBinVersions(versionsToCheck, versionsFound);
-
- versionsFound = [];
- for (var j = 0; j < mongoses.length; j++)
- versionsFound.push(mongoses[j].getBinVersion());
-
- assert.allBinVersions(versionsToCheckMongos, versionsFound);
-
- versionsFound = [];
- for (var j = 0; j < configs.length; j++)
- versionsFound.push(configs[j].getBinVersion());
-
- assert.allBinVersions(versionsToCheckConfig, versionsFound);
-
- st.stop();
+"use strict";
+// Check our latest versions
+var versionsToCheck = ["last-stable", "latest"];
+var versionsToCheckConfig = ["latest"];
+var versionsToCheckMongos = ["last-stable"];
+
+jsTest.log("Testing mixed versions...");
+
+// Set up a multi-version cluster
+var st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ other: {
+ mongosOptions: {binVersion: versionsToCheckMongos},
+ configOptions: {binVersion: versionsToCheckConfig},
+ shardOptions: {binVersion: versionsToCheck},
+ enableBalancer: true,
+ shardAsReplicaSet: false
+ }
+});
+
+var shards = [st.shard0, st.shard1];
+var mongoses = [st.s0, st.s1];
+var configs = [st.config0, st.config1, st.config2];
+
+// Make sure we have hosts of all the different versions
+var versionsFound = [];
+for (var j = 0; j < shards.length; j++)
+ versionsFound.push(shards[j].getBinVersion());
+
+assert.allBinVersions(versionsToCheck, versionsFound);
+
+versionsFound = [];
+for (var j = 0; j < mongoses.length; j++)
+ versionsFound.push(mongoses[j].getBinVersion());
+
+assert.allBinVersions(versionsToCheckMongos, versionsFound);
+
+versionsFound = [];
+for (var j = 0; j < configs.length; j++)
+ versionsFound.push(configs[j].getBinVersion());
+
+assert.allBinVersions(versionsToCheckConfig, versionsFound);
+
+st.stop();
})();
diff --git a/jstests/multiVersion/add_invalid_shard.js b/jstests/multiVersion/add_invalid_shard.js
index caaeb23b839..1f9dfc9c40f 100644
--- a/jstests/multiVersion/add_invalid_shard.js
+++ b/jstests/multiVersion/add_invalid_shard.js
@@ -3,49 +3,48 @@
*/
(function() {
- "use strict";
+"use strict";
- var st = new ShardingTest({shards: 1});
+var st = new ShardingTest({shards: 1});
- var configDB = st.s.getDB('config');
- var shardDoc = configDB.shards.findOne();
+var configDB = st.s.getDB('config');
+var shardDoc = configDB.shards.findOne();
- // Can't add mongos as shard.
- assert.commandFailedWithCode(st.admin.runCommand({addshard: st.s.host}),
- ErrorCodes.IllegalOperation);
+// Can't add mongos as shard.
+assert.commandFailedWithCode(st.admin.runCommand({addshard: st.s.host}),
+ ErrorCodes.IllegalOperation);
- // Can't add a mongod with a lower binary version than our featureCompatibilityVersion.
- var lastStableMongod = MongoRunner.runMongod({binVersion: "last-stable", shardsvr: ""});
- assert.commandFailedWithCode(st.admin.runCommand({addshard: lastStableMongod.host}),
- ErrorCodes.IncompatibleServerVersion);
- MongoRunner.stopMongod(lastStableMongod);
+// Can't add a mongod with a lower binary version than our featureCompatibilityVersion.
+var lastStableMongod = MongoRunner.runMongod({binVersion: "last-stable", shardsvr: ""});
+assert.commandFailedWithCode(st.admin.runCommand({addshard: lastStableMongod.host}),
+ ErrorCodes.IncompatibleServerVersion);
+MongoRunner.stopMongod(lastStableMongod);
- // Can't add config servers as shard.
- assert.commandFailed(st.admin.runCommand({addshard: st._configDB}));
+// Can't add config servers as shard.
+assert.commandFailed(st.admin.runCommand({addshard: st._configDB}));
- var replTest = new ReplSetTest({nodes: 2, nodeOptions: {shardsvr: ""}});
- replTest.startSet({oplogSize: 10});
- replTest.initiate();
+var replTest = new ReplSetTest({nodes: 2, nodeOptions: {shardsvr: ""}});
+replTest.startSet({oplogSize: 10});
+replTest.initiate();
- var rsConnStr = replTest.getURL();
- // Can't add replSet as shard if the name doesn't match the replSet config.
- assert.commandFailed(st.admin.runCommand({addshard: "prefix_" + rsConnStr}));
+var rsConnStr = replTest.getURL();
+// Can't add replSet as shard if the name doesn't match the replSet config.
+assert.commandFailed(st.admin.runCommand({addshard: "prefix_" + rsConnStr}));
- assert.commandWorked(st.admin.runCommand({addshard: rsConnStr, name: 'dummyRS'}));
+assert.commandWorked(st.admin.runCommand({addshard: rsConnStr, name: 'dummyRS'}));
- // Cannot add the same replSet shard host twice when using a unique shard name.
- assert.commandFailed(st.admin.runCommand({addshard: rsConnStr, name: 'dupRS'}));
+// Cannot add the same replSet shard host twice when using a unique shard name.
+assert.commandFailed(st.admin.runCommand({addshard: rsConnStr, name: 'dupRS'}));
- // Cannot add the same stand alone shard host twice with a unique shard name.
- assert.commandFailed(st.admin.runCommand({addshard: shardDoc.host, name: 'dupShard'}));
+// Cannot add the same stand alone shard host twice with a unique shard name.
+assert.commandFailed(st.admin.runCommand({addshard: shardDoc.host, name: 'dupShard'}));
- // Cannot add a replica set connection string containing a member that isn't actually part of
- // the replica set.
- var truncatedRSConnStr = rsConnStr.substring(0, rsConnStr.indexOf(','));
- assert.commandFailed(
- st.admin.runCommand({addshard: truncatedRSConnStr + 'fakehost', name: 'dummyRS'}));
-
- replTest.stopSet();
- st.stop();
+// Cannot add a replica set connection string containing a member that isn't actually part of
+// the replica set.
+var truncatedRSConnStr = rsConnStr.substring(0, rsConnStr.indexOf(','));
+assert.commandFailed(
+ st.admin.runCommand({addshard: truncatedRSConnStr + 'fakehost', name: 'dummyRS'}));
+replTest.stopSet();
+st.stop();
})();
diff --git a/jstests/multiVersion/change_streams_feature_compatibility_version.js b/jstests/multiVersion/change_streams_feature_compatibility_version.js
index 23c489893e8..37c8ac7621b 100644
--- a/jstests/multiVersion/change_streams_feature_compatibility_version.js
+++ b/jstests/multiVersion/change_streams_feature_compatibility_version.js
@@ -3,103 +3,101 @@
// stream after network errors.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
- load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet.
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- const rst = new ReplSetTest({
- nodes: 2,
- nodeOptions: {binVersion: "last-stable"},
- });
+const rst = new ReplSetTest({
+ nodes: 2,
+ nodeOptions: {binVersion: "last-stable"},
+});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
-
- rst.initiate();
-
- let testDB = rst.getPrimary().getDB(jsTestName());
- let coll = testDB.change_stream_upgrade;
-
- // Open a change stream against a 4.0 binary. We will use the resume token from this stream to
- // resume the stream once the set has been upgraded.
- let streamStartedOnOldVersion = coll.watch();
- assert.commandWorked(coll.insert({_id: "first insert, just for resume token"}));
-
- assert.soon(() => streamStartedOnOldVersion.hasNext());
- let change = streamStartedOnOldVersion.next();
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.documentKey._id, "first insert, just for resume token", tojson(change));
- const resumeTokenFromLastStable = change._id;
-
- assert.commandWorked(coll.insert({_id: "before binary upgrade"}));
- // Upgrade the set to the new binary version, but keep the feature compatibility version at 4.0.
- rst.upgradeSet({binVersion: "latest"});
- testDB = rst.getPrimary().getDB(jsTestName());
- coll = testDB.change_stream_upgrade;
-
- // Test that we can resume the stream on the new binaries.
- streamStartedOnOldVersion = coll.watch([], {resumeAfter: resumeTokenFromLastStable});
- assert.soon(() => streamStartedOnOldVersion.hasNext());
- change = streamStartedOnOldVersion.next();
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.documentKey._id, "before binary upgrade", tojson(change));
-
- let streamStartedOnNewVersionOldFCV = coll.watch();
-
- assert.commandWorked(coll.insert({_id: "after binary upgrade, before fcv switch"}));
-
- let resumeTokenFromNewVersionOldFCV;
- [streamStartedOnOldVersion, streamStartedOnNewVersionOldFCV].forEach(stream => {
- assert.soon(() => stream.hasNext());
- change = stream.next();
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(
- change.documentKey._id, "after binary upgrade, before fcv switch", tojson(change));
- if (resumeTokenFromNewVersionOldFCV === undefined) {
- resumeTokenFromNewVersionOldFCV = change._id;
- } else {
- assert.eq(resumeTokenFromNewVersionOldFCV, change._id);
- }
- });
-
- // Explicitly set feature compatibility version to 4.2.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
-
- const streamStartedOnNewVersion = coll.watch();
-
- // Test that we can still resume with the token from the old version. We should see the same
- // document again.
- streamStartedOnOldVersion = coll.watch([], {resumeAfter: resumeTokenFromLastStable});
- assert.soon(() => streamStartedOnOldVersion.hasNext());
- change = streamStartedOnOldVersion.next();
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.documentKey._id, "before binary upgrade", tojson(change));
-
- assert.soon(() => streamStartedOnOldVersion.hasNext());
- change = streamStartedOnOldVersion.next();
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+
+rst.initiate();
+
+let testDB = rst.getPrimary().getDB(jsTestName());
+let coll = testDB.change_stream_upgrade;
+
+// Open a change stream against a 4.0 binary. We will use the resume token from this stream to
+// resume the stream once the set has been upgraded.
+let streamStartedOnOldVersion = coll.watch();
+assert.commandWorked(coll.insert({_id: "first insert, just for resume token"}));
+
+assert.soon(() => streamStartedOnOldVersion.hasNext());
+let change = streamStartedOnOldVersion.next();
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.documentKey._id, "first insert, just for resume token", tojson(change));
+const resumeTokenFromLastStable = change._id;
+
+assert.commandWorked(coll.insert({_id: "before binary upgrade"}));
+// Upgrade the set to the new binary version, but keep the feature compatibility version at 4.0.
+rst.upgradeSet({binVersion: "latest"});
+testDB = rst.getPrimary().getDB(jsTestName());
+coll = testDB.change_stream_upgrade;
+
+// Test that we can resume the stream on the new binaries.
+streamStartedOnOldVersion = coll.watch([], {resumeAfter: resumeTokenFromLastStable});
+assert.soon(() => streamStartedOnOldVersion.hasNext());
+change = streamStartedOnOldVersion.next();
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.documentKey._id, "before binary upgrade", tojson(change));
+
+let streamStartedOnNewVersionOldFCV = coll.watch();
+
+assert.commandWorked(coll.insert({_id: "after binary upgrade, before fcv switch"}));
+
+let resumeTokenFromNewVersionOldFCV;
+[streamStartedOnOldVersion, streamStartedOnNewVersionOldFCV].forEach(stream => {
+ assert.soon(() => stream.hasNext());
+ change = stream.next();
assert.eq(change.operationType, "insert", tojson(change));
assert.eq(change.documentKey._id, "after binary upgrade, before fcv switch", tojson(change));
-
- assert.commandWorked(coll.insert({_id: "after fcv upgrade"}));
- const resumedStreamOnNewVersion =
- coll.watch([], {resumeAfter: resumeTokenFromNewVersionOldFCV});
-
- // Test that all open streams continue to produce change events, and that the newly resumed
- // stream sees the write that just happened since it comes after the resume token used.
- for (let stream of[streamStartedOnOldVersion,
- streamStartedOnNewVersionOldFCV,
- streamStartedOnNewVersion,
- resumedStreamOnNewVersion]) {
- assert.soon(() => stream.hasNext());
- change = stream.next();
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.documentKey._id, "after fcv upgrade", tojson(change));
- stream.close();
+ if (resumeTokenFromNewVersionOldFCV === undefined) {
+ resumeTokenFromNewVersionOldFCV = change._id;
+ } else {
+ assert.eq(resumeTokenFromNewVersionOldFCV, change._id);
}
+});
+
+// Explicitly set feature compatibility version to 4.2.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: "4.2"}));
+
+const streamStartedOnNewVersion = coll.watch();
+
+// Test that we can still resume with the token from the old version. We should see the same
+// document again.
+streamStartedOnOldVersion = coll.watch([], {resumeAfter: resumeTokenFromLastStable});
+assert.soon(() => streamStartedOnOldVersion.hasNext());
+change = streamStartedOnOldVersion.next();
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.documentKey._id, "before binary upgrade", tojson(change));
+
+assert.soon(() => streamStartedOnOldVersion.hasNext());
+change = streamStartedOnOldVersion.next();
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.documentKey._id, "after binary upgrade, before fcv switch", tojson(change));
+
+assert.commandWorked(coll.insert({_id: "after fcv upgrade"}));
+const resumedStreamOnNewVersion = coll.watch([], {resumeAfter: resumeTokenFromNewVersionOldFCV});
+
+// Test that all open streams continue to produce change events, and that the newly resumed
+// stream sees the write that just happened since it comes after the resume token used.
+for (let stream of [streamStartedOnOldVersion,
+ streamStartedOnNewVersionOldFCV,
+ streamStartedOnNewVersion,
+ resumedStreamOnNewVersion]) {
+ assert.soon(() => stream.hasNext());
+ change = stream.next();
+ assert.eq(change.operationType, "insert", tojson(change));
+ assert.eq(change.documentKey._id, "after fcv upgrade", tojson(change));
+ stream.close();
+}
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/multiVersion/change_streams_multi_version_transaction.js b/jstests/multiVersion/change_streams_multi_version_transaction.js
index 1c2bfbd7e33..0ec28a63235 100644
--- a/jstests/multiVersion/change_streams_multi_version_transaction.js
+++ b/jstests/multiVersion/change_streams_multi_version_transaction.js
@@ -4,127 +4,125 @@
//
// @tags: [uses_change_streams, uses_transactions, requires_replication]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load('jstests/multiVersion/libs/multi_rs.js'); // For upgradeSet.
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/libs/feature_compatibility_version.js");
+load('jstests/multiVersion/libs/multi_rs.js'); // For upgradeSet.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- const rst = new ReplSetTest({
- nodes: 2,
- nopeOptions: {binVersion: "last-stable"},
- });
+const rst = new ReplSetTest({
+ nodes: 2,
+ nopeOptions: {binVersion: "last-stable"},
+});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
- rst.initiate();
+rst.initiate();
- const dbName = jsTestName();
- const watchedCollName = "change_stream_watched";
- const unwatchedCollName = "change_stream_unwatched";
+const dbName = jsTestName();
+const watchedCollName = "change_stream_watched";
+const unwatchedCollName = "change_stream_unwatched";
- rst.getPrimary().getDB(dbName).createCollection(watchedCollName);
- rst.getPrimary().getDB(dbName).createCollection(unwatchedCollName);
+rst.getPrimary().getDB(dbName).createCollection(watchedCollName);
+rst.getPrimary().getDB(dbName).createCollection(unwatchedCollName);
- // Calls next() on a change stream cursor 'n' times and returns an array with the results.
- function getChangeStreamResults(cursor, n) {
- let results = [];
- for (let i = 0; i < n; ++i) {
- assert.soon(() => cursor.hasNext(), "Timed out waiting for change stream result " + i);
- results.push(cursor.next());
- }
- assert(!cursor.hasNext()); // The change stream should always have exactly 'n' results.
- return results;
+// Calls next() on a change stream cursor 'n' times and returns an array with the results.
+function getChangeStreamResults(cursor, n) {
+ let results = [];
+ for (let i = 0; i < n; ++i) {
+ assert.soon(() => cursor.hasNext(), "Timed out waiting for change stream result " + i);
+ results.push(cursor.next());
}
-
- // Compare expected changes with output from a change stream, failing an assertion if they do
- // not match.
- function compareChanges(expectedChanges, observedChanges) {
- assert.eq(expectedChanges.length, observedChanges.length);
- for (let i = 0; i < expectedChanges.length; ++i) {
- assert.eq(expectedChanges[i].operationType, observedChanges[i].operationType);
- if (expectedChanges[i].hasOwnProperty("fullDocument")) {
- assert.eq(expectedChanges[i].fullDocument, observedChanges[i].fullDocument);
- }
- if (expectedChanges[i].hasOwnProperty("updateDescription")) {
- assert.eq(expectedChanges[i].updateDescription,
- observedChanges[i].updateDescription);
- }
- if (expectedChanges[i].hasOwnProperty("documentKey")) {
- assert.eq(expectedChanges[i].documentKey, observedChanges[i].documentKey);
- }
+ assert(!cursor.hasNext()); // The change stream should always have exactly 'n' results.
+ return results;
+}
+
+// Compare expected changes with output from a change stream, failing an assertion if they do
+// not match.
+function compareChanges(expectedChanges, observedChanges) {
+ assert.eq(expectedChanges.length, observedChanges.length);
+ for (let i = 0; i < expectedChanges.length; ++i) {
+ assert.eq(expectedChanges[i].operationType, observedChanges[i].operationType);
+ if (expectedChanges[i].hasOwnProperty("fullDocument")) {
+ assert.eq(expectedChanges[i].fullDocument, observedChanges[i].fullDocument);
+ }
+ if (expectedChanges[i].hasOwnProperty("updateDescription")) {
+ assert.eq(expectedChanges[i].updateDescription, observedChanges[i].updateDescription);
+ }
+ if (expectedChanges[i].hasOwnProperty("documentKey")) {
+ assert.eq(expectedChanges[i].documentKey, observedChanges[i].documentKey);
}
}
-
- // Opens a $changeStream and then performs inserts, deletes, updates both within a transaction
- // and outside the transaction. Leaves all collections empty when done.
- function performDBOps(mongod) {
- const session = mongod.startSession();
- session.startTransaction();
-
- const watchedColl = session.getDatabase(dbName)[watchedCollName];
- assert.commandWorked(watchedColl.insert({_id: 1}));
- assert.commandWorked(watchedColl.updateOne({_id: 1}, {$set: {a: 1}}));
- assert.commandWorked(watchedColl.remove({_id: 1}));
-
- const unwatchedColl = session.getDatabase(dbName)[unwatchedCollName];
- assert.commandWorked(unwatchedColl.insert({_id: 1}));
- assert.commandWorked(unwatchedColl.remove({_id: 1}));
-
- const watchedCollNoTxn = mongod.getDB(dbName)[watchedCollName];
- assert.commandWorked(watchedCollNoTxn.insert({_id: 2}));
- assert.commandWorked(watchedCollNoTxn.remove({_id: 2}));
-
- session.commitTransaction();
- }
-
- // Resume a change stream from each of the resume tokens in the 'changeStreamDocs' array and
- // verify that we always see the same set of changes.
- function resumeChangeStreamFromEachToken(mongod, changeStreamDocs, expectedChanges) {
- changeStreamDocs.forEach(function(changeDoc, i) {
- const testDB = mongod.getDB(dbName);
- const resumedCursor = testDB[watchedCollName].watch([], {resumeAfter: changeDoc._id});
-
- // Resuming from document 'i' should return all the documents from 'i' + 1 to the end of
- // the array.
- const expectedChangesAfterResumeToken = expectedChanges.slice(i + 1);
- compareChanges(
- expectedChangesAfterResumeToken,
- getChangeStreamResults(resumedCursor, expectedChangesAfterResumeToken.length));
- });
- }
-
- const expectedChanges = [
- {operationType: "insert", fullDocument: {_id: 2}},
- {operationType: "delete", documentKey: {_id: 2}},
- {operationType: "insert", fullDocument: {_id: 1}},
- {operationType: "update", updateDescription: {updatedFields: {a: 1}, removedFields: []}},
- {operationType: "delete", documentKey: {_id: 1}},
- ];
-
- // Create the original change stream, verify it gives us the changes we expect, and verify that
- // we can correctly resume from any resume token.
- const changeStreamCursor = rst.getPrimary().getDB(dbName)[watchedCollName].watch();
- performDBOps(rst.getPrimary());
- const changeStreamDocs = getChangeStreamResults(changeStreamCursor, expectedChanges.length);
- compareChanges(expectedChanges, changeStreamDocs);
- resumeChangeStreamFromEachToken(rst.getPrimary(), changeStreamDocs, expectedChanges);
-
- // Upgrade the replica set (while leaving featureCompatibilityVersion as it is) and verify that
- // we can correctly resume from any resume token.
- rst.upgradeSet({binVersion: "latest"});
- resumeChangeStreamFromEachToken(rst.getPrimary(), changeStreamDocs, expectedChanges);
-
- // Upgrade the featureCompatibilityVersion and verify that we can correctly resume from any
- // resume token.
- assert.commandWorked(
- rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(rst.getPrimary().getDB("admin"), latestFCV);
- resumeChangeStreamFromEachToken(rst.getPrimary(), changeStreamDocs, expectedChanges);
-
- rst.stopSet();
+}
+
+// Opens a $changeStream and then performs inserts, deletes, updates both within a transaction
+// and outside the transaction. Leaves all collections empty when done.
+function performDBOps(mongod) {
+ const session = mongod.startSession();
+ session.startTransaction();
+
+ const watchedColl = session.getDatabase(dbName)[watchedCollName];
+ assert.commandWorked(watchedColl.insert({_id: 1}));
+ assert.commandWorked(watchedColl.updateOne({_id: 1}, {$set: {a: 1}}));
+ assert.commandWorked(watchedColl.remove({_id: 1}));
+
+ const unwatchedColl = session.getDatabase(dbName)[unwatchedCollName];
+ assert.commandWorked(unwatchedColl.insert({_id: 1}));
+ assert.commandWorked(unwatchedColl.remove({_id: 1}));
+
+ const watchedCollNoTxn = mongod.getDB(dbName)[watchedCollName];
+ assert.commandWorked(watchedCollNoTxn.insert({_id: 2}));
+ assert.commandWorked(watchedCollNoTxn.remove({_id: 2}));
+
+ session.commitTransaction();
+}
+
+// Resume a change stream from each of the resume tokens in the 'changeStreamDocs' array and
+// verify that we always see the same set of changes.
+function resumeChangeStreamFromEachToken(mongod, changeStreamDocs, expectedChanges) {
+ changeStreamDocs.forEach(function(changeDoc, i) {
+ const testDB = mongod.getDB(dbName);
+ const resumedCursor = testDB[watchedCollName].watch([], {resumeAfter: changeDoc._id});
+
+ // Resuming from document 'i' should return all the documents from 'i' + 1 to the end of
+ // the array.
+ const expectedChangesAfterResumeToken = expectedChanges.slice(i + 1);
+ compareChanges(
+ expectedChangesAfterResumeToken,
+ getChangeStreamResults(resumedCursor, expectedChangesAfterResumeToken.length));
+ });
+}
+
+const expectedChanges = [
+ {operationType: "insert", fullDocument: {_id: 2}},
+ {operationType: "delete", documentKey: {_id: 2}},
+ {operationType: "insert", fullDocument: {_id: 1}},
+ {operationType: "update", updateDescription: {updatedFields: {a: 1}, removedFields: []}},
+ {operationType: "delete", documentKey: {_id: 1}},
+];
+
+// Create the original change stream, verify it gives us the changes we expect, and verify that
+// we can correctly resume from any resume token.
+const changeStreamCursor = rst.getPrimary().getDB(dbName)[watchedCollName].watch();
+performDBOps(rst.getPrimary());
+const changeStreamDocs = getChangeStreamResults(changeStreamCursor, expectedChanges.length);
+compareChanges(expectedChanges, changeStreamDocs);
+resumeChangeStreamFromEachToken(rst.getPrimary(), changeStreamDocs, expectedChanges);
+
+// Upgrade the replica set (while leaving featureCompatibilityVersion as it is) and verify that
+// we can correctly resume from any resume token.
+rst.upgradeSet({binVersion: "latest"});
+resumeChangeStreamFromEachToken(rst.getPrimary(), changeStreamDocs, expectedChanges);
+
+// Upgrade the featureCompatibilityVersion and verify that we can correctly resume from any
+// resume token.
+assert.commandWorked(rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(rst.getPrimary().getDB("admin"), latestFCV);
+resumeChangeStreamFromEachToken(rst.getPrimary(), changeStreamDocs, expectedChanges);
+
+rst.stopSet();
}());
diff --git a/jstests/multiVersion/clone_helper.js b/jstests/multiVersion/clone_helper.js
index 0c230189e34..9253c0ffc31 100644
--- a/jstests/multiVersion/clone_helper.js
+++ b/jstests/multiVersion/clone_helper.js
@@ -1,67 +1,66 @@
// SERVER-36438 Ensure the 4.2 cloneDatabase() shell helper still successfully executes the clone
// command on a 4.0 server, now that the clone command has been removed as of 4.2.
(function() {
- "use strict";
- const oldVersion = "4.0";
+"use strict";
+const oldVersion = "4.0";
- let numDocs = 2000;
+let numDocs = 2000;
- // 1kb string
- let str = new Array(1000).toString();
+// 1kb string
+let str = new Array(1000).toString();
- let replsetDBName = "cloneDBreplset";
- let standaloneDBName = "cloneDBstandalone";
- let testColName = "foo";
- let testViewName = "view";
+let replsetDBName = "cloneDBreplset";
+let standaloneDBName = "cloneDBstandalone";
+let testColName = "foo";
+let testViewName = "view";
- jsTest.log("Create replica set");
- let replTest =
- new ReplSetTest({name: "testSet", nodes: 3, nodeOptions: {binVersion: oldVersion}});
- replTest.startSet();
- replTest.initiate();
- let master = replTest.getPrimary();
- let masterDB = master.getDB(replsetDBName);
- masterDB.dropDatabase();
+jsTest.log("Create replica set");
+let replTest = new ReplSetTest({name: "testSet", nodes: 3, nodeOptions: {binVersion: oldVersion}});
+replTest.startSet();
+replTest.initiate();
+let master = replTest.getPrimary();
+let masterDB = master.getDB(replsetDBName);
+masterDB.dropDatabase();
- jsTest.log("Create standalone server");
- let standalone = MongoRunner.runMongod({binVersion: oldVersion});
- let standaloneDB = standalone.getDB(replsetDBName);
- standaloneDB.dropDatabase();
+jsTest.log("Create standalone server");
+let standalone = MongoRunner.runMongod({binVersion: oldVersion});
+let standaloneDB = standalone.getDB(replsetDBName);
+standaloneDB.dropDatabase();
- jsTest.log("Insert data into replica set");
- let bulk = masterDB[testColName].initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; i++) {
- bulk.insert({x: i, text: str});
- }
- assert.writeOK(bulk.execute({w: 3}));
+jsTest.log("Insert data into replica set");
+let bulk = masterDB[testColName].initializeUnorderedBulkOp();
+for (let i = 0; i < numDocs; i++) {
+ bulk.insert({x: i, text: str});
+}
+assert.writeOK(bulk.execute({w: 3}));
- jsTest.log("Create view on replica set");
- assert.commandWorked(masterDB.runCommand({create: testViewName, viewOn: testColName}));
+jsTest.log("Create view on replica set");
+assert.commandWorked(masterDB.runCommand({create: testViewName, viewOn: testColName}));
- // Make sure all writes have replicated to secondary.
- replTest.awaitReplication();
+// Make sure all writes have replicated to secondary.
+replTest.awaitReplication();
- jsTest.log("Clone db from replica set to standalone server");
- standaloneDB.cloneDatabase(replTest.getURL());
- assert.eq(numDocs,
- standaloneDB[testColName].find().itcount(),
- "cloneDatabase from replset to standalone failed (document counts do not match)");
- assert.eq(numDocs,
- standaloneDB[testViewName].find().itcount(),
- "cloneDatabase from replset to standalone failed (count on view incorrect)");
+jsTest.log("Clone db from replica set to standalone server");
+standaloneDB.cloneDatabase(replTest.getURL());
+assert.eq(numDocs,
+ standaloneDB[testColName].find().itcount(),
+ "cloneDatabase from replset to standalone failed (document counts do not match)");
+assert.eq(numDocs,
+ standaloneDB[testViewName].find().itcount(),
+ "cloneDatabase from replset to standalone failed (count on view incorrect)");
- jsTest.log("Clone db from replica set PRIMARY to standalone server");
- standaloneDB.dropDatabase();
- standaloneDB.cloneDatabase(master.host);
- assert.eq(numDocs,
- standaloneDB[testColName].find().itcount(),
- "cloneDatabase from PRIMARY to standalone failed (document counts do not match)");
- assert.eq(numDocs,
- standaloneDB[testViewName].find().itcount(),
- "cloneDatabase from PRIMARY to standalone failed (count on view incorrect)");
+jsTest.log("Clone db from replica set PRIMARY to standalone server");
+standaloneDB.dropDatabase();
+standaloneDB.cloneDatabase(master.host);
+assert.eq(numDocs,
+ standaloneDB[testColName].find().itcount(),
+ "cloneDatabase from PRIMARY to standalone failed (document counts do not match)");
+assert.eq(numDocs,
+ standaloneDB[testViewName].find().itcount(),
+ "cloneDatabase from PRIMARY to standalone failed (count on view incorrect)");
- jsTest.log("Shut down replica set and standalone server");
- MongoRunner.stopMongod(standalone);
+jsTest.log("Shut down replica set and standalone server");
+MongoRunner.stopMongod(standalone);
- replTest.stopSet();
+replTest.stopSet();
})();
diff --git a/jstests/multiVersion/config_transactions_set_fcv.js b/jstests/multiVersion/config_transactions_set_fcv.js
index c5dd323c580..9f2a059a4d2 100644
--- a/jstests/multiVersion/config_transactions_set_fcv.js
+++ b/jstests/multiVersion/config_transactions_set_fcv.js
@@ -6,426 +6,431 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load('jstests/sharding/libs/sharded_transactions_helpers.js');
-
- const dbName = "test";
- const collName = "config_transactions_set_fcv";
-
- // Define autocommit as a variable so it can be used in object literals w/o an explicit value.
- const autocommit = false;
-
- // Start a replica set with an odd number of members to verify nodes outside the majority behave
- // correctly around setFeatureCompatibilityVersion, which uses majority writes to update the FCV
- // document. The primary isn't expected to change, so each secondary is given priority 0.
- const rst =
- new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
-
- let testDB = rst.getPrimary().getDB(dbName);
- let adminDB = rst.getPrimary().getDB("admin");
-
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+"use strict";
+load("jstests/libs/feature_compatibility_version.js");
+load('jstests/sharding/libs/sharded_transactions_helpers.js');
+
+const dbName = "test";
+const collName = "config_transactions_set_fcv";
+
+// Define autocommit as a variable so it can be used in object literals w/o an explicit value.
+const autocommit = false;
+
+// Start a replica set with an odd number of members to verify nodes outside the majority behave
+// correctly around setFeatureCompatibilityVersion, which uses majority writes to update the FCV
+// document. The primary isn't expected to change, so each secondary is given priority 0.
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+
+let testDB = rst.getPrimary().getDB(dbName);
+let adminDB = rst.getPrimary().getDB("admin");
+
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Starts a dummy transaction, commits or aborts it with or without prepare, then returns the
+// commit or abort response. Returns the response from prepare if it fails.
+function runTxn({lsid, txnNumber}, {commit, prepare, leaveOpen}) {
+ const startTransactionRes = testDB.runCommand({
+ insert: collName,
+ documents: [{x: "dummy_txn"}],
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ lsid,
+ autocommit,
+ });
+ if (!startTransactionRes.ok || leaveOpen) {
+ return startTransactionRes;
+ }
- // Starts a dummy transaction, commits or aborts it with or without prepare, then returns the
- // commit or abort response. Returns the response from prepare if it fails.
- function runTxn({lsid, txnNumber}, {commit, prepare, leaveOpen}) {
- const startTransactionRes = testDB.runCommand({
- insert: collName,
- documents: [{x: "dummy_txn"}],
+ if (prepare) {
+ const prepareRes = testDB.adminCommand({
+ prepareTransaction: 1,
txnNumber: NumberLong(txnNumber),
- startTransaction: true, lsid, autocommit,
+ lsid,
+ autocommit,
+ writeConcern: {w: "majority"}
});
- if (!startTransactionRes.ok || leaveOpen) {
- return startTransactionRes;
- }
-
- if (prepare) {
- const prepareRes = testDB.adminCommand({
- prepareTransaction: 1,
- txnNumber: NumberLong(txnNumber), lsid, autocommit,
- writeConcern: {w: "majority"}
- });
- if (!prepareRes.ok) {
- return prepareRes;
- }
-
- if (commit) {
- // Add 1 to the increment so that the commitTimestamp is after the prepareTimestamp.
- const commitTimestamp = Timestamp(prepareRes.prepareTimestamp.getTime(),
- prepareRes.prepareTimestamp.getInc() + 1);
- return testDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp,
- txnNumber: NumberLong(txnNumber), lsid, autocommit
- });
- } else {
- return testDB.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit});
- }
+ if (!prepareRes.ok) {
+ return prepareRes;
}
if (commit) {
- return testDB.adminCommand(
- {commitTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit});
+ // Add 1 to the increment so that the commitTimestamp is after the prepareTimestamp.
+ const commitTimestamp = Timestamp(prepareRes.prepareTimestamp.getTime(),
+ prepareRes.prepareTimestamp.getInc() + 1);
+ return testDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp,
+ txnNumber: NumberLong(txnNumber),
+ lsid,
+ autocommit
+ });
} else {
return testDB.adminCommand(
{abortTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit});
}
}
- // Retries commitTransaction for the given txnId, returning the response.
- function retryCommit({lsid, txnNumber}) {
+ if (commit) {
return testDB.adminCommand(
{commitTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit});
+ } else {
+ return testDB.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit});
}
-
- // Asserts aborting the given txnId returns NoSuchTransaction.
- function assertTransactionAborted({lsid, txnNumber}) {
- assert.commandFailedWithCode(
- testDB.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit}),
- ErrorCodes.NoSuchTransaction);
- }
-
- // Global counter for the number of retryable writes completed. Used to verify retried retryable
- // writes aren't double applied.
- let numRetryableWrites = 0;
-
- // Runs a dummy retryable write and increments the retryable write counter.
- function assertRetryableWriteWorked({lsid, txnNumber}) {
- numRetryableWrites += 1;
- assert.commandWorked(testDB.runCommand({
- insert: collName,
- documents: [{fromRetryableWrite: true}],
- txnNumber: NumberLong(txnNumber), lsid
- }));
- }
-
- // Verifies a txnId has already been used for a retryable write by running a dummy retryable
- // write and asserting the write isn't applied.
- function assertRetryableWriteCanBeRetried({lsid, txnNumber}) {
- assert.commandWorked(testDB.runCommand({
- insert: collName,
- documents: [{fromRetryableWrite: true}],
- txnNumber: NumberLong(txnNumber), lsid
- }));
- assert.eq(numRetryableWrites, testDB[collName].find({fromRetryableWrite: true}).itcount());
- }
-
- // Searches config.transactions for an entry for the given txnId on each node in the replica
- // set, verifying the entry does / does not exist and has the expected state, if specified.
- function checkConfigTransactionEntry(rst, {lsid, txnNumber}, {hasEntry, expectedState}) {
- rst.awaitReplication();
- rst.nodes.forEach((node) => {
- // Search for id since we don't know the uid, which is generated by the server.
- const entry = node.getDB("config").transactions.findOne({"_id.id": lsid.id});
-
- if (!hasEntry) {
- // There should be no entry for this session or it should be for an earlier
- // operation.
- if (entry) {
- assert.gt(txnNumber,
- entry.txnNum,
- "expected entry to have lower txnNumber, entry: " + tojson(entry) +
- ", node: " + tojson(node));
- } else {
- assert.isnull(entry,
- "expected entry to be null, entry: " + tojson(entry) +
- ", node: " + tojson(node));
- }
- return;
- }
-
- assert.eq(txnNumber,
- entry.txnNum,
- "expected entry to have the same txnNumber, entry: " + tojson(entry) +
- ", node: " + tojson(node));
-
- if (expectedState) {
- assert.eq(expectedState,
- entry.state,
- "entry: " + tojson(entry) + ", node: " + tojson(node));
+}
+
+// Retries commitTransaction for the given txnId, returning the response.
+function retryCommit({lsid, txnNumber}) {
+ return testDB.adminCommand(
+ {commitTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit});
+}
+
+// Asserts aborting the given txnId returns NoSuchTransaction.
+function assertTransactionAborted({lsid, txnNumber}) {
+ assert.commandFailedWithCode(
+ testDB.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber), lsid, autocommit}),
+ ErrorCodes.NoSuchTransaction);
+}
+
+// Global counter for the number of retryable writes completed. Used to verify retried retryable
+// writes aren't double applied.
+let numRetryableWrites = 0;
+
+// Runs a dummy retryable write and increments the retryable write counter.
+function assertRetryableWriteWorked({lsid, txnNumber}) {
+ numRetryableWrites += 1;
+ assert.commandWorked(testDB.runCommand({
+ insert: collName,
+ documents: [{fromRetryableWrite: true}],
+ txnNumber: NumberLong(txnNumber),
+ lsid
+ }));
+}
+
+// Verifies a txnId has already been used for a retryable write by running a dummy retryable
+// write and asserting the write isn't applied.
+function assertRetryableWriteCanBeRetried({lsid, txnNumber}) {
+ assert.commandWorked(testDB.runCommand({
+ insert: collName,
+ documents: [{fromRetryableWrite: true}],
+ txnNumber: NumberLong(txnNumber),
+ lsid
+ }));
+ assert.eq(numRetryableWrites, testDB[collName].find({fromRetryableWrite: true}).itcount());
+}
+
+// Searches config.transactions for an entry for the given txnId on each node in the replica
+// set, verifying the entry does / does not exist and has the expected state, if specified.
+function checkConfigTransactionEntry(rst, {lsid, txnNumber}, {hasEntry, expectedState}) {
+ rst.awaitReplication();
+ rst.nodes.forEach((node) => {
+ // Search for id since we don't know the uid, which is generated by the server.
+ const entry = node.getDB("config").transactions.findOne({"_id.id": lsid.id});
+
+ if (!hasEntry) {
+ // There should be no entry for this session or it should be for an earlier
+ // operation.
+ if (entry) {
+ assert.gt(txnNumber,
+ entry.txnNum,
+ "expected entry to have lower txnNumber, entry: " + tojson(entry) +
+ ", node: " + tojson(node));
} else {
- assert(!entry.hasOwnProperty("state"),
- "expected entry to not have state, entry: " + tojson(entry) + ", node: " +
- tojson(node));
- }
- });
- }
-
- function runTest({shouldRestart}) {
- // The test waits for failpoints to log a message when hit, so clear the program output
- // before starting so messages from previous iterations aren't in it.
- clearRawMongoProgramOutput();
-
- const txnIds = {
- write: {lsid: {id: UUID()}, txnNumber: 0}, // Retryable write.
- commit: {lsid: {id: UUID()}, txnNumber: 0}, // Committed transaction w/o prepare.
- commitPrepare: {lsid: {id: UUID()}, txnNumber: 0}, // Committed transaction w/ prepare.
- abort: {lsid: {id: UUID()}, txnNumber: 0}, // Aborted transaction w/o prepare.
- abortPrepare: {lsid: {id: UUID()}, txnNumber: 0}, // Aborted transaction after prepare.
- concurrentTxn: {lsid: {id: UUID()}, txnNumber: 0}, // Transaction concurrent w/ setFCV.
- concurrentWrite:
- {lsid: {id: UUID()}, txnNumber: 0}, // Retryable write concurrent w/ setFCV.
- upgradingTxn:
- {lsid: {id: UUID()}, txnNumber: 0}, // Transaction started during FCV upgrade.
- };
-
- //
- // In the latest FCV, verify the expected updates are made to config.transactions for each
- // case and the successful operations are retryable.
- //
- checkFCV(adminDB, latestFCV);
-
- assertRetryableWriteWorked(txnIds.write);
- assert.commandWorked(runTxn(txnIds.commit, {commit: true, prepare: false}));
- assert.commandWorked(runTxn(txnIds.commitPrepare, {commit: true, prepare: true}));
- assert.commandWorked(runTxn(txnIds.abort, {commit: false, prepare: false}));
- assert.commandWorked(runTxn(txnIds.abortPrepare, {commit: false, prepare: true}));
-
- checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
- checkConfigTransactionEntry(
- rst, txnIds.commit, {hasEntry: true, expectedState: "committed"});
- checkConfigTransactionEntry(
- rst, txnIds.commitPrepare, {hasEntry: true, expectedState: "committed"});
- checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
- checkConfigTransactionEntry(
- rst, txnIds.abortPrepare, {hasEntry: true, expectedState: "aborted"});
-
- // The retryable write and the commit of both committed transactions should be retryable.
- // The aborted transactions should still be aborted.
- assertRetryableWriteCanBeRetried(txnIds.write);
- assert.commandWorked(retryCommit(txnIds.commit));
- assert.commandWorked(retryCommit(txnIds.commitPrepare));
- assertTransactionAborted(txnIds.abort);
- assertTransactionAborted(txnIds.abortPrepare);
-
- //
- // Downgrade to the last-stable FCV and verify config.transactions was updated as expected
- // for previously completed operations and operations concurrent with the downgrade.
- //
-
- if (shouldRestart) {
- // Restart to verify config.transactions entries for sessions not in-memory at the
- // beginning of FCV downgrade are updated correctly.
- jsTestLog("Restarting replica set before downgrading the featureCompatibilityVersion.");
- for (let i = 0; i < rst.nodes.length; i++) {
- rst.restart(i);
+ assert.isnull(entry,
+ "expected entry to be null, entry: " + tojson(entry) +
+ ", node: " + tojson(node));
}
- testDB = rst.getPrimary().getDB(dbName);
- adminDB = rst.getPrimary().getDB("admin");
+ return;
}
- // Make setFCV pause in the downgrading state after getting the list of sessions to
- // potentially modify.
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "pauseBeforeDowngradingSessions", mode: "alwaysOn"}));
-
- // Downgrade FCV in a parallel shell and wait until it blocks at the failpoint above.
- const awaitDowngradeFCV = startParallelShell(() => {
- load("jstests/libs/feature_compatibility_version.js");
- jsTestLog("Downgrade the featureCompatibilityVersion in a parallel shell.");
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- }, rst.getPrimary().port);
- waitForFailpoint("Hit pauseBeforeDowngradingSessions failpoint", 1 /*numTimes*/);
-
- // Concurrent transactions that use prepare will fail.
- assert.commandFailedWithCode(runTxn(txnIds.concurrentTxn, {commit: true, prepare: true}),
- ErrorCodes.CommandNotSupported);
- txnIds.concurrentTxn.txnNumber += 1;
-
- // Concurrent transactions that do not use prepare and retryable writes succeed.
- assert.commandWorked(runTxn(txnIds.concurrentTxn, {commit: true, prepare: false}));
- assertRetryableWriteWorked(txnIds.concurrentWrite);
-
- // Unset the failpoint and wait for the downgrade to finish.
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "pauseBeforeDowngradingSessions", mode: "off"}));
-
- awaitDowngradeFCV();
- checkFCV(adminDB, lastStableFCV);
-
- // The successful concurrent operations should have entries without state and be retryable.
- checkConfigTransactionEntry(rst, txnIds.concurrentTxn, {hasEntry: true});
- assert.commandWorked(retryCommit(txnIds.concurrentTxn));
- checkConfigTransactionEntry(rst, txnIds.concurrentWrite, {hasEntry: true});
- assertRetryableWriteCanBeRetried(txnIds.concurrentWrite);
-
- // Only the retryable write entry should remain.
- checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
- checkConfigTransactionEntry(rst, txnIds.commit, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.commitPrepare, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.abortPrepare, {hasEntry: false});
-
- // The retryable write can be retried.
- assertRetryableWriteCanBeRetried(txnIds.write);
-
- // Neither of the commits can be retried.
- assert.commandFailedWithCode(retryCommit(txnIds.commit), ErrorCodes.NoSuchTransaction);
- assert.commandFailedWithCode(retryCommit(txnIds.commitPrepare),
- ErrorCodes.NoSuchTransaction);
-
- //
- // In the last-stable FCV, verify the expected updates are made to config.transactions for
- // each case and the successful operations are retryable.
- //
-
- // Reset each txnId to test upgrade with a clean slate.
- Object.keys(txnIds).forEach((txnIdKey) => {
- txnIds[txnIdKey].lsid = {id: UUID()};
- txnIds[txnIdKey].txnNumber = 0;
- });
+ assert.eq(txnNumber,
+ entry.txnNum,
+ "expected entry to have the same txnNumber, entry: " + tojson(entry) +
+ ", node: " + tojson(node));
- // Prepare can't be used in FCV 4.0, so only commit, abort, and retryable write should
- // succeed.
- assertRetryableWriteWorked(txnIds.write);
- assert.commandWorked(runTxn(txnIds.commit, {commit: true, prepare: false}));
- assert.commandFailedWithCode(runTxn(txnIds.commitPrepare, {commit: true, prepare: true}),
- ErrorCodes.CommandNotSupported);
- assert.commandWorked(runTxn(txnIds.abort, {commit: false, prepare: false}));
- assert.commandFailedWithCode(runTxn(txnIds.abortPrepare, {commit: false, prepare: true}),
- ErrorCodes.CommandNotSupported);
-
- // Only the retryable write and transaction that committed without prepare should have an
- // entry. Neither should have state.
- checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
- checkConfigTransactionEntry(rst, txnIds.commit, {hasEntry: true});
- checkConfigTransactionEntry(rst, txnIds.commitPrepare, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.abortPrepare, {hasEntry: false});
-
- // The retryable write and successful commit can be retried.
- assertRetryableWriteCanBeRetried(txnIds.write);
- assert.commandWorked(retryCommit(txnIds.commit));
-
- if (shouldRestart) {
- // Restart to verify config.transactions entries for sessions not in-memory at the
- // beginning of FCV upgrade are updated correctly.
- jsTestLog("Restarting replica set before upgrading the featureCompatibilityVersion.");
- for (let i = 0; i < rst.nodes.length; i++) {
- rst.restart(i);
- }
- testDB = rst.getPrimary().getDB(dbName);
- adminDB = rst.getPrimary().getDB("admin");
+ if (expectedState) {
+ assert.eq(
+ expectedState, entry.state, "entry: " + tojson(entry) + ", node: " + tojson(node));
+ } else {
+ assert(!entry.hasOwnProperty("state"),
+ "expected entry to not have state, entry: " + tojson(entry) +
+ ", node: " + tojson(node));
}
+ });
+}
- //
- // Upgrade to the latest FCV and verify config.transactions was updated as expected for
- // previously completed operations and operations concurrent with the upgrade.
- //
-
- // Run a retryable write on the session that will be used during upgrade so it has a
- // transaction table entry and will be checked out by the upgrade.
- assertRetryableWriteWorked(txnIds.upgradingTxn);
- txnIds.upgradingTxn.txnNumber += 1;
-
- // Make setFCV pause in the upgrading state after getting the list of sessions to
- // potentially modify.
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "pauseBeforeUpgradingSessions", mode: "alwaysOn"}));
-
- // Upgrade FCV in a parallel shell and wait until it blocks at the failpoint above.
- const awaitUpgradeFCV = startParallelShell(() => {
- load("jstests/libs/feature_compatibility_version.js");
- jsTestLog("Upgrade the featureCompatibilityVersion in a parallel shell.");
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- }, rst.getPrimary().port);
- waitForFailpoint("Hit pauseBeforeUpgradingSessions failpoint", 1 /*numTimes*/);
-
- // Concurrent transactions that use prepare will fail.
- assert.commandFailedWithCode(runTxn(txnIds.concurrentTxn, {commit: true, prepare: true}),
- ErrorCodes.CommandNotSupported);
- txnIds.concurrentTxn.txnNumber += 1;
-
- // Concurrent transactions that do not use prepare and retryable writes succeed.
- assert.commandWorked(runTxn(txnIds.concurrentTxn, {commit: true, prepare: false}));
- assertRetryableWriteWorked(txnIds.concurrentWrite);
-
- // Start a transaction in the upgrading state and verify that it doesn't get aborted by the
- // rest of the upgrade. Note that all sessions are killed and their transactions aborted for
- // writes to the FCV document except when it is set to the fully upgraded state, so this
- // can't be tested for downgrade.
- assert.commandWorked(runTxn(txnIds.upgradingTxn, {leaveOpen: true}));
-
- // Unset the failpoint and wait for the upgrade to finish.
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "pauseBeforeUpgradingSessions", mode: "off"}));
-
- awaitUpgradeFCV();
- checkFCV(adminDB, latestFCV);
-
- // The transaction started while upgrading shouldn't have been killed and can be committed.
- assert.commandWorked(testDB.adminCommand({
- commitTransaction: 1,
- lsid: txnIds.upgradingTxn.lsid,
- txnNumber: NumberLong(txnIds.upgradingTxn.txnNumber), autocommit
- }));
-
- // The successful concurrent transaction should have "committed" state and be retryable, and
- // the concurrent retryable write should not have state and also be retryable.
- checkConfigTransactionEntry(
- rst, txnIds.concurrentTxn, {hasEntry: true, expectedState: "committed"});
- assert.commandWorked(retryCommit(txnIds.concurrentTxn));
- checkConfigTransactionEntry(rst, txnIds.concurrentWrite, {hasEntry: true});
- assertRetryableWriteCanBeRetried(txnIds.concurrentWrite);
-
- // There should still only be entries for the committed transaction and retryable write. The
- // committed transaction should now have a "state" field.
- checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
- checkConfigTransactionEntry(
- rst, txnIds.commit, {hasEntry: true, expectedState: "committed"});
- checkConfigTransactionEntry(rst, txnIds.commitPrepare, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
- checkConfigTransactionEntry(rst, txnIds.abortPrepare, {hasEntry: false});
-
- // The retryable write and successful commit can be retried.
- assertRetryableWriteCanBeRetried(txnIds.write);
- assert.commandWorked(retryCommit(txnIds.commit));
- }
+function runTest({shouldRestart}) {
+ // The test waits for failpoints to log a message when hit, so clear the program output
+ // before starting so messages from previous iterations aren't in it.
+ clearRawMongoProgramOutput();
- runTest({shouldRestart: false});
- runTest({shouldRestart: true});
+ const txnIds = {
+ write: {lsid: {id: UUID()}, txnNumber: 0}, // Retryable write.
+ commit: {lsid: {id: UUID()}, txnNumber: 0}, // Committed transaction w/o prepare.
+ commitPrepare: {lsid: {id: UUID()}, txnNumber: 0}, // Committed transaction w/ prepare.
+ abort: {lsid: {id: UUID()}, txnNumber: 0}, // Aborted transaction w/o prepare.
+ abortPrepare: {lsid: {id: UUID()}, txnNumber: 0}, // Aborted transaction after prepare.
+ concurrentTxn: {lsid: {id: UUID()}, txnNumber: 0}, // Transaction concurrent w/ setFCV.
+ concurrentWrite:
+ {lsid: {id: UUID()}, txnNumber: 0}, // Retryable write concurrent w/ setFCV.
+ upgradingTxn:
+ {lsid: {id: UUID()}, txnNumber: 0}, // Transaction started during FCV upgrade.
+ };
//
- // Verify setFCV is interruptible between modifying sessions.
+ // In the latest FCV, verify the expected updates are made to config.transactions for each
+ // case and the successful operations are retryable.
//
- clearRawMongoProgramOutput();
checkFCV(adminDB, latestFCV);
- // Construct a config.transactions entry that would be modified by downgrade.
- const txnIds = {interrupt: {lsid: {id: UUID()}, txnNumber: 0}};
- assert.commandWorked(runTxn(txnIds.interrupt, {commit: true, prepare: true}));
+ assertRetryableWriteWorked(txnIds.write);
+ assert.commandWorked(runTxn(txnIds.commit, {commit: true, prepare: false}));
+ assert.commandWorked(runTxn(txnIds.commitPrepare, {commit: true, prepare: true}));
+ assert.commandWorked(runTxn(txnIds.abort, {commit: false, prepare: false}));
+ assert.commandWorked(runTxn(txnIds.abortPrepare, {commit: false, prepare: true}));
+
+ checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
+ checkConfigTransactionEntry(rst, txnIds.commit, {hasEntry: true, expectedState: "committed"});
checkConfigTransactionEntry(
- rst, txnIds.interrupt, {hasEntry: true, expectedState: "committed"});
+ rst, txnIds.commitPrepare, {hasEntry: true, expectedState: "committed"});
+ checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
+ checkConfigTransactionEntry(
+ rst, txnIds.abortPrepare, {hasEntry: true, expectedState: "aborted"});
+
+ // The retryable write and the commit of both committed transactions should be retryable.
+ // The aborted transactions should still be aborted.
+ assertRetryableWriteCanBeRetried(txnIds.write);
+ assert.commandWorked(retryCommit(txnIds.commit));
+ assert.commandWorked(retryCommit(txnIds.commitPrepare));
+ assertTransactionAborted(txnIds.abort);
+ assertTransactionAborted(txnIds.abortPrepare);
- // Pause setFCV before it would modify the entry.
+ //
+ // Downgrade to the last-stable FCV and verify config.transactions was updated as expected
+ // for previously completed operations and operations concurrent with the downgrade.
+ //
+
+ if (shouldRestart) {
+ // Restart to verify config.transactions entries for sessions not in-memory at the
+ // beginning of FCV downgrade are updated correctly.
+ jsTestLog("Restarting replica set before downgrading the featureCompatibilityVersion.");
+ for (let i = 0; i < rst.nodes.length; i++) {
+ rst.restart(i);
+ }
+ testDB = rst.getPrimary().getDB(dbName);
+ adminDB = rst.getPrimary().getDB("admin");
+ }
+
+ // Make setFCV pause in the downgrading state after getting the list of sessions to
+ // potentially modify.
assert.commandWorked(rst.getPrimary().adminCommand(
{configureFailPoint: "pauseBeforeDowngradingSessions", mode: "alwaysOn"}));
- TestData.setFCVLsid = {id: UUID()};
- const awaitUpgradeFCV = startParallelShell(() => {
+ // Downgrade FCV in a parallel shell and wait until it blocks at the failpoint above.
+ const awaitDowngradeFCV = startParallelShell(() => {
load("jstests/libs/feature_compatibility_version.js");
- assert.commandFailedWithCode(
- db.adminCommand(
- {setFeatureCompatibilityVersion: lastStableFCV, lsid: TestData.setFCVLsid}),
- ErrorCodes.Interrupted);
+ jsTestLog("Downgrade the featureCompatibilityVersion in a parallel shell.");
+ assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
}, rst.getPrimary().port);
waitForFailpoint("Hit pauseBeforeDowngradingSessions failpoint", 1 /*numTimes*/);
- // Kill the session running setFCV.
- assert.commandWorked(rst.getPrimary().adminCommand({killSessions: [TestData.setFCVLsid]}));
+ // Concurrent transactions that use prepare will fail.
+ assert.commandFailedWithCode(runTxn(txnIds.concurrentTxn, {commit: true, prepare: true}),
+ ErrorCodes.CommandNotSupported);
+ txnIds.concurrentTxn.txnNumber += 1;
+
+ // Concurrent transactions that do not use prepare and retryable writes succeed.
+ assert.commandWorked(runTxn(txnIds.concurrentTxn, {commit: true, prepare: false}));
+ assertRetryableWriteWorked(txnIds.concurrentWrite);
- // Unpause the failpoint and verify setFCV returns without modifying config.transactions.
+ // Unset the failpoint and wait for the downgrade to finish.
assert.commandWorked(rst.getPrimary().adminCommand(
{configureFailPoint: "pauseBeforeDowngradingSessions", mode: "off"}));
+ awaitDowngradeFCV();
+ checkFCV(adminDB, lastStableFCV);
+
+ // The successful concurrent operations should have entries without state and be retryable.
+ checkConfigTransactionEntry(rst, txnIds.concurrentTxn, {hasEntry: true});
+ assert.commandWorked(retryCommit(txnIds.concurrentTxn));
+ checkConfigTransactionEntry(rst, txnIds.concurrentWrite, {hasEntry: true});
+ assertRetryableWriteCanBeRetried(txnIds.concurrentWrite);
+
+ // Only the retryable write entry should remain.
+ checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
+ checkConfigTransactionEntry(rst, txnIds.commit, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.commitPrepare, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.abortPrepare, {hasEntry: false});
+
+ // The retryable write can be retried.
+ assertRetryableWriteCanBeRetried(txnIds.write);
+
+ // Neither of the commits can be retried.
+ assert.commandFailedWithCode(retryCommit(txnIds.commit), ErrorCodes.NoSuchTransaction);
+ assert.commandFailedWithCode(retryCommit(txnIds.commitPrepare), ErrorCodes.NoSuchTransaction);
+
+ //
+ // In the last-stable FCV, verify the expected updates are made to config.transactions for
+ // each case and the successful operations are retryable.
+ //
+
+ // Reset each txnId to test upgrade with a clean slate.
+ Object.keys(txnIds).forEach((txnIdKey) => {
+ txnIds[txnIdKey].lsid = {id: UUID()};
+ txnIds[txnIdKey].txnNumber = 0;
+ });
+
+ // Prepare can't be used in FCV 4.0, so only commit, abort, and retryable write should
+ // succeed.
+ assertRetryableWriteWorked(txnIds.write);
+ assert.commandWorked(runTxn(txnIds.commit, {commit: true, prepare: false}));
+ assert.commandFailedWithCode(runTxn(txnIds.commitPrepare, {commit: true, prepare: true}),
+ ErrorCodes.CommandNotSupported);
+ assert.commandWorked(runTxn(txnIds.abort, {commit: false, prepare: false}));
+ assert.commandFailedWithCode(runTxn(txnIds.abortPrepare, {commit: false, prepare: true}),
+ ErrorCodes.CommandNotSupported);
+
+ // Only the retryable write and transaction that committed without prepare should have an
+ // entry. Neither should have state.
+ checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
+ checkConfigTransactionEntry(rst, txnIds.commit, {hasEntry: true});
+ checkConfigTransactionEntry(rst, txnIds.commitPrepare, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.abortPrepare, {hasEntry: false});
+
+ // The retryable write and successful commit can be retried.
+ assertRetryableWriteCanBeRetried(txnIds.write);
+ assert.commandWorked(retryCommit(txnIds.commit));
+
+ if (shouldRestart) {
+ // Restart to verify config.transactions entries for sessions not in-memory at the
+ // beginning of FCV upgrade are updated correctly.
+ jsTestLog("Restarting replica set before upgrading the featureCompatibilityVersion.");
+ for (let i = 0; i < rst.nodes.length; i++) {
+ rst.restart(i);
+ }
+ testDB = rst.getPrimary().getDB(dbName);
+ adminDB = rst.getPrimary().getDB("admin");
+ }
+
+ //
+ // Upgrade to the latest FCV and verify config.transactions was updated as expected for
+ // previously completed operations and operations concurrent with the upgrade.
+ //
+
+ // Run a retryable write on the session that will be used during upgrade so it has a
+ // transaction table entry and will be checked out by the upgrade.
+ assertRetryableWriteWorked(txnIds.upgradingTxn);
+ txnIds.upgradingTxn.txnNumber += 1;
+
+ // Make setFCV pause in the upgrading state after getting the list of sessions to
+ // potentially modify.
+ assert.commandWorked(rst.getPrimary().adminCommand(
+ {configureFailPoint: "pauseBeforeUpgradingSessions", mode: "alwaysOn"}));
+
+ // Upgrade FCV in a parallel shell and wait until it blocks at the failpoint above.
+ const awaitUpgradeFCV = startParallelShell(() => {
+ load("jstests/libs/feature_compatibility_version.js");
+ jsTestLog("Upgrade the featureCompatibilityVersion in a parallel shell.");
+ assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+ }, rst.getPrimary().port);
+ waitForFailpoint("Hit pauseBeforeUpgradingSessions failpoint", 1 /*numTimes*/);
+
+ // Concurrent transactions that use prepare will fail.
+ assert.commandFailedWithCode(runTxn(txnIds.concurrentTxn, {commit: true, prepare: true}),
+ ErrorCodes.CommandNotSupported);
+ txnIds.concurrentTxn.txnNumber += 1;
+
+ // Concurrent transactions that do not use prepare and retryable writes succeed.
+ assert.commandWorked(runTxn(txnIds.concurrentTxn, {commit: true, prepare: false}));
+ assertRetryableWriteWorked(txnIds.concurrentWrite);
+
+ // Start a transaction in the upgrading state and verify that it doesn't get aborted by the
+ // rest of the upgrade. Note that all sessions are killed and their transactions aborted for
+ // writes to the FCV document except when it is set to the fully upgraded state, so this
+ // can't be tested for downgrade.
+ assert.commandWorked(runTxn(txnIds.upgradingTxn, {leaveOpen: true}));
+
+ // Unset the failpoint and wait for the upgrade to finish.
+ assert.commandWorked(rst.getPrimary().adminCommand(
+ {configureFailPoint: "pauseBeforeUpgradingSessions", mode: "off"}));
+
awaitUpgradeFCV();
+ checkFCV(adminDB, latestFCV);
+
+ // The transaction started while upgrading shouldn't have been killed and can be committed.
+ assert.commandWorked(testDB.adminCommand({
+ commitTransaction: 1,
+ lsid: txnIds.upgradingTxn.lsid,
+ txnNumber: NumberLong(txnIds.upgradingTxn.txnNumber),
+ autocommit
+ }));
+
+ // The successful concurrent transaction should have "committed" state and be retryable, and
+ // the concurrent retryable write should not have state and also be retryable.
checkConfigTransactionEntry(
- rst, txnIds.interrupt, {hasEntry: true, expectedState: "committed"});
+ rst, txnIds.concurrentTxn, {hasEntry: true, expectedState: "committed"});
+ assert.commandWorked(retryCommit(txnIds.concurrentTxn));
+ checkConfigTransactionEntry(rst, txnIds.concurrentWrite, {hasEntry: true});
+ assertRetryableWriteCanBeRetried(txnIds.concurrentWrite);
+
+ // There should still only be entries for the committed transaction and retryable write. The
+ // committed transaction should now have a "state" field.
+ checkConfigTransactionEntry(rst, txnIds.write, {hasEntry: true});
+ checkConfigTransactionEntry(rst, txnIds.commit, {hasEntry: true, expectedState: "committed"});
+ checkConfigTransactionEntry(rst, txnIds.commitPrepare, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.abort, {hasEntry: false});
+ checkConfigTransactionEntry(rst, txnIds.abortPrepare, {hasEntry: false});
+
+ // The retryable write and successful commit can be retried.
+ assertRetryableWriteCanBeRetried(txnIds.write);
+ assert.commandWorked(retryCommit(txnIds.commit));
+}
+
+runTest({shouldRestart: false});
+runTest({shouldRestart: true});
+
+//
+// Verify setFCV is interruptible between modifying sessions.
+//
+clearRawMongoProgramOutput();
+checkFCV(adminDB, latestFCV);
+
+// Construct a config.transactions entry that would be modified by downgrade.
+const txnIds = {
+ interrupt: {lsid: {id: UUID()}, txnNumber: 0}
+};
+assert.commandWorked(runTxn(txnIds.interrupt, {commit: true, prepare: true}));
+checkConfigTransactionEntry(rst, txnIds.interrupt, {hasEntry: true, expectedState: "committed"});
+
+// Pause setFCV before it would modify the entry.
+assert.commandWorked(rst.getPrimary().adminCommand(
+ {configureFailPoint: "pauseBeforeDowngradingSessions", mode: "alwaysOn"}));
+
+TestData.setFCVLsid = {
+ id: UUID()
+};
+const awaitUpgradeFCV = startParallelShell(() => {
+ load("jstests/libs/feature_compatibility_version.js");
+ assert.commandFailedWithCode(
+ db.adminCommand({setFeatureCompatibilityVersion: lastStableFCV, lsid: TestData.setFCVLsid}),
+ ErrorCodes.Interrupted);
+}, rst.getPrimary().port);
+waitForFailpoint("Hit pauseBeforeDowngradingSessions failpoint", 1 /*numTimes*/);
+
+// Kill the session running setFCV.
+assert.commandWorked(rst.getPrimary().adminCommand({killSessions: [TestData.setFCVLsid]}));
+
+// Unpause the failpoint and verify setFCV returns without modifying config.transactions.
+assert.commandWorked(rst.getPrimary().adminCommand(
+ {configureFailPoint: "pauseBeforeDowngradingSessions", mode: "off"}));
+
+awaitUpgradeFCV();
+checkConfigTransactionEntry(rst, txnIds.interrupt, {hasEntry: true, expectedState: "committed"});
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/multiVersion/copydb_helper.js b/jstests/multiVersion/copydb_helper.js
index aff7dd83fa4..25c60f2552a 100644
--- a/jstests/multiVersion/copydb_helper.js
+++ b/jstests/multiVersion/copydb_helper.js
@@ -1,53 +1,50 @@
// SERVER-36438 Ensure the 4.2 copyDatabase() shell helper still successfully executes the copyDB
// command on a 4.0 server, now that the copyDB command has been removed as of 4.2.
(function() {
- "use strict";
- const oldVersion = "4.0";
-
- let runTest = function(useAuth) {
- let conn;
- if (useAuth) {
- conn = MongoRunner.runMongod({auth: "", binVersion: oldVersion});
- } else {
- conn = MongoRunner.runMongod({binVersion: oldVersion});
- }
-
- let fromDB = conn.getDB("copydb2-test-a");
- let toDB = conn.getDB("copydb2-test-b");
- let adminDB = conn.getDB("admin");
-
- if (useAuth) {
- adminDB.createUser({user: "root", pwd: "root", roles: ["root"]});
- adminDB.auth("root", "root");
- fromDB.createUser({
- user: "chevy",
- pwd: "chase",
- roles: ["read", {role: "readWrite", db: toDB._name}]
- });
- }
-
- assert.commandWorked(fromDB.foo.insert({a: 1}));
- assert.commandWorked(fromDB.foo.createIndex({a: 1}));
-
- if (useAuth) {
- assert.commandWorked(toDB.getSiblingDB("admin").logout());
- fromDB.auth("chevy", "chase");
- }
-
- assert.eq(1, fromDB.foo.count());
- assert.eq(0, toDB.foo.count());
-
- assert.commandWorked(fromDB.copyDatabase(fromDB._name, toDB._name));
- assert.eq(1, fromDB.foo.count());
- assert.eq(1, toDB.foo.count());
- assert.eq(fromDB.foo.getIndexes().length, toDB.foo.getIndexes().length);
- MongoRunner.stopMongod(conn);
- };
-
- runTest(/*useAuth*/ false);
-
- // Authenticating as multiple users on multiple databases results in an error.
- if (!jsTest.options().auth) {
- runTest(/*useAuth*/ true);
+"use strict";
+const oldVersion = "4.0";
+
+let runTest = function(useAuth) {
+ let conn;
+ if (useAuth) {
+ conn = MongoRunner.runMongod({auth: "", binVersion: oldVersion});
+ } else {
+ conn = MongoRunner.runMongod({binVersion: oldVersion});
}
+
+ let fromDB = conn.getDB("copydb2-test-a");
+ let toDB = conn.getDB("copydb2-test-b");
+ let adminDB = conn.getDB("admin");
+
+ if (useAuth) {
+ adminDB.createUser({user: "root", pwd: "root", roles: ["root"]});
+ adminDB.auth("root", "root");
+ fromDB.createUser(
+ {user: "chevy", pwd: "chase", roles: ["read", {role: "readWrite", db: toDB._name}]});
+ }
+
+ assert.commandWorked(fromDB.foo.insert({a: 1}));
+ assert.commandWorked(fromDB.foo.createIndex({a: 1}));
+
+ if (useAuth) {
+ assert.commandWorked(toDB.getSiblingDB("admin").logout());
+ fromDB.auth("chevy", "chase");
+ }
+
+ assert.eq(1, fromDB.foo.count());
+ assert.eq(0, toDB.foo.count());
+
+ assert.commandWorked(fromDB.copyDatabase(fromDB._name, toDB._name));
+ assert.eq(1, fromDB.foo.count());
+ assert.eq(1, toDB.foo.count());
+ assert.eq(fromDB.foo.getIndexes().length, toDB.foo.getIndexes().length);
+ MongoRunner.stopMongod(conn);
+};
+
+runTest(/*useAuth*/ false);
+
+// Authenticating as multiple users on multiple databases results in an error.
+if (!jsTest.options().auth) {
+ runTest(/*useAuth*/ true);
+}
})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js
index f98d7ec43fe..646619f5e42 100644
--- a/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js
+++ b/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js
@@ -7,206 +7,201 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/feature_compatibility_version.js");
-
- const testName = "collection_validator_feature_compatibility_version";
- const dbpath = MongoRunner.dataPath + testName;
-
- // The 'testCases' array should be populated with
- //
- // { validator: { ... }, nonMatchingDocument: { ... } }
- //
- // objects that use query features new in the latest version of mongod. Note that this also
- // includes new aggregation expressions able to be used with the $expr match expression. This
- // test ensures that a collection validator accepts the new query feature when the feature
- // compatibility version is the latest version, and rejects it when the feature compatibility
- // version is the last-stable version.
- const testCases = [];
-
- let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
- assert.neq(null, conn, "mongod was unable to start up");
-
- let testDB = conn.getDB(testName);
-
- let adminDB = conn.getDB("admin");
-
- // Explicitly set the feature compatibility version to the latest version.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
-
- testCases.forEach(function(test, i) {
- // Create a collection with a validator using new query features.
- const coll = testDB["coll" + i];
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {validator: test.validator}),
- `Expected to be able to create collection with validator ${tojson(test.validator)}`);
-
- // The validator should cause this insert to fail.
- assert.writeErrorWithCode(
- coll.insert(test.nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure,
- `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
- `collection with validator ${tojson(test.validator)}`);
-
- // Set a validator using new query features on an existing collection.
- coll.drop();
- assert.commandWorked(testDB.createCollection(coll.getName()));
- assert.commandWorked(
- testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
- `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
-
- // Another failing update.
- assert.writeErrorWithCode(
- coll.insert(test.nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure,
- `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
- `collection with validator ${tojson(test.validator)}`);
- });
-
- // Set the feature compatibility version to the last-stable version.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
- testCases.forEach(function(test, i) {
- // The validator is already in place, so it should still cause this insert to fail.
- const coll = testDB["coll" + i];
- assert.writeErrorWithCode(
- coll.insert(test.nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure,
- `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
- `collection with validator ${tojson(test.validator)}`);
-
- // Trying to create a new collection with a validator using new query features should fail
- // while feature compatibility version is the last-stable version.
- let res = testDB.createCollection("other", {validator: test.validator});
- assert.commandFailedWithCode(
- res,
- ErrorCodes.QueryFeatureNotAllowed,
- 'Expected *not* to be able to create collection with validator ' +
- tojson(test.validator));
- assert(
- res.errmsg.match(/feature compatibility version/),
- `Expected error message from createCollection with validator ` +
- `${tojson(test.validator)} to reference 'feature compatibility version' but got: ` +
- res.errmsg);
-
- // Trying to update a collection with a validator using new query features should also fail.
- res = testDB.runCommand({collMod: coll.getName(), validator: test.validator});
- assert.commandFailedWithCode(
- res,
- ErrorCodes.QueryFeatureNotAllowed,
- `Expected to be able to create collection with validator ${tojson(test.validator)}`);
- assert(
- res.errmsg.match(/feature compatibility version/),
- `Expected error message from createCollection with validator ` +
- `${tojson(test.validator)} to reference 'feature compatibility version' but got: ` +
- res.errmsg);
- });
-
- MongoRunner.stopMongod(conn);
-
- if (testCases.length > 0) {
- // If we try to start up the last-stable version of mongod, it will fail, because it will
- // not be able to parse the validator using new query features.
- conn =
- MongoRunner.runMongod({dbpath: dbpath, binVersion: "last-stable", noCleanData: true});
- assert.eq(null,
- conn,
- `version ${MongoRunner.getBinVersionFor("last-stable")} of mongod started, even` +
- " with a validator using new query features in place.");
- }
-
- // Starting up the latest version of mongod, however, should succeed, even though the feature
- // compatibility version is still set to the last-stable version.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
- assert.neq(null, conn, "mongod was unable to start up");
-
- adminDB = conn.getDB("admin");
- testDB = conn.getDB(testName);
-
- // And the validator should still work.
- testCases.forEach(function(test, i) {
- const coll = testDB["coll" + i];
- assert.writeErrorWithCode(
- coll.insert(test.nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure,
- `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
- `collection with validator ${tojson(test.validator)}`);
-
- // Remove the validator.
- assert.commandWorked(testDB.runCommand({collMod: coll.getName(), validator: {}}));
- });
-
- MongoRunner.stopMongod(conn);
-
- // Now, we should be able to start up the last-stable version of mongod.
+"use strict";
+
+load("jstests/libs/feature_compatibility_version.js");
+
+const testName = "collection_validator_feature_compatibility_version";
+const dbpath = MongoRunner.dataPath + testName;
+
+// The 'testCases' array should be populated with
+//
+// { validator: { ... }, nonMatchingDocument: { ... } }
+//
+// objects that use query features new in the latest version of mongod. Note that this also
+// includes new aggregation expressions able to be used with the $expr match expression. This
+// test ensures that a collection validator accepts the new query feature when the feature
+// compatibility version is the latest version, and rejects it when the feature compatibility
+// version is the last-stable version.
+const testCases = [];
+
+let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
+assert.neq(null, conn, "mongod was unable to start up");
+
+let testDB = conn.getDB(testName);
+
+let adminDB = conn.getDB("admin");
+
+// Explicitly set the feature compatibility version to the latest version.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+
+testCases.forEach(function(test, i) {
+ // Create a collection with a validator using new query features.
+ const coll = testDB["coll" + i];
+ assert.commandWorked(
+ testDB.createCollection(coll.getName(), {validator: test.validator}),
+ `Expected to be able to create collection with validator ${tojson(test.validator)}`);
+
+ // The validator should cause this insert to fail.
+ assert.writeErrorWithCode(
+ coll.insert(test.nonMatchingDocument),
+ ErrorCodes.DocumentValidationFailure,
+ `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
+ `collection with validator ${tojson(test.validator)}`);
+
+ // Set a validator using new query features on an existing collection.
+ coll.drop();
+ assert.commandWorked(testDB.createCollection(coll.getName()));
+ assert.commandWorked(
+ testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
+ `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
+
+ // Another failing update.
+ assert.writeErrorWithCode(
+ coll.insert(test.nonMatchingDocument),
+ ErrorCodes.DocumentValidationFailure,
+ `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
+ `collection with validator ${tojson(test.validator)}`);
+});
+
+// Set the feature compatibility version to the last-stable version.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+testCases.forEach(function(test, i) {
+ // The validator is already in place, so it should still cause this insert to fail.
+ const coll = testDB["coll" + i];
+ assert.writeErrorWithCode(
+ coll.insert(test.nonMatchingDocument),
+ ErrorCodes.DocumentValidationFailure,
+ `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
+ `collection with validator ${tojson(test.validator)}`);
+
+ // Trying to create a new collection with a validator using new query features should fail
+ // while feature compatibility version is the last-stable version.
+ let res = testDB.createCollection("other", {validator: test.validator});
+ assert.commandFailedWithCode(
+ res,
+ ErrorCodes.QueryFeatureNotAllowed,
+ 'Expected *not* to be able to create collection with validator ' + tojson(test.validator));
+ assert(res.errmsg.match(/feature compatibility version/),
+ `Expected error message from createCollection with validator ` +
+ `${tojson(test.validator)} to reference 'feature compatibility version' but got: ` +
+ res.errmsg);
+
+ // Trying to update a collection with a validator using new query features should also fail.
+ res = testDB.runCommand({collMod: coll.getName(), validator: test.validator});
+ assert.commandFailedWithCode(
+ res,
+ ErrorCodes.QueryFeatureNotAllowed,
+ `Expected to be able to create collection with validator ${tojson(test.validator)}`);
+ assert(res.errmsg.match(/feature compatibility version/),
+ `Expected error message from createCollection with validator ` +
+ `${tojson(test.validator)} to reference 'feature compatibility version' but got: ` +
+ res.errmsg);
+});
+
+MongoRunner.stopMongod(conn);
+
+if (testCases.length > 0) {
+ // If we try to start up the last-stable version of mongod, it will fail, because it will
+ // not be able to parse the validator using new query features.
conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "last-stable", noCleanData: true});
- assert.neq(
- null,
- conn,
- `version ${MongoRunner.getBinVersionFor("last-stable")} of mongod failed to start, even` +
- " after we removed the validator using new query features");
-
- MongoRunner.stopMongod(conn);
-
- // The rest of the test uses the latest version of mongod.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
- assert.neq(null, conn, "mongod was unable to start up");
-
- adminDB = conn.getDB("admin");
- testDB = conn.getDB(testName);
-
- // Set the feature compatibility version back to the latest version.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
-
- testCases.forEach(function(test, i) {
- const coll = testDB["coll2" + i];
-
- // Now we should be able to create a collection with a validator using new query features
- // again.
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {validator: test.validator}),
- `Expected to be able to create collection with validator ${tojson(test.validator)}`);
-
- // And we should be able to modify a collection to have a validator using new query
- // features.
- assert.commandWorked(
- testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
- `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
- });
-
- // Set the feature compatibility version to the last-stable version and then restart with
- // internalValidateFeaturesAsMaster=false.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- binVersion: "latest",
- noCleanData: true,
- setParameter: "internalValidateFeaturesAsMaster=false"
- });
- assert.neq(null, conn, "mongod was unable to start up");
-
- testDB = conn.getDB(testName);
-
- testCases.forEach(function(test, i) {
- const coll = testDB["coll3" + i];
- // Even though the feature compatibility version is the last-stable version, we should still
- // be able to add a validator using new query features, because
- // internalValidateFeaturesAsMaster is false.
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {validator: test.validator}),
- `Expected to be able to create collection with validator ${tojson(test.validator)}`);
-
- // We should also be able to modify a collection to have a validator using new query
- // features.
- coll.drop();
- assert.commandWorked(testDB.createCollection(coll.getName()));
- assert.commandWorked(
- testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
- `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
- });
-
- MongoRunner.stopMongod(conn);
-
+ assert.eq(null,
+ conn,
+ `version ${MongoRunner.getBinVersionFor("last-stable")} of mongod started, even` +
+ " with a validator using new query features in place.");
+}
+
+// Starting up the latest version of mongod, however, should succeed, even though the feature
+// compatibility version is still set to the last-stable version.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
+assert.neq(null, conn, "mongod was unable to start up");
+
+adminDB = conn.getDB("admin");
+testDB = conn.getDB(testName);
+
+// And the validator should still work.
+testCases.forEach(function(test, i) {
+ const coll = testDB["coll" + i];
+ assert.writeErrorWithCode(
+ coll.insert(test.nonMatchingDocument),
+ ErrorCodes.DocumentValidationFailure,
+ `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
+ `collection with validator ${tojson(test.validator)}`);
+
+ // Remove the validator.
+ assert.commandWorked(testDB.runCommand({collMod: coll.getName(), validator: {}}));
+});
+
+MongoRunner.stopMongod(conn);
+
+// Now, we should be able to start up the last-stable version of mongod.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "last-stable", noCleanData: true});
+assert.neq(
+ null,
+ conn,
+ `version ${MongoRunner.getBinVersionFor("last-stable")} of mongod failed to start, even` +
+ " after we removed the validator using new query features");
+
+MongoRunner.stopMongod(conn);
+
+// The rest of the test uses the latest version of mongod.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
+assert.neq(null, conn, "mongod was unable to start up");
+
+adminDB = conn.getDB("admin");
+testDB = conn.getDB(testName);
+
+// Set the feature compatibility version back to the latest version.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+
+testCases.forEach(function(test, i) {
+ const coll = testDB["coll2" + i];
+
+ // Now we should be able to create a collection with a validator using new query features
+ // again.
+ assert.commandWorked(
+ testDB.createCollection(coll.getName(), {validator: test.validator}),
+ `Expected to be able to create collection with validator ${tojson(test.validator)}`);
+
+ // And we should be able to modify a collection to have a validator using new query
+ // features.
+ assert.commandWorked(
+ testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
+ `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
+});
+
+// Set the feature compatibility version to the last-stable version and then restart with
+// internalValidateFeaturesAsMaster=false.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ binVersion: "latest",
+ noCleanData: true,
+ setParameter: "internalValidateFeaturesAsMaster=false"
+});
+assert.neq(null, conn, "mongod was unable to start up");
+
+testDB = conn.getDB(testName);
+
+testCases.forEach(function(test, i) {
+ const coll = testDB["coll3" + i];
+ // Even though the feature compatibility version is the last-stable version, we should still
+ // be able to add a validator using new query features, because
+ // internalValidateFeaturesAsMaster is false.
+ assert.commandWorked(
+ testDB.createCollection(coll.getName(), {validator: test.validator}),
+ `Expected to be able to create collection with validator ${tojson(test.validator)}`);
+
+ // We should also be able to modify a collection to have a validator using new query
+ // features.
+ coll.drop();
+ assert.commandWorked(testDB.createCollection(coll.getName()));
+ assert.commandWorked(
+ testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
+ `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
+});
+
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js b/jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
index 2fb02f419ce..c49abeafa8f 100644
--- a/jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
+++ b/jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
@@ -8,48 +8,47 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- load("jstests/libs/feature_compatibility_version.js");
-
- const lastStable = "last-stable";
-
- let st = new ShardingTest({mongos: 1, shards: 1});
- const ns = "testDB.testColl";
- let mongosAdminDB = st.s.getDB("admin");
-
- // Assert that a mongos using the 'last-stable' binary version will crash when connecting to a
- // cluster running on the 'latest' binary version with the 'latest' FCV.
- let lastStableMongos =
- MongoRunner.runMongos({configdb: st.configRS.getURL(), binVersion: lastStable});
-
- assert(!lastStableMongos);
-
- // Assert that a mongos using the 'last-stable' binary version will successfully connect to a
- // cluster running on the 'latest' binary version with the 'last-stable' FCV.
- assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- lastStableMongos =
- MongoRunner.runMongos({configdb: st.configRS.getURL(), binVersion: lastStable});
- assert.neq(null,
- lastStableMongos,
- "mongos was unable to start up with binary version=" + lastStable +
- " and connect to FCV=" + lastStableFCV + " cluster");
-
- // Ensure that the 'lastStable' binary mongos can perform reads and writes to the shards in the
- // cluster.
- assert.writeOK(lastStableMongos.getDB("test").foo.insert({x: 1}));
- let foundDoc = lastStableMongos.getDB("test").foo.findOne({x: 1});
- assert.neq(null, foundDoc);
- assert.eq(1, foundDoc.x, tojson(foundDoc));
-
- // Assert that the 'lastStable' binary mongos will crash after the cluster is upgraded to
- // 'latestFCV'.
- assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- let error = assert.throws(function() {
- lastStableMongos.getDB("test").foo.insert({x: 1});
- });
- assert(isNetworkError(error));
- assert(!lastStableMongos.conn);
-
- st.stop();
+"use strict";
+
+load("jstests/libs/feature_compatibility_version.js");
+
+const lastStable = "last-stable";
+
+let st = new ShardingTest({mongos: 1, shards: 1});
+const ns = "testDB.testColl";
+let mongosAdminDB = st.s.getDB("admin");
+
+// Assert that a mongos using the 'last-stable' binary version will crash when connecting to a
+// cluster running on the 'latest' binary version with the 'latest' FCV.
+let lastStableMongos =
+ MongoRunner.runMongos({configdb: st.configRS.getURL(), binVersion: lastStable});
+
+assert(!lastStableMongos);
+
+// Assert that a mongos using the 'last-stable' binary version will successfully connect to a
+// cluster running on the 'latest' binary version with the 'last-stable' FCV.
+assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+lastStableMongos = MongoRunner.runMongos({configdb: st.configRS.getURL(), binVersion: lastStable});
+assert.neq(null,
+ lastStableMongos,
+ "mongos was unable to start up with binary version=" + lastStable +
+ " and connect to FCV=" + lastStableFCV + " cluster");
+
+// Ensure that the 'lastStable' binary mongos can perform reads and writes to the shards in the
+// cluster.
+assert.writeOK(lastStableMongos.getDB("test").foo.insert({x: 1}));
+let foundDoc = lastStableMongos.getDB("test").foo.findOne({x: 1});
+assert.neq(null, foundDoc);
+assert.eq(1, foundDoc.x, tojson(foundDoc));
+
+// Assert that the 'lastStable' binary mongos will crash after the cluster is upgraded to
+// 'latestFCV'.
+assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+let error = assert.throws(function() {
+ lastStableMongos.getDB("test").foo.insert({x: 1});
+});
+assert(isNetworkError(error));
+assert(!lastStableMongos.conn);
+
+st.stop();
})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js b/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js
index 6a858400ae4..e280a82451e 100644
--- a/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js
+++ b/jstests/multiVersion/genericSetFCVUsage/do_upgrade_downgrade.js
@@ -1,304 +1,224 @@
// Perform the upgrade/downgrade procedure by first setting the featureCompatibilityVersion and
// then switching the binary.
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js");
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/libs/get_index_helpers.js");
- load("jstests/libs/check_uuids.js");
- load("jstests/libs/check_unique_indexes.js");
-
- const latestBinary = "latest";
- const lastStableBinary = "last-stable";
-
- let setFCV = function(adminDB, version) {
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: version}));
- checkFCV(adminDB, version);
- };
-
- let insertDataForConn = function(conn, dbs, nodeOptions) {
- for (let i = 0; i < 20; i++) {
- let doc = {id: i, sno: i, a: "foo", conn: conn.name};
- for (let j in dbs) {
- if (nodeOptions.hasOwnProperty("configsvr")) {
- if (j !== "admin" && j !== "local") {
- // We can't create user databases on a --configsvr instance.
- continue;
- }
- // Config servers have a majority write concern.
- assert.writeOK(
- conn.getDB(dbs[j]).foo.insert(doc, {writeConcern: {w: "majority"}}));
- } else {
- assert.writeOK(conn.getDB(dbs[j]).foo.insert(doc));
- }
- }
- }
+"use strict";
+
+load("jstests/replsets/rslib.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/check_uuids.js");
+load("jstests/libs/check_unique_indexes.js");
- // Create unique indexes on collection "foo" with two index formatVersions.
- // Providing index version explicitly allows index creation with corresponding
- // formatVersion.
+const latestBinary = "latest";
+const lastStableBinary = "last-stable";
+
+let setFCV = function(adminDB, version) {
+ assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: version}));
+ checkFCV(adminDB, version);
+};
+
+let insertDataForConn = function(conn, dbs, nodeOptions) {
+ for (let i = 0; i < 20; i++) {
+ let doc = {id: i, sno: i, a: "foo", conn: conn.name};
for (let j in dbs) {
- let testDB = conn.getDB(dbs[j]);
- testDB.getCollectionInfos().forEach(function(c) {
- if (c.name === "foo") {
- let foo = testDB.getCollection(c.name);
- assert.commandWorked(foo.createIndex({id: 1}, {unique: true}));
- assert.commandWorked(foo.createIndex({sno: 1}, {unique: true, v: 1}));
+ if (nodeOptions.hasOwnProperty("configsvr")) {
+ if (j !== "admin" && j !== "local") {
+ // We can't create user databases on a --configsvr instance.
+ continue;
}
- });
+ // Config servers have a majority write concern.
+ assert.writeOK(conn.getDB(dbs[j]).foo.insert(doc, {writeConcern: {w: "majority"}}));
+ } else {
+ assert.writeOK(conn.getDB(dbs[j]).foo.insert(doc));
+ }
}
- };
-
- let recreateUniqueIndexes = function(db, secondary) {
- // Obtain list of all v1 and v2 unique indexes
- var unique_idx = [];
- var unique_idx_v1 = [];
- db.adminCommand("listDatabases").databases.forEach(function(d) {
- if (secondary && !(d.name === "local")) {
- // All replicated indexes will be dropped on the primary, and have that
- // drop propogated. Secondary nodes need to recreate unique indexes
- // associated with local collections.
- return;
+ }
+
+ // Create unique indexes on collection "foo" with two index formatVersions.
+ // Providing index version explicitly allows index creation with corresponding
+ // formatVersion.
+ for (let j in dbs) {
+ let testDB = conn.getDB(dbs[j]);
+ testDB.getCollectionInfos().forEach(function(c) {
+ if (c.name === "foo") {
+ let foo = testDB.getCollection(c.name);
+ assert.commandWorked(foo.createIndex({id: 1}, {unique: true}));
+ assert.commandWorked(foo.createIndex({sno: 1}, {unique: true, v: 1}));
}
- let mdb = db.getSiblingDB(d.name);
- mdb.getCollectionInfos().forEach(function(c) {
- let currentCollection = mdb.getCollection(c.name);
- currentCollection.getIndexes().forEach(function(i) {
- if (i.unique) {
- if (i.v === 1) {
- unique_idx_v1.push(i);
- return;
- }
- unique_idx.push(i);
- }
- });
- });
});
-
- // Drop and create all v:2 indexes
- for (let idx of unique_idx) {
- let [dbName, collName] = idx.ns.split(".");
- let res = db.getSiblingDB(dbName).runCommand({dropIndexes: collName, index: idx.name});
- assert.commandWorked(res);
- res = db.getSiblingDB(dbName).runCommand({
- createIndexes: collName,
- indexes: [{"key": idx.key, "name": idx.name, "unique": true}]
- });
- assert.commandWorked(res);
+ }
+};
+
+let recreateUniqueIndexes = function(db, secondary) {
+ // Obtain list of all v1 and v2 unique indexes
+ var unique_idx = [];
+ var unique_idx_v1 = [];
+ db.adminCommand("listDatabases").databases.forEach(function(d) {
+ if (secondary && !(d.name === "local")) {
+ // All replicated indexes will be dropped on the primary, and have that
+ // drop propogated. Secondary nodes need to recreate unique indexes
+ // associated with local collections.
+ return;
}
-
- // Drop and create all v:1 indexes
- for (let idx of unique_idx_v1) {
- let [dbName, collName] = idx.ns.split(".");
- let res = db.getSiblingDB(dbName).runCommand({dropIndexes: collName, index: idx.name});
- assert.commandWorked(res);
- res = db.getSiblingDB(dbName).runCommand({
- createIndexes: collName,
- indexes: [{"key": idx.key, "name": idx.name, "unique": true, "v": 1}]
+ let mdb = db.getSiblingDB(d.name);
+ mdb.getCollectionInfos().forEach(function(c) {
+ let currentCollection = mdb.getCollection(c.name);
+ currentCollection.getIndexes().forEach(function(i) {
+ if (i.unique) {
+ if (i.v === 1) {
+ unique_idx_v1.push(i);
+ return;
+ }
+ unique_idx.push(i);
+ }
});
- assert.commandWorked(res);
- }
- };
-
- // Create and clear dbpath
- let sharedDbPath = MongoRunner.dataPath + "do_upgrade_downgrade";
- resetDbpath(sharedDbPath);
-
- // Return a mongodb connection with startup options, version and dbpath options
- let startMongodWithVersion = function(nodeOptions, ver, path) {
- let version = ver || latestBinary;
- let dbpath = path || sharedDbPath;
- let conn = MongoRunner.runMongod(
- Object.assign({}, nodeOptions, {dbpath: dbpath, binVersion: version}));
- assert.neq(null,
- conn,
- "mongod was unable to start up with version=" + version + " and path=" + dbpath);
- return conn;
- };
-
- //
- // Standalone tests.
- //
- let standaloneTest = function(nodeOptions) {
- let noCleanDataOptions = Object.assign({noCleanData: true}, nodeOptions);
-
- // New latest binary version standalone.
- jsTest.log("Starting a latest binVersion standalone");
- let conn = startMongodWithVersion(nodeOptions, latestBinary);
- let adminDB = conn.getDB("admin");
-
- // Insert some data.
- insertDataForConn(conn, ["admin", "local", "test"], nodeOptions);
-
- if (!nodeOptions.hasOwnProperty("shardsvr")) {
- // Initially featureCompatibilityVersion is latest except for when we run with shardsvr.
- // We expect featureCompatibilityVersion to be last-stable for shardsvr.
- checkFCV(adminDB, latestFCV);
-
- // Ensure all collections have UUIDs and all unique indexes have new version in latest
- // featureCompatibilityVersion mode.
- checkCollectionUUIDs(adminDB);
- checkUniqueIndexFormatVersion(adminDB);
-
- // Set featureCompatibilityVersion to last-stable.
- setFCV(adminDB, lastStableFCV);
- }
-
- // Ensure featureCompatibilityVersion is last-stable and all collections still have UUIDs.
- checkFCV(adminDB, lastStableFCV);
- checkCollectionUUIDs(adminDB);
-
- // Drop and recreate unique indexes with the older FCV
- recreateUniqueIndexes(adminDB, false);
-
- // Stop latest binary version mongod.
- MongoRunner.stopMongod(conn);
-
- // Start last-stable binary version mongod with same dbpath
- jsTest.log("Starting a last-stable binVersion standalone to test downgrade");
- let lastStableConn = startMongodWithVersion(noCleanDataOptions, lastStableBinary);
- let lastStableAdminDB = lastStableConn.getDB("admin");
-
- // Check FCV document.
- checkFCV(lastStableAdminDB, lastStableFCV);
-
- // Ensure all collections still have UUIDs on a last-stable mongod.
- checkCollectionUUIDs(lastStableAdminDB);
-
- // Stop last-stable binary version mongod.
- MongoRunner.stopMongod(lastStableConn);
-
- // Start latest binary version mongod again.
- jsTest.log("Starting a latest binVersion standalone to test upgrade");
- conn = startMongodWithVersion(noCleanDataOptions, latestBinary);
- adminDB = conn.getDB("admin");
-
- // Ensure setFeatureCompatibilityVersion to latest succeeds, all collections have UUIDs
- // and all unique indexes are in new version.
- setFCV(adminDB, latestFCV);
+ });
+ });
+
+ // Drop and create all v:2 indexes
+ for (let idx of unique_idx) {
+ let [dbName, collName] = idx.ns.split(".");
+ let res = db.getSiblingDB(dbName).runCommand({dropIndexes: collName, index: idx.name});
+ assert.commandWorked(res);
+ res = db.getSiblingDB(dbName).runCommand({
+ createIndexes: collName,
+ indexes: [{"key": idx.key, "name": idx.name, "unique": true}]
+ });
+ assert.commandWorked(res);
+ }
+
+ // Drop and create all v:1 indexes
+ for (let idx of unique_idx_v1) {
+ let [dbName, collName] = idx.ns.split(".");
+ let res = db.getSiblingDB(dbName).runCommand({dropIndexes: collName, index: idx.name});
+ assert.commandWorked(res);
+ res = db.getSiblingDB(dbName).runCommand({
+ createIndexes: collName,
+ indexes: [{"key": idx.key, "name": idx.name, "unique": true, "v": 1}]
+ });
+ assert.commandWorked(res);
+ }
+};
+
+// Create and clear dbpath
+let sharedDbPath = MongoRunner.dataPath + "do_upgrade_downgrade";
+resetDbpath(sharedDbPath);
+
+// Return a mongodb connection with startup options, version and dbpath options
+let startMongodWithVersion = function(nodeOptions, ver, path) {
+ let version = ver || latestBinary;
+ let dbpath = path || sharedDbPath;
+ let conn = MongoRunner.runMongod(
+ Object.assign({}, nodeOptions, {dbpath: dbpath, binVersion: version}));
+ assert.neq(null,
+ conn,
+ "mongod was unable to start up with version=" + version + " and path=" + dbpath);
+ return conn;
+};
+
+//
+// Standalone tests.
+//
+let standaloneTest = function(nodeOptions) {
+ let noCleanDataOptions = Object.assign({noCleanData: true}, nodeOptions);
+
+ // New latest binary version standalone.
+ jsTest.log("Starting a latest binVersion standalone");
+ let conn = startMongodWithVersion(nodeOptions, latestBinary);
+ let adminDB = conn.getDB("admin");
+
+ // Insert some data.
+ insertDataForConn(conn, ["admin", "local", "test"], nodeOptions);
+
+ if (!nodeOptions.hasOwnProperty("shardsvr")) {
+ // Initially featureCompatibilityVersion is latest except for when we run with shardsvr.
+ // We expect featureCompatibilityVersion to be last-stable for shardsvr.
checkFCV(adminDB, latestFCV);
+
+ // Ensure all collections have UUIDs and all unique indexes have new version in latest
+ // featureCompatibilityVersion mode.
checkCollectionUUIDs(adminDB);
checkUniqueIndexFormatVersion(adminDB);
- // Stop latest binary version mongod for the last time
- MongoRunner.stopMongod(conn);
- };
-
- //
- // Replica set tests.
- //
- let replicaSetTest = function(nodeOptions) {
-
- // New latest binary version replica set.
- jsTest.log("Starting a latest binVersion ReplSetTest");
- let rst = new ReplSetTest({nodes: 3, nodeOptions: nodeOptions});
- rst.startSet();
- rst.initiate();
- let primaryAdminDB = rst.getPrimary().getDB("admin");
- let secondaries = rst.getSecondaries();
-
- // Insert some data.
- insertDataForConn(rst.getPrimary(), ["admin", "local", "test"], nodeOptions);
- rst.awaitReplication();
-
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = secondaries[j].getDB("admin");
- // Insert some data into the local DB.
- insertDataForConn(secondaries[j], ["local"], nodeOptions);
- }
-
- if (!nodeOptions.hasOwnProperty("shardsvr")) {
- // Initially featureCompatibilityVersion is latest on primary and secondaries except for
- // when we run with shardsvr. We expect featureCompatibilityVersion to be last-stable
- // for shardsvr.
- checkFCV(primaryAdminDB, latestFCV);
-
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = secondaries[j].getDB("admin");
- checkFCV(secondaryAdminDB, latestFCV);
- }
-
- // Ensure all collections have UUIDs and unique indexes are in new version in latest
- // featureCompatibilityVersion mode on both primary and secondaries.
- checkCollectionUUIDs(primaryAdminDB);
- checkUniqueIndexFormatVersion(primaryAdminDB);
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = secondaries[j].getDB("admin");
- checkCollectionUUIDs(secondaryAdminDB);
- checkUniqueIndexFormatVersion(secondaryAdminDB);
- }
-
- // Change featureCompatibilityVersion to last-stable.
- setFCV(primaryAdminDB, lastStableFCV);
- rst.awaitReplication();
- }
-
- // Ensure featureCompatibilityVersion is last-stable and all collections still have UUIDs.
- checkFCV(primaryAdminDB, lastStableFCV);
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = secondaries[j].getDB("admin");
- checkFCV(secondaryAdminDB, lastStableFCV);
- }
-
- checkCollectionUUIDs(primaryAdminDB);
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = secondaries[j].getDB("admin");
- checkCollectionUUIDs(secondaryAdminDB);
- }
-
- // Drop and recreate unique indexes with the older FCV
- recreateUniqueIndexes(primaryAdminDB, false);
-
- // Now drop and recreate unique indexes on secondaries' "local" database
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = secondaries[j].getDB("admin");
- recreateUniqueIndexes(secondaryAdminDB, true);
- }
-
- // Stop latest binary version replica set.
- rst.stopSet(null /* signal */, true /* forRestart */);
-
- // Downgrade the ReplSetTest binaries and make sure everything is okay.
- jsTest.log("Starting a last-stable binVersion ReplSetTest to test downgrade");
- rst.startSet({restart: true, binVersion: lastStableBinary});
-
- // Check that the featureCompatiblityVersion is set to last-stable and all
- // collections still have UUIDs.
- let lastStablePrimaryAdminDB = rst.getPrimary().getDB("admin");
- let lastStableSecondaries = rst.getSecondaries();
-
- checkFCV(lastStablePrimaryAdminDB, lastStableFCV);
- for (let j = 0; j < lastStableSecondaries.length; j++) {
- let secondaryAdminDB = lastStableSecondaries[j].getDB("admin");
- checkFCV(secondaryAdminDB, lastStableFCV);
- }
-
- checkCollectionUUIDs(lastStablePrimaryAdminDB);
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryAdminDB = lastStableSecondaries[j].getDB("admin");
- checkCollectionUUIDs(secondaryAdminDB);
- }
-
- rst.stopSet(null /* signal */, true /* forRestart */);
-
- // Start latest binary version replica set again.
- jsTest.log("Starting a latest binVersion ReplSetTest to test upgrade");
- rst.startSet({restart: true, binVersion: latestBinary});
- primaryAdminDB = rst.getPrimary().getDB("admin");
- secondaries = rst.getSecondaries();
-
- // Ensure all collections have UUIDs and unique indexes are in new version after switching
- // back to latest featureCompatibilityVersion on both primary and secondaries.
- setFCV(primaryAdminDB, latestFCV);
- rst.awaitReplication();
-
+ // Set featureCompatibilityVersion to last-stable.
+ setFCV(adminDB, lastStableFCV);
+ }
+
+ // Ensure featureCompatibilityVersion is last-stable and all collections still have UUIDs.
+ checkFCV(adminDB, lastStableFCV);
+ checkCollectionUUIDs(adminDB);
+
+ // Drop and recreate unique indexes with the older FCV
+ recreateUniqueIndexes(adminDB, false);
+
+ // Stop latest binary version mongod.
+ MongoRunner.stopMongod(conn);
+
+ // Start last-stable binary version mongod with same dbpath
+ jsTest.log("Starting a last-stable binVersion standalone to test downgrade");
+ let lastStableConn = startMongodWithVersion(noCleanDataOptions, lastStableBinary);
+ let lastStableAdminDB = lastStableConn.getDB("admin");
+
+ // Check FCV document.
+ checkFCV(lastStableAdminDB, lastStableFCV);
+
+ // Ensure all collections still have UUIDs on a last-stable mongod.
+ checkCollectionUUIDs(lastStableAdminDB);
+
+ // Stop last-stable binary version mongod.
+ MongoRunner.stopMongod(lastStableConn);
+
+ // Start latest binary version mongod again.
+ jsTest.log("Starting a latest binVersion standalone to test upgrade");
+ conn = startMongodWithVersion(noCleanDataOptions, latestBinary);
+ adminDB = conn.getDB("admin");
+
+ // Ensure setFeatureCompatibilityVersion to latest succeeds, all collections have UUIDs
+ // and all unique indexes are in new version.
+ setFCV(adminDB, latestFCV);
+ checkFCV(adminDB, latestFCV);
+ checkCollectionUUIDs(adminDB);
+ checkUniqueIndexFormatVersion(adminDB);
+
+ // Stop latest binary version mongod for the last time
+ MongoRunner.stopMongod(conn);
+};
+
+//
+// Replica set tests.
+//
+let replicaSetTest = function(nodeOptions) {
+ // New latest binary version replica set.
+ jsTest.log("Starting a latest binVersion ReplSetTest");
+ let rst = new ReplSetTest({nodes: 3, nodeOptions: nodeOptions});
+ rst.startSet();
+ rst.initiate();
+ let primaryAdminDB = rst.getPrimary().getDB("admin");
+ let secondaries = rst.getSecondaries();
+
+ // Insert some data.
+ insertDataForConn(rst.getPrimary(), ["admin", "local", "test"], nodeOptions);
+ rst.awaitReplication();
+
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = secondaries[j].getDB("admin");
+ // Insert some data into the local DB.
+ insertDataForConn(secondaries[j], ["local"], nodeOptions);
+ }
+
+ if (!nodeOptions.hasOwnProperty("shardsvr")) {
+ // Initially featureCompatibilityVersion is latest on primary and secondaries except for
+ // when we run with shardsvr. We expect featureCompatibilityVersion to be last-stable
+ // for shardsvr.
checkFCV(primaryAdminDB, latestFCV);
+
for (let j = 0; j < secondaries.length; j++) {
let secondaryAdminDB = secondaries[j].getDB("admin");
checkFCV(secondaryAdminDB, latestFCV);
}
+ // Ensure all collections have UUIDs and unique indexes are in new version in latest
+ // featureCompatibilityVersion mode on both primary and secondaries.
checkCollectionUUIDs(primaryAdminDB);
checkUniqueIndexFormatVersion(primaryAdminDB);
for (let j = 0; j < secondaries.length; j++) {
@@ -307,18 +227,96 @@
checkUniqueIndexFormatVersion(secondaryAdminDB);
}
- rst.stopSet();
- };
-
- // Do tests for regular standalones and replica sets.
- standaloneTest({});
- replicaSetTest({});
-
- // Do tests for standalones and replica sets started with --shardsvr.
- standaloneTest({shardsvr: ""});
- replicaSetTest({shardsvr: ""});
-
- // Do tests for standalones and replica sets started with --configsvr.
- standaloneTest({configsvr: ""});
- replicaSetTest({configsvr: ""});
+ // Change featureCompatibilityVersion to last-stable.
+ setFCV(primaryAdminDB, lastStableFCV);
+ rst.awaitReplication();
+ }
+
+ // Ensure featureCompatibilityVersion is last-stable and all collections still have UUIDs.
+ checkFCV(primaryAdminDB, lastStableFCV);
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = secondaries[j].getDB("admin");
+ checkFCV(secondaryAdminDB, lastStableFCV);
+ }
+
+ checkCollectionUUIDs(primaryAdminDB);
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = secondaries[j].getDB("admin");
+ checkCollectionUUIDs(secondaryAdminDB);
+ }
+
+ // Drop and recreate unique indexes with the older FCV
+ recreateUniqueIndexes(primaryAdminDB, false);
+
+ // Now drop and recreate unique indexes on secondaries' "local" database
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = secondaries[j].getDB("admin");
+ recreateUniqueIndexes(secondaryAdminDB, true);
+ }
+
+ // Stop latest binary version replica set.
+ rst.stopSet(null /* signal */, true /* forRestart */);
+
+ // Downgrade the ReplSetTest binaries and make sure everything is okay.
+ jsTest.log("Starting a last-stable binVersion ReplSetTest to test downgrade");
+ rst.startSet({restart: true, binVersion: lastStableBinary});
+
+ // Check that the featureCompatiblityVersion is set to last-stable and all
+ // collections still have UUIDs.
+ let lastStablePrimaryAdminDB = rst.getPrimary().getDB("admin");
+ let lastStableSecondaries = rst.getSecondaries();
+
+ checkFCV(lastStablePrimaryAdminDB, lastStableFCV);
+ for (let j = 0; j < lastStableSecondaries.length; j++) {
+ let secondaryAdminDB = lastStableSecondaries[j].getDB("admin");
+ checkFCV(secondaryAdminDB, lastStableFCV);
+ }
+
+ checkCollectionUUIDs(lastStablePrimaryAdminDB);
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = lastStableSecondaries[j].getDB("admin");
+ checkCollectionUUIDs(secondaryAdminDB);
+ }
+
+ rst.stopSet(null /* signal */, true /* forRestart */);
+
+ // Start latest binary version replica set again.
+ jsTest.log("Starting a latest binVersion ReplSetTest to test upgrade");
+ rst.startSet({restart: true, binVersion: latestBinary});
+ primaryAdminDB = rst.getPrimary().getDB("admin");
+ secondaries = rst.getSecondaries();
+
+ // Ensure all collections have UUIDs and unique indexes are in new version after switching
+ // back to latest featureCompatibilityVersion on both primary and secondaries.
+ setFCV(primaryAdminDB, latestFCV);
+ rst.awaitReplication();
+
+ checkFCV(primaryAdminDB, latestFCV);
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = secondaries[j].getDB("admin");
+ checkFCV(secondaryAdminDB, latestFCV);
+ }
+
+ checkCollectionUUIDs(primaryAdminDB);
+ checkUniqueIndexFormatVersion(primaryAdminDB);
+ for (let j = 0; j < secondaries.length; j++) {
+ let secondaryAdminDB = secondaries[j].getDB("admin");
+ checkCollectionUUIDs(secondaryAdminDB);
+ checkUniqueIndexFormatVersion(secondaryAdminDB);
+ }
+
+ rst.stopSet();
+};
+
+// Do tests for regular standalones and replica sets.
+standaloneTest({});
+replicaSetTest({});
+
+// Do tests for standalones and replica sets started with --shardsvr.
+standaloneTest({shardsvr: ""});
+replicaSetTest({shardsvr: ""});
+
+// Do tests for standalones and replica sets started with --configsvr.
+standaloneTest({configsvr: ""});
+replicaSetTest({configsvr: ""});
})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/downgrade_after_rollback_via_refetch.js b/jstests/multiVersion/genericSetFCVUsage/downgrade_after_rollback_via_refetch.js
index f534c4794f6..f031ac47b5e 100644
--- a/jstests/multiVersion/genericSetFCVUsage/downgrade_after_rollback_via_refetch.js
+++ b/jstests/multiVersion/genericSetFCVUsage/downgrade_after_rollback_via_refetch.js
@@ -3,67 +3,65 @@
// and restarts with the last-stable version before its next stable checkpoint, then oplog entries
// after the common point are replayed.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/replsets/libs/rollback_test.js");
- TestData.rollbackShutdowns = true;
- TestData.allowUncleanShutdowns = true;
- let name = "downgrade_after_rollback_via_refetch";
- let dbName = "test";
- let sourceCollName = "coll";
+TestData.rollbackShutdowns = true;
+TestData.allowUncleanShutdowns = true;
+let name = "downgrade_after_rollback_via_refetch";
+let dbName = "test";
+let sourceCollName = "coll";
- function testDowngrade(enableMajorityReadConcern) {
- jsTest.log("Test downgrade with enableMajorityReadConcern=" + enableMajorityReadConcern);
+function testDowngrade(enableMajorityReadConcern) {
+ jsTest.log("Test downgrade with enableMajorityReadConcern=" + enableMajorityReadConcern);
- // Set up Rollback Test.
- let replTest = new ReplSetTest(
- {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- replTest.initiate(config);
- let rollbackTest = new RollbackTest(name, replTest);
+ // Set up Rollback Test.
+ let replTest = new ReplSetTest(
+ {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
+ replTest.startSet();
+ let config = replTest.getReplSetConfig();
+ config.members[2].priority = 0;
+ config.settings = {chainingAllowed: false};
+ replTest.initiate(config);
+ let rollbackTest = new RollbackTest(name, replTest);
- // Set the featureCompatibilityVersion to the last-stable version, so that we can downgrade
- // the rollback node.
- assert.commandWorked(rollbackTest.getPrimary().adminCommand(
- {setFeatureCompatibilityVersion: lastStableFCV}));
+ // Set the featureCompatibilityVersion to the last-stable version, so that we can downgrade
+ // the rollback node.
+ assert.commandWorked(
+ rollbackTest.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
+ let rollbackNode = rollbackTest.transitionToRollbackOperations();
- // Turn off stable checkpoints on the rollback node.
- assert.commandWorked(rollbackNode.adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
+ // Turn off stable checkpoints on the rollback node.
+ assert.commandWorked(
+ rollbackNode.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
- // Wait for a rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+ // Wait for a rollback to finish.
+ rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+ rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+ rollbackTest.transitionToSteadyStateOperations();
- // Replicate a new operation to the rollback node. Replication is disabled on the tiebreaker
- // node, so a successful majority write guarantees the write has replicated to the rollback
- // node.
- assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[sourceCollName].insert(
- {_id: 0}, {writeConcern: {w: "majority"}}));
- assert.eq(rollbackNode.getDB(dbName)[sourceCollName].find({_id: 0}).itcount(), 1);
+ // Replicate a new operation to the rollback node. Replication is disabled on the tiebreaker
+ // node, so a successful majority write guarantees the write has replicated to the rollback
+ // node.
+ assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[sourceCollName].insert(
+ {_id: 0}, {writeConcern: {w: "majority"}}));
+ assert.eq(rollbackNode.getDB(dbName)[sourceCollName].find({_id: 0}).itcount(), 1);
- // Kill the rollback node and restart it on the last-stable version.
- rollbackTest.restartNode(
- 0,
- 9,
- {binVersion: "last-stable", enableMajorityReadConcern: enableMajorityReadConcern});
- replTest.awaitSecondaryNodes();
+ // Kill the rollback node and restart it on the last-stable version.
+ rollbackTest.restartNode(
+ 0, 9, {binVersion: "last-stable", enableMajorityReadConcern: enableMajorityReadConcern});
+ replTest.awaitSecondaryNodes();
- // The rollback node should replay the new operation.
- rollbackNode = rollbackTest.getSecondary();
- assert.eq(rollbackNode.getDB(dbName)[sourceCollName].find({_id: 0}).itcount(), 1);
+ // The rollback node should replay the new operation.
+ rollbackNode = rollbackTest.getSecondary();
+ assert.eq(rollbackNode.getDB(dbName)[sourceCollName].find({_id: 0}).itcount(), 1);
- rollbackTest.stop();
- }
+ rollbackTest.stop();
+}
- testDowngrade("true");
- testDowngrade("false");
+testDowngrade("true");
+testDowngrade("false");
})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js b/jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js
index 008164a5a80..2148a26111e 100644
--- a/jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js
+++ b/jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js
@@ -1,53 +1,52 @@
// Tests that a primary with upgrade featureCompatibilityVersion cannot connect with a secondary
// with a lower binary version.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/libs/write_concern_util.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/write_concern_util.js");
- const latest = "latest";
- const downgrade = "last-stable";
+const latest = "latest";
+const downgrade = "last-stable";
- // Start a new replica set with two latest version nodes.
- let rst = new ReplSetTest({
- nodes: [{binVersion: latest}, {binVersion: latest, rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- });
- rst.startSet();
- rst.initiate();
+// Start a new replica set with two latest version nodes.
+let rst = new ReplSetTest({
+ nodes: [{binVersion: latest}, {binVersion: latest, rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+});
+rst.startSet();
+rst.initiate();
- let primary = rst.getPrimary();
- let latestSecondary = rst.getSecondary();
+let primary = rst.getPrimary();
+let latestSecondary = rst.getSecondary();
- // Set the featureCompatibilityVersion to the downgrade version so that a downgrade node can
- // join the set.
- assert.commandWorked(
- primary.getDB("admin").runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+// Set the featureCompatibilityVersion to the downgrade version so that a downgrade node can
+// join the set.
+assert.commandWorked(
+ primary.getDB("admin").runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- // Add a downgrade node to the set.
- let downgradeSecondary = rst.add({binVersion: downgrade, rsConfig: {priority: 0}});
- rst.reInitiate();
+// Add a downgrade node to the set.
+let downgradeSecondary = rst.add({binVersion: downgrade, rsConfig: {priority: 0}});
+rst.reInitiate();
- // Wait for the downgrade secondary to finish initial sync.
- rst.awaitSecondaryNodes();
- rst.awaitReplication();
+// Wait for the downgrade secondary to finish initial sync.
+rst.awaitSecondaryNodes();
+rst.awaitReplication();
- // Stop replication on the downgrade secondary.
- stopServerReplication(downgradeSecondary);
+// Stop replication on the downgrade secondary.
+stopServerReplication(downgradeSecondary);
- // Set the featureCompatibilityVersion to the upgrade version. This will not replicate to
- // the downgrade secondary, but the downgrade secondary will no longer be able to
- // communicate with the rest of the set.
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+// Set the featureCompatibilityVersion to the upgrade version. This will not replicate to
+// the downgrade secondary, but the downgrade secondary will no longer be able to
+// communicate with the rest of the set.
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- // Shut down the latest version secondary.
- rst.stop(latestSecondary);
+// Shut down the latest version secondary.
+rst.stop(latestSecondary);
- // The primary should step down, since it can no longer see a majority of the replica set.
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- restartServerReplication(downgradeSecondary);
- rst.stopSet();
+// The primary should step down, since it can no longer see a majority of the replica set.
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+restartServerReplication(downgradeSecondary);
+rst.stopSet();
})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js b/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js
index 6e54a1f0642..273695dbc05 100644
--- a/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js
+++ b/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js
@@ -10,299 +10,291 @@
*/
(function() {
- 'use strict';
-
- load('jstests/libs/get_index_helpers.js');
- load('jstests/multiVersion/libs/multi_rs.js');
- load('jstests/multiVersion/libs/verify_versions.js');
-
- // Setup the dbpath for this test.
- const dbpath = MongoRunner.dataPath + 'major_version_upgrade';
- resetDbpath(dbpath);
-
- // We set noCleanData to true in order to preserve the data files between iterations.
- const defaultOptions = {
- dbpath: dbpath,
- noCleanData: true,
- };
-
- // This lists all supported releases and needs to be kept up to date as versions are added and
- // dropped.
- // TODO SERVER-26792: In the future, we should have a common place from which both the
- // multiversion setup procedure and this test get information about supported major releases.
- const versions = [
- {binVersion: '3.2', testCollection: 'three_two'},
- {binVersion: '3.4', featureCompatibilityVersion: '3.4', testCollection: 'three_four'},
- {binVersion: '3.6', featureCompatibilityVersion: '3.6', testCollection: 'three_six'},
- {binVersion: '4.0', featureCompatibilityVersion: '4.0', testCollection: 'four_zero'},
- {binVersion: 'last-stable', testCollection: 'last_stable'},
- {binVersion: 'latest', featureCompatibilityVersion: '4.2', testCollection: 'latest'},
- ];
-
- // These key patterns are considered valid for existing v:0 and v:1 indexes, but are considered
- // invalid for v:2 indexes or new index builds.
- var invalidIndexSpecs = [
- {a: 0},
- {a: NaN},
- {a: true},
- ];
-
- // When running the oldest supported version, insert indexes with bad key patterns.
- function insertBadIndexes(testDB) {
- invalidIndexSpecs.forEach((spec) => {
- // Generate a unique and identifiable collection name.
- let collName = 'bad_index_' + tojson(spec.a);
- assert.commandWorked(testDB[collName].createIndex(spec, {name: 'badkp'}),
- 'failed to create index with key pattern' + tojson(spec));
-
- });
- }
+'use strict';
+
+load('jstests/libs/get_index_helpers.js');
+load('jstests/multiVersion/libs/multi_rs.js');
+load('jstests/multiVersion/libs/verify_versions.js');
+
+// Setup the dbpath for this test.
+const dbpath = MongoRunner.dataPath + 'major_version_upgrade';
+resetDbpath(dbpath);
+
+// We set noCleanData to true in order to preserve the data files between iterations.
+const defaultOptions = {
+ dbpath: dbpath,
+ noCleanData: true,
+};
+
+// This lists all supported releases and needs to be kept up to date as versions are added and
+// dropped.
+// TODO SERVER-26792: In the future, we should have a common place from which both the
+// multiversion setup procedure and this test get information about supported major releases.
+const versions = [
+ {binVersion: '3.2', testCollection: 'three_two'},
+ {binVersion: '3.4', featureCompatibilityVersion: '3.4', testCollection: 'three_four'},
+ {binVersion: '3.6', featureCompatibilityVersion: '3.6', testCollection: 'three_six'},
+ {binVersion: '4.0', featureCompatibilityVersion: '4.0', testCollection: 'four_zero'},
+ {binVersion: 'last-stable', testCollection: 'last_stable'},
+ {binVersion: 'latest', featureCompatibilityVersion: '4.2', testCollection: 'latest'},
+];
+
+// These key patterns are considered valid for existing v:0 and v:1 indexes, but are considered
+// invalid for v:2 indexes or new index builds.
+var invalidIndexSpecs = [
+ {a: 0},
+ {a: NaN},
+ {a: true},
+];
+
+// When running the oldest supported version, insert indexes with bad key patterns.
+function insertBadIndexes(testDB) {
+ invalidIndexSpecs.forEach((spec) => {
+ // Generate a unique and identifiable collection name.
+ let collName = 'bad_index_' + tojson(spec.a);
+ assert.commandWorked(testDB[collName].createIndex(spec, {name: 'badkp'}),
+ 'failed to create index with key pattern' + tojson(spec));
+ });
+}
+
+// When running the newest version, check that the indexes with bad key patterns are readable.
+function validateBadIndexesStandalone(testDB) {
+ invalidIndexSpecs.forEach((spec) => {
+ // Generate a unique and identifiable collection name.
+ let collName = 'bad_index_' + tojson(spec.a);
+ let indexSpec = GetIndexHelpers.findByName(testDB[collName].getIndexes(), 'badkp');
+ assert.neq(null, indexSpec, 'could not find index "badkp"');
+ assert.eq(1, indexSpec.v, tojson(indexSpec));
+
+ // Collection compact command should succeed, despite the presence of the v:1 index
+ // which would fail v:2 validation rules.
+ assert.commandWorked(testDB.runCommand({compact: collName}));
+
+ // reIndex will fail because when featureCompatibilityVersion>=3.4, reIndex
+ // automatically upgrades v=1 indexes to v=2.
+ assert.commandFailed(testDB[collName].reIndex());
+
+ // reIndex should not drop the index.
+ indexSpec = GetIndexHelpers.findByName(testDB[collName].getIndexes(), 'badkp');
+ assert.neq(null, indexSpec, 'could not find index "badkp" after reIndex');
+ assert.eq(1, indexSpec.v, tojson(indexSpec));
+
+ // A query that hints the index should succeed.
+ assert.commandWorked(testDB.runCommand({find: collName, hint: "badkp"}));
+
+ // Newly created indexes will do stricter validation and should fail if the
+ // key pattern is invalid.
+ assert.commandWorked(testDB[collName].dropIndexes());
+ assert.commandFailedWithCode(
+ testDB[collName].createIndex(spec),
+ ErrorCodes.CannotCreateIndex,
+ 'creating index with key pattern ' + tojson(spec) + ' unexpectedly succeeded');
+ // Index build should also fail if v:1 or v:2 is explicitly requested.
+ assert.commandFailedWithCode(
+ testDB[collName].createIndex(spec, {v: 1}),
+ ErrorCodes.CannotCreateIndex,
+ 'creating index with key pattern ' + tojson(spec) + ' unexpectedly succeeded');
+ assert.commandFailedWithCode(
+ testDB[collName].createIndex(spec, {v: 2}),
+ ErrorCodes.CannotCreateIndex,
+ 'creating index with key pattern ' + tojson(spec) + ' unexpectedly succeeded');
+ });
+}
+
+// Check that secondary nodes have the v:1 indexes.
+function validateBadIndexesSecondary(testDB) {
+ invalidIndexSpecs.forEach((spec) => {
+ // Generate a unique and identifiable collection name.
+ let collName = 'bad_index_' + tojson(spec.a);
+ // Verify that the secondary has the v:1 index.
+ let indexSpec = GetIndexHelpers.findByName(testDB[collName].getIndexes(), 'badkp');
+ assert.neq(null, indexSpec, 'could not find index "badkp"');
+ assert.eq(1, indexSpec.v, tojson(indexSpec));
+ });
+}
+
+// Standalone
+// Iterate from earliest to latest versions specified in the versions list, and follow the steps
+// outlined at the top of this test file.
+let authSchemaUpgraded = false;
+for (let i = 0; i < versions.length; i++) {
+ let version = versions[i];
+ let mongodOptions = Object.extend({binVersion: version.binVersion}, defaultOptions);
+
+ // Start a mongod with specified version.
+ let conn = MongoRunner.runMongod(mongodOptions);
+
+ if ((conn === null) && (i > 0) && !authSchemaUpgraded) {
+ // As of 4.0, mongod will refuse to start up with authSchema 3
+ // until the schema has been upgraded.
+ // Step back a version (to 3.6) in order to perform the upgrade,
+        // Then try starting 4.0 again.
+ print(
+ "Failed starting mongod, going to try upgrading the auth schema on the prior version");
+ conn = MongoRunner.runMongod(
+ Object.extend({binVersion: versions[i - 1].binVersion}, defaultOptions));
+ assert.neq(null,
+ conn,
+ 'mongod was previously able to start with version ' +
+ tojson(version.binVersion) + " but now can't");
+ assert.commandWorked(conn.getDB('admin').runCommand({authSchemaUpgrade: 1}));
+ MongoRunner.stopMongod(conn);
- // When running the newest version, check that the indexes with bad key patterns are readable.
- function validateBadIndexesStandalone(testDB) {
- invalidIndexSpecs.forEach((spec) => {
- // Generate a unique and identifiable collection name.
- let collName = 'bad_index_' + tojson(spec.a);
- let indexSpec = GetIndexHelpers.findByName(testDB[collName].getIndexes(), 'badkp');
- assert.neq(null, indexSpec, 'could not find index "badkp"');
- assert.eq(1, indexSpec.v, tojson(indexSpec));
-
- // Collection compact command should succeed, despite the presence of the v:1 index
- // which would fail v:2 validation rules.
- assert.commandWorked(testDB.runCommand({compact: collName}));
-
- // reIndex will fail because when featureCompatibilityVersion>=3.4, reIndex
- // automatically upgrades v=1 indexes to v=2.
- assert.commandFailed(testDB[collName].reIndex());
-
- // reIndex should not drop the index.
- indexSpec = GetIndexHelpers.findByName(testDB[collName].getIndexes(), 'badkp');
- assert.neq(null, indexSpec, 'could not find index "badkp" after reIndex');
- assert.eq(1, indexSpec.v, tojson(indexSpec));
-
- // A query that hints the index should succeed.
- assert.commandWorked(testDB.runCommand({find: collName, hint: "badkp"}));
-
- // Newly created indexes will do stricter validation and should fail if the
- // key pattern is invalid.
- assert.commandWorked(testDB[collName].dropIndexes());
- assert.commandFailedWithCode(
- testDB[collName].createIndex(spec),
- ErrorCodes.CannotCreateIndex,
- 'creating index with key pattern ' + tojson(spec) + ' unexpectedly succeeded');
- // Index build should also fail if v:1 or v:2 is explicitly requested.
- assert.commandFailedWithCode(
- testDB[collName].createIndex(spec, {v: 1}),
- ErrorCodes.CannotCreateIndex,
- 'creating index with key pattern ' + tojson(spec) + ' unexpectedly succeeded');
- assert.commandFailedWithCode(
- testDB[collName].createIndex(spec, {v: 2}),
- ErrorCodes.CannotCreateIndex,
- 'creating index with key pattern ' + tojson(spec) + ' unexpectedly succeeded');
-
- });
+ authSchemaUpgraded = true;
+ conn = MongoRunner.runMongod(mongodOptions);
}
- // Check that secondary nodes have the v:1 indexes.
- function validateBadIndexesSecondary(testDB) {
- invalidIndexSpecs.forEach((spec) => {
- // Generate a unique and identifiable collection name.
- let collName = 'bad_index_' + tojson(spec.a);
- // Verify that the secondary has the v:1 index.
- let indexSpec = GetIndexHelpers.findByName(testDB[collName].getIndexes(), 'badkp');
- assert.neq(null, indexSpec, 'could not find index "badkp"');
- assert.eq(1, indexSpec.v, tojson(indexSpec));
- });
+ assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(mongodOptions));
+ assert.binVersion(conn, version.binVersion);
+
+ if ((i === 0) && (version.binVersion <= 3.6)) {
+ // Simulate coming from a <= 2.6 installation where MONGODB-CR was the default/only
+ // authentication mechanism. Eventually, the upgrade process will fail (above) when
+ // running on 4.0 where support for MONGODB-CR has been removed.
+ conn.getDB('admin').system.version.save({"_id": "authSchema", "currentVersion": 3});
}
- // Standalone
- // Iterate from earliest to latest versions specified in the versions list, and follow the steps
- // outlined at the top of this test file.
- let authSchemaUpgraded = false;
- for (let i = 0; i < versions.length; i++) {
- let version = versions[i];
- let mongodOptions = Object.extend({binVersion: version.binVersion}, defaultOptions);
-
- // Start a mongod with specified version.
- let conn = MongoRunner.runMongod(mongodOptions);
-
- if ((conn === null) && (i > 0) && !authSchemaUpgraded) {
- // As of 4.0, mongod will refuse to start up with authSchema 3
- // until the schema has been upgraded.
- // Step back a version (to 3.6) in order to perform the upgrade,
- // Then try startuing 4.0 again.
- print(
- "Failed starting mongod, going to try upgrading the auth schema on the prior version");
- conn = MongoRunner.runMongod(
- Object.extend({binVersion: versions[i - 1].binVersion}, defaultOptions));
- assert.neq(null,
- conn,
- 'mongod was previously able to start with version ' +
- tojson(version.binVersion) + " but now can't");
- assert.commandWorked(conn.getDB('admin').runCommand({authSchemaUpgrade: 1}));
- MongoRunner.stopMongod(conn);
-
- authSchemaUpgraded = true;
- conn = MongoRunner.runMongod(mongodOptions);
- }
+ // Connect to the 'test' database.
+ let testDB = conn.getDB('test');
+ // Verify that the data and indices from previous iterations are still accessible.
+ for (let j = 0; j < i; j++) {
+ let oldVersionCollection = versions[j].testCollection;
+ assert.eq(1,
+ testDB[oldVersionCollection].count(),
+ `data from ${oldVersionCollection} should be available; options: ` +
+ tojson(mongodOptions));
assert.neq(
- null, conn, 'mongod was unable to start up with options: ' + tojson(mongodOptions));
- assert.binVersion(conn, version.binVersion);
-
- if ((i === 0) && (version.binVersion <= 3.6)) {
- // Simulate coming from a <= 2.6 installation where MONGODB-CR was the default/only
- // authentication mechanism. Eventually, the upgrade process will fail (above) when
- // running on 4.0 where support for MONGODB-CR has been removed.
- conn.getDB('admin').system.version.save({"_id": "authSchema", "currentVersion": 3});
- }
+ null,
+ GetIndexHelpers.findByKeyPattern(testDB[oldVersionCollection].getIndexes(), {a: 1}),
+ `index from ${oldVersionCollection} should be available; options: ` +
+ tojson(mongodOptions));
+ }
- // Connect to the 'test' database.
- let testDB = conn.getDB('test');
-
- // Verify that the data and indices from previous iterations are still accessible.
- for (let j = 0; j < i; j++) {
- let oldVersionCollection = versions[j].testCollection;
- assert.eq(1,
- testDB[oldVersionCollection].count(),
- `data from ${oldVersionCollection} should be available; options: ` +
- tojson(mongodOptions));
- assert.neq(
- null,
- GetIndexHelpers.findByKeyPattern(testDB[oldVersionCollection].getIndexes(), {a: 1}),
- `index from ${oldVersionCollection} should be available; options: ` +
- tojson(mongodOptions));
- }
+ // Create a new collection.
+ assert.commandWorked(testDB.createCollection(version.testCollection));
+
+ // Insert a document into the new collection.
+ assert.writeOK(testDB[version.testCollection].insert({a: 1}));
+ assert.eq(1,
+ testDB[version.testCollection].count(),
+ `mongo should have inserted 1 document into collection ${version.testCollection}; ` +
+ 'options: ' + tojson(mongodOptions));
+
+ // Create an index on the new collection.
+ assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
+
+ if (i === 0) {
+ // We're on the earliest version, insert indexes with bad key patterns.
+ insertBadIndexes(testDB);
+ } else if (i === versions.length - 1) {
+ // We're on the latest version, check bad indexes are still readable.
+ validateBadIndexesStandalone(testDB);
+ }
- // Create a new collection.
- assert.commandWorked(testDB.createCollection(version.testCollection));
-
- // Insert a document into the new collection.
- assert.writeOK(testDB[version.testCollection].insert({a: 1}));
- assert.eq(
- 1,
- testDB[version.testCollection].count(),
- `mongo should have inserted 1 document into collection ${version.testCollection}; ` +
- 'options: ' + tojson(mongodOptions));
-
- // Create an index on the new collection.
- assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
-
- if (i === 0) {
- // We're on the earliest version, insert indexes with bad key patterns.
- insertBadIndexes(testDB);
- } else if (i === versions.length - 1) {
- // We're on the latest version, check bad indexes are still readable.
- validateBadIndexesStandalone(testDB);
- }
+ // Set the appropriate featureCompatibilityVersion upon upgrade, if applicable.
+ if (version.hasOwnProperty('featureCompatibilityVersion')) {
+ let adminDB = conn.getDB("admin");
+ assert.commandWorked(adminDB.runCommand(
+ {"setFeatureCompatibilityVersion": version.featureCompatibilityVersion}));
+ }
- // Set the appropriate featureCompatibilityVersion upon upgrade, if applicable.
- if (version.hasOwnProperty('featureCompatibilityVersion')) {
- let adminDB = conn.getDB("admin");
- assert.commandWorked(adminDB.runCommand(
- {"setFeatureCompatibilityVersion": version.featureCompatibilityVersion}));
+ // Shutdown the current mongod.
+ MongoRunner.stopMongod(conn);
+}
+
+// Replica Sets
+// Setup the ReplSetTest object.
+let nodes = {
+ n1: {binVersion: versions[0].binVersion},
+ n2: {binVersion: versions[0].binVersion},
+ n3: {binVersion: versions[0].binVersion},
+};
+let rst = new ReplSetTest({nodes});
+
+// Start up and initiate the replica set.
+rst.startSet();
+rst.initiate();
+
+// Iterate from earliest to latest versions specified in the versions list, and follow the steps
+// outlined at the top of this test file.
+for (let i = 0; i < versions.length; i++) {
+ let version = versions[i];
+
+ // Connect to the primary running the old version to ensure that the test can insert and
+ // create indices.
+ let primary = rst.getPrimary();
+
+ // Upgrade the secondary nodes first.
+ rst.upgradeSecondaries(primary, {binVersion: version.binVersion});
+
+ assert.neq(null,
+ primary,
+ `replica set was unable to start up after upgrading secondaries to version: ${
+ version.binVersion}`);
+
+ // Connect to the 'test' database.
+ let testDB = primary.getDB('test');
+ assert.commandWorked(testDB.createCollection(version.testCollection));
+ assert.writeOK(testDB[version.testCollection].insert({a: 1}));
+ assert.eq(1,
+ testDB[version.testCollection].count(),
+ `mongo should have inserted 1 document into collection ${version.testCollection}; ` +
+ 'nodes: ' + tojson(nodes));
+
+ // Create an index on the new collection.
+ assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
+
+ if (i === 0) {
+ // We're on the earliest version, insert indexes with bad key patterns.
+ insertBadIndexes(testDB);
+ } else if (i === versions.length - 1) {
+ // We're on the latest version, check bad indexes are still readable.
+ for (let secondary of rst.getSecondaries()) {
+ validateBadIndexesSecondary(secondary.getDB('test'));
}
-
- // Shutdown the current mongod.
- MongoRunner.stopMongod(conn);
}
- // Replica Sets
- // Setup the ReplSetTest object.
- let nodes = {
- n1: {binVersion: versions[0].binVersion},
- n2: {binVersion: versions[0].binVersion},
- n3: {binVersion: versions[0].binVersion},
- };
- let rst = new ReplSetTest({nodes});
-
- // Start up and initiate the replica set.
- rst.startSet();
- rst.initiate();
-
- // Iterate from earliest to latest versions specified in the versions list, and follow the steps
- // outlined at the top of this test file.
- for (let i = 0; i < versions.length; i++) {
- let version = versions[i];
-
- // Connect to the primary running the old version to ensure that the test can insert and
- // create indices.
- let primary = rst.getPrimary();
-
- // Upgrade the secondary nodes first.
- rst.upgradeSecondaries(primary, {binVersion: version.binVersion});
-
+ // Do the index creation and insertion again after upgrading the primary node.
+ primary = rst.upgradePrimary(primary, {binVersion: version.binVersion});
+ assert.neq(
+ null, primary, `replica set was unable to start up with version: ${version.binVersion}`);
+ assert.binVersion(primary, version.binVersion);
+ testDB = primary.getDB('test');
+
+ assert.writeOK(testDB[version.testCollection].insert({b: 1}));
+ assert.eq(2,
+ testDB[version.testCollection].count(),
+ `mongo should have inserted 2 documents into collection ${version.testCollection}; ` +
+ 'nodes: ' + tojson(nodes));
+
+ assert.commandWorked(testDB[version.testCollection].createIndex({b: 1}));
+
+ // Verify that all previously inserted data and indices are accessible.
+ for (let j = 0; j <= i; j++) {
+ let oldVersionCollection = versions[j].testCollection;
+ assert.eq(2,
+ testDB[oldVersionCollection].count(),
+ `data from ${oldVersionCollection} should be available; nodes: ${tojson(nodes)}`);
assert.neq(
null,
- primary,
- `replica set was unable to start up after upgrading secondaries to version: ${version.binVersion}`);
-
- // Connect to the 'test' database.
- let testDB = primary.getDB('test');
- assert.commandWorked(testDB.createCollection(version.testCollection));
- assert.writeOK(testDB[version.testCollection].insert({a: 1}));
- assert.eq(
- 1,
- testDB[version.testCollection].count(),
- `mongo should have inserted 1 document into collection ${version.testCollection}; ` +
- 'nodes: ' + tojson(nodes));
-
- // Create an index on the new collection.
- assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
-
- if (i === 0) {
- // We're on the earliest version, insert indexes with bad key patterns.
- insertBadIndexes(testDB);
- } else if (i === versions.length - 1) {
- // We're on the latest version, check bad indexes are still readable.
- for (let secondary of rst.getSecondaries()) {
- validateBadIndexesSecondary(secondary.getDB('test'));
- }
- }
-
- // Do the index creation and insertion again after upgrading the primary node.
- primary = rst.upgradePrimary(primary, {binVersion: version.binVersion});
- assert.neq(null,
- primary,
- `replica set was unable to start up with version: ${version.binVersion}`);
- assert.binVersion(primary, version.binVersion);
- testDB = primary.getDB('test');
-
- assert.writeOK(testDB[version.testCollection].insert({b: 1}));
- assert.eq(
- 2,
- testDB[version.testCollection].count(),
- `mongo should have inserted 2 documents into collection ${version.testCollection}; ` +
- 'nodes: ' + tojson(nodes));
-
- assert.commandWorked(testDB[version.testCollection].createIndex({b: 1}));
-
- // Verify that all previously inserted data and indices are accessible.
- for (let j = 0; j <= i; j++) {
- let oldVersionCollection = versions[j].testCollection;
- assert.eq(
- 2,
- testDB[oldVersionCollection].count(),
- `data from ${oldVersionCollection} should be available; nodes: ${tojson(nodes)}`);
- assert.neq(
- null,
- GetIndexHelpers.findByKeyPattern(testDB[oldVersionCollection].getIndexes(), {a: 1}),
- `index from ${oldVersionCollection} should be available; nodes: ${tojson(nodes)}`);
- assert.neq(
- null,
- GetIndexHelpers.findByKeyPattern(testDB[oldVersionCollection].getIndexes(), {b: 1}),
- `index from ${oldVersionCollection} should be available; nodes: ${tojson(nodes)}`);
- }
+ GetIndexHelpers.findByKeyPattern(testDB[oldVersionCollection].getIndexes(), {a: 1}),
+ `index from ${oldVersionCollection} should be available; nodes: ${tojson(nodes)}`);
+ assert.neq(
+ null,
+ GetIndexHelpers.findByKeyPattern(testDB[oldVersionCollection].getIndexes(), {b: 1}),
+ `index from ${oldVersionCollection} should be available; nodes: ${tojson(nodes)}`);
+ }
- // Set the appropriate featureCompatibilityVersion upon upgrade, if applicable.
- if (version.hasOwnProperty('featureCompatibilityVersion')) {
- let primaryAdminDB = primary.getDB("admin");
- assert.commandWorked(primaryAdminDB.runCommand(
- {setFeatureCompatibilityVersion: version.featureCompatibilityVersion}));
- rst.awaitReplication();
- }
+ // Set the appropriate featureCompatibilityVersion upon upgrade, if applicable.
+ if (version.hasOwnProperty('featureCompatibilityVersion')) {
+ let primaryAdminDB = primary.getDB("admin");
+ assert.commandWorked(primaryAdminDB.runCommand(
+ {setFeatureCompatibilityVersion: version.featureCompatibilityVersion}));
+ rst.awaitReplication();
}
+}
- // Stop the replica set.
- rst.stopSet();
+// Stop the replica set.
+rst.stopSet();
})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js b/jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js
index fe78152e548..b2b2aee05ad 100644
--- a/jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js
+++ b/jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js
@@ -4,38 +4,37 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/feature_compatibility_version.js");
-
- let st = new ShardingTest({
- shards: [{binVersion: "latest"}, {binVersion: "last-stable"}],
- mongos: {binVersion: "latest"},
- other: {shardAsReplicaSet: false},
- });
-
- let testDB = st.s.getDB("test");
-
- // Create a sharded collection with primary shard 0.
- assert.commandWorked(st.s.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
- assert.commandWorked(
- st.s.adminCommand({shardCollection: testDB.coll.getFullName(), key: {a: 1}}));
-
- // Set the featureCompatibilityVersion to latestFCV. This will fail because the
- // featureCompatibilityVersion cannot be set to latestFCV on shard 1, but it will set the
- // featureCompatibilityVersion to latestFCV on shard 0.
- assert.commandFailed(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV, latestFCV);
- checkFCV(st.shard0.getDB("admin"), latestFCV);
- checkFCV(st.shard1.getDB("admin"), lastStableFCV);
-
- // It is not possible to move a chunk from a latestFCV shard to a last-stable binary version
- // shard.
- assert.commandFailedWithCode(
- st.s.adminCommand(
- {moveChunk: testDB.coll.getFullName(), find: {a: 1}, to: st.shard1.shardName}),
- ErrorCodes.IncompatibleServerVersion);
-
- st.stop();
+"use strict";
+
+load("jstests/libs/feature_compatibility_version.js");
+
+let st = new ShardingTest({
+ shards: [{binVersion: "latest"}, {binVersion: "last-stable"}],
+ mongos: {binVersion: "latest"},
+ other: {shardAsReplicaSet: false},
+});
+
+let testDB = st.s.getDB("test");
+
+// Create a sharded collection with primary shard 0.
+assert.commandWorked(st.s.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: testDB.coll.getFullName(), key: {a: 1}}));
+
+// Set the featureCompatibilityVersion to latestFCV. This will fail because the
+// featureCompatibilityVersion cannot be set to latestFCV on shard 1, but it will set the
+// featureCompatibilityVersion to latestFCV on shard 0.
+assert.commandFailed(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV, latestFCV);
+checkFCV(st.shard0.getDB("admin"), latestFCV);
+checkFCV(st.shard1.getDB("admin"), lastStableFCV);
+
+// It is not possible to move a chunk from a latestFCV shard to a last-stable binary version
+// shard.
+assert.commandFailedWithCode(
+ st.s.adminCommand(
+ {moveChunk: testDB.coll.getFullName(), find: {a: 1}, to: st.shard1.shardName}),
+ ErrorCodes.IncompatibleServerVersion);
+
+st.stop();
})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary.js b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary.js
index 54e654c4dcb..48e710d330d 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary.js
@@ -1,14 +1,14 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/replsets/libs/rename_across_dbs.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/replsets/libs/rename_across_dbs.js");
- const nodes = [{binVersion: 'last-stable'}, {binVersion: 'latest'}, {}];
- const options = {
- nodes: nodes,
- setFeatureCompatibilityVersion: lastStableFCV,
- };
+const nodes = [{binVersion: 'last-stable'}, {binVersion: 'latest'}, {}];
+const options = {
+ nodes: nodes,
+ setFeatureCompatibilityVersion: lastStableFCV,
+};
- new RenameAcrossDatabasesTest(options).run();
+new RenameAcrossDatabasesTest(options).run();
}());
diff --git a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary_drop_target.js b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary_drop_target.js
index 4cdab3f5a61..a1e5869800d 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary_drop_target.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_primary_drop_target.js
@@ -1,15 +1,15 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/replsets/libs/rename_across_dbs.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/replsets/libs/rename_across_dbs.js");
- const nodes = [{binVersion: 'last-stable'}, {binVersion: 'latest'}, {}];
- const options = {
- nodes: nodes,
- setFeatureCompatibilityVersion: lastStableFCV,
- dropTarget: true,
- };
+const nodes = [{binVersion: 'last-stable'}, {binVersion: 'latest'}, {}];
+const options = {
+ nodes: nodes,
+ setFeatureCompatibilityVersion: lastStableFCV,
+ dropTarget: true,
+};
- new RenameAcrossDatabasesTest(options).run();
+new RenameAcrossDatabasesTest(options).run();
}());
diff --git a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary.js b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary.js
index 3fd541a0ffc..2fb9c126ca6 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary.js
@@ -1,14 +1,14 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/replsets/libs/rename_across_dbs.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/replsets/libs/rename_across_dbs.js");
- const nodes = [{binVersion: 'latest'}, {binVersion: 'last-stable'}, {}];
- const options = {
- nodes: nodes,
- setFeatureCompatibilityVersion: lastStableFCV,
- };
+const nodes = [{binVersion: 'latest'}, {binVersion: 'last-stable'}, {}];
+const options = {
+ nodes: nodes,
+ setFeatureCompatibilityVersion: lastStableFCV,
+};
- new RenameAcrossDatabasesTest(options).run();
+new RenameAcrossDatabasesTest(options).run();
}());
diff --git a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary_drop_target.js b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary_drop_target.js
index 90ca1312db3..814fa096f47 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary_drop_target.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rename_across_dbs_last_stable_secondary_drop_target.js
@@ -1,15 +1,15 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/replsets/libs/rename_across_dbs.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/replsets/libs/rename_across_dbs.js");
- const nodes = [{binVersion: 'latest'}, {binVersion: 'last-stable'}, {}];
- const options = {
- nodes: nodes,
- setFeatureCompatibilityVersion: lastStableFCV,
- dropTarget: true,
- };
+const nodes = [{binVersion: 'latest'}, {binVersion: 'last-stable'}, {}];
+const options = {
+ nodes: nodes,
+ setFeatureCompatibilityVersion: lastStableFCV,
+ dropTarget: true,
+};
- new RenameAcrossDatabasesTest(options).run();
+new RenameAcrossDatabasesTest(options).run();
}());
diff --git a/jstests/multiVersion/genericSetFCVUsage/repair_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/repair_feature_compatibility_version.js
index 8eeea0ed427..d37e40c5816 100644
--- a/jstests/multiVersion/genericSetFCVUsage/repair_feature_compatibility_version.js
+++ b/jstests/multiVersion/genericSetFCVUsage/repair_feature_compatibility_version.js
@@ -4,87 +4,82 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/feature_compatibility_version.js");
- let dbpath = MongoRunner.dataPath + "feature_compatibility_version";
- resetDbpath(dbpath);
- let connection;
- let adminDB;
+let dbpath = MongoRunner.dataPath + "feature_compatibility_version";
+resetDbpath(dbpath);
+let connection;
+let adminDB;
- const latest = "latest";
+const latest = "latest";
- /**
- * Ensure that a mongod (without using --repair) fails to start up if there are non-local
- * collections and the FCV document in the admin database has been removed.
- *
- * The mongod has 'version' binary and is started up on 'dbpath'.
- */
- let doStartupFailTests = function(version, dbpath) {
- // Set up a mongod with an admin database but without a FCV document in the admin database.
- setupMissingFCVDoc(version, dbpath);
-
- // Now attempt to start up a new mongod without clearing the data files from 'dbpath', which
- // contain the admin database but are missing the FCV document. The mongod should fail to
- // start up if there is a non-local collection and the FCV document is missing.
- let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: version, noCleanData: true});
- assert.eq(
- null,
- conn,
- "expected mongod to fail when data files are present but no FCV document is found.");
- };
+/**
+ * Ensure that a mongod (without using --repair) fails to start up if there are non-local
+ * collections and the FCV document in the admin database has been removed.
+ *
+ * The mongod has 'version' binary and is started up on 'dbpath'.
+ */
+let doStartupFailTests = function(version, dbpath) {
+ // Set up a mongod with an admin database but without a FCV document in the admin database.
+ setupMissingFCVDoc(version, dbpath);
- /**
- * Starts up a mongod with binary 'version' on 'dbpath', then removes the FCV document from the
- * admin database and returns the mongod.
- */
- let setupMissingFCVDoc = function(version, dbpath) {
- let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: version});
- assert.neq(null,
- conn,
- "mongod was unable to start up with version=" + version + " and no data files");
- adminDB = conn.getDB("admin");
- removeFCVDocument(adminDB);
- MongoRunner.stopMongod(conn);
- return conn;
- };
+ // Now attempt to start up a new mongod without clearing the data files from 'dbpath', which
+ // contain the admin database but are missing the FCV document. The mongod should fail to
+ // start up if there is a non-local collection and the FCV document is missing.
+ let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: version, noCleanData: true});
+ assert.eq(null,
+ conn,
+ "expected mongod to fail when data files are present but no FCV document is found.");
+};
- // Check that start up without --repair fails if there is non-local DB data and the FCV doc was
- // deleted.
- doStartupFailTests(latest, dbpath);
+/**
+ * Starts up a mongod with binary 'version' on 'dbpath', then removes the FCV document from the
+ * admin database and returns the mongod.
+ */
+let setupMissingFCVDoc = function(version, dbpath) {
+ let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: version});
+ assert.neq(
+ null, conn, "mongod was unable to start up with version=" + version + " and no data files");
+ adminDB = conn.getDB("admin");
+ removeFCVDocument(adminDB);
+ MongoRunner.stopMongod(conn);
+ return conn;
+};
- // --repair can be used to restore a missing featureCompatibilityVersion document to an existing
- // admin database, as long as all collections have UUIDs. The FCV should be initialized to
- // lastStableFCV / downgraded FCV.
- connection = setupMissingFCVDoc(latest, dbpath);
- let returnCode =
- runMongoProgram("mongod", "--port", connection.port, "--repair", "--dbpath", dbpath);
- assert.eq(
- returnCode,
- 0,
- "expected mongod --repair to execute successfully when restoring a missing FCV document.");
+// Check that start up without --repair fails if there is non-local DB data and the FCV doc was
+// deleted.
+doStartupFailTests(latest, dbpath);
- connection = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, noCleanData: true});
- assert.neq(null,
- connection,
- "mongod was unable to start up with version=" + latest + " and existing data files");
- adminDB = connection.getDB("admin");
- assert.eq(adminDB.system.version.findOne({_id: "featureCompatibilityVersion"}).version,
- lastStableFCV);
- assert.eq(adminDB.system.version.findOne({_id: "featureCompatibilityVersion"}).targetVersion,
- null);
- MongoRunner.stopMongod(connection);
+// --repair can be used to restore a missing featureCompatibilityVersion document to an existing
+// admin database, as long as all collections have UUIDs. The FCV should be initialized to
+// lastStableFCV / downgraded FCV.
+connection = setupMissingFCVDoc(latest, dbpath);
+let returnCode =
+ runMongoProgram("mongod", "--port", connection.port, "--repair", "--dbpath", dbpath);
+assert.eq(
+ returnCode,
+ 0,
+ "expected mongod --repair to execute successfully when restoring a missing FCV document.");
- // If the featureCompatibilityVersion document is present, --repair should just return success.
- connection = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
- assert.neq(null,
- connection,
- "mongod was unable to start up with version=" + latest + " and no data files");
- MongoRunner.stopMongod(connection);
+connection = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, noCleanData: true});
+assert.neq(null,
+ connection,
+ "mongod was unable to start up with version=" + latest + " and existing data files");
+adminDB = connection.getDB("admin");
+assert.eq(adminDB.system.version.findOne({_id: "featureCompatibilityVersion"}).version,
+ lastStableFCV);
+assert.eq(adminDB.system.version.findOne({_id: "featureCompatibilityVersion"}).targetVersion, null);
+MongoRunner.stopMongod(connection);
- returnCode =
- runMongoProgram("mongod", "--port", connection.port, "--repair", "--dbpath", dbpath);
- assert.eq(returnCode, 0);
+// If the featureCompatibilityVersion document is present, --repair should just return success.
+connection = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
+assert.neq(null,
+ connection,
+ "mongod was unable to start up with version=" + latest + " and no data files");
+MongoRunner.stopMongod(connection);
+returnCode = runMongoProgram("mongod", "--port", connection.port, "--repair", "--dbpath", dbpath);
+assert.eq(returnCode, 0);
})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js b/jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js
index d88d7452ed5..9d3c1a60172 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js
@@ -4,9 +4,9 @@
*/
(function() {
- "use strict";
- load("jstests/multiVersion/libs/multiversion_rollback.js");
+"use strict";
+load("jstests/multiVersion/libs/multiversion_rollback.js");
- var testName = "multiversion_rollback_last_stable_to_latest";
- testMultiversionRollback(testName, "last-stable", "latest");
+var testName = "multiversion_rollback_last_stable_to_latest";
+testMultiversionRollback(testName, "last-stable", "latest");
})(); \ No newline at end of file
diff --git a/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js b/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js
index 951b6c91ae9..546065ecc5f 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js
@@ -4,9 +4,9 @@
*/
(function() {
- "use strict";
- load("jstests/multiVersion/libs/multiversion_rollback.js");
+"use strict";
+load("jstests/multiVersion/libs/multiversion_rollback.js");
- var testName = "multiversion_rollback_latest_to_last_stable";
- testMultiversionRollback(testName, "latest", "last-stable");
+var testName = "multiversion_rollback_latest_to_last_stable";
+testMultiversionRollback(testName, "latest", "last-stable");
})(); \ No newline at end of file
diff --git a/jstests/multiVersion/genericSetFCVUsage/setFCV_collmod_transaction_rollback.js b/jstests/multiVersion/genericSetFCVUsage/setFCV_collmod_transaction_rollback.js
index 8a4b276d7b5..1edb0a61904 100644
--- a/jstests/multiVersion/genericSetFCVUsage/setFCV_collmod_transaction_rollback.js
+++ b/jstests/multiVersion/genericSetFCVUsage/setFCV_collmod_transaction_rollback.js
@@ -3,40 +3,40 @@
* collMod command.
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/feature_compatibility_version.js");
- let dbpath = MongoRunner.dataPath + "setFCV_collmod_transaction_rollback";
- resetDbpath(dbpath);
+let dbpath = MongoRunner.dataPath + "setFCV_collmod_transaction_rollback";
+resetDbpath(dbpath);
- const latest = "latest";
+const latest = "latest";
- let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
- assert.neq(
- null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
- let adminDB = conn.getDB("admin");
+let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
+assert.neq(
+ null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
+let adminDB = conn.getDB("admin");
- var collName = "collModTest";
- var coll = adminDB.getCollection(collName);
- var ttlBeforeRollback = 50;
+var collName = "collModTest";
+var coll = adminDB.getCollection(collName);
+var ttlBeforeRollback = 50;
- assert.commandWorked(
- coll.createIndex({b: 1}, {"name": "index1", "expireAfterSeconds": ttlBeforeRollback}));
+assert.commandWorked(
+ coll.createIndex({b: 1}, {"name": "index1", "expireAfterSeconds": ttlBeforeRollback}));
- // The failpoint causes an interrupt in the collMod's WriteUnitOfWork, thus triggers a rollback.
- assert.commandWorked(
- adminDB.adminCommand({configureFailPoint: "assertAfterIndexUpdate", mode: "alwaysOn"}));
+// The failpoint causes an interrupt in the collMod's WriteUnitOfWork, thus triggers a rollback.
+assert.commandWorked(
+ adminDB.adminCommand({configureFailPoint: "assertAfterIndexUpdate", mode: "alwaysOn"}));
- // Test transaction rollback after index ttl update collMod.
- assert.commandFailedWithCode(
- adminDB.runCommand(
- {"collMod": collName, "index": {"name": "index1", "expireAfterSeconds": 100}}),
- 50970);
+// Test transaction rollback after index ttl update collMod.
+assert.commandFailedWithCode(
+ adminDB.runCommand(
+ {"collMod": collName, "index": {"name": "index1", "expireAfterSeconds": 100}}),
+ 50970);
- const index = coll.getIndexes();
- var ttlAfterRollback = index[1].expireAfterSeconds;
- assert.eq(ttlAfterRollback, ttlBeforeRollback);
+const index = coll.getIndexes();
+var ttlAfterRollback = index[1].expireAfterSeconds;
+assert.eq(ttlAfterRollback, ttlBeforeRollback);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
index a404d8d01e3..9c62bdb5ee1 100644
--- a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
+++ b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
@@ -10,382 +10,373 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
TestData.skipCheckDBHashes = true;
(function() {
- "use strict";
-
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/libs/get_index_helpers.js");
- load("jstests/libs/write_concern_util.js");
- load("jstests/replsets/rslib.js");
-
- let dbpath = MongoRunner.dataPath + "feature_compatibility_version";
- resetDbpath(dbpath);
- let res;
-
- const latest = "latest";
- const lastStable = "last-stable";
-
- //
- // Standalone tests.
- //
-
- let conn;
- let adminDB;
-
- // A 'latest' binary standalone should default to 'latestFCV'.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
- assert.neq(
- null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
- adminDB = conn.getDB("admin");
- checkFCV(adminDB, latestFCV);
-
- jsTestLog("EXPECTED TO FAIL: featureCompatibilityVersion cannot be set to an invalid value");
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: 5}));
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: "3.2"}));
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: "4.4"}));
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: "3.4"}));
-
- jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion rejects unknown fields.");
- assert.commandFailed(
- adminDB.runCommand({setFeatureCompatibilityVersion: lastStable, unknown: 1}));
-
- jsTestLog(
- "EXPECTED TO FAIL: setFeatureCompatibilityVersion can only be run on the admin database");
- assert.commandFailed(
- conn.getDB("test").runCommand({setFeatureCompatibilityVersion: lastStable}));
-
- jsTestLog("EXPECTED TO FAIL: featureCompatibilityVersion cannot be set via setParameter");
- assert.commandFailed(
- adminDB.runCommand({setParameter: 1, featureCompatibilityVersion: lastStable}));
-
- // setFeatureCompatibilityVersion fails to downgrade FCV if the write fails.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCollectionUpdates",
- data: {collectionNS: "admin.system.version"},
- mode: "alwaysOn"
- }));
- jsTestLog(
- "EXPECTED TO FAIL: setFeatureCompatibilityVersion fails to downgrade FCV if the write fails");
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(adminDB, latestFCV);
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCollectionUpdates",
- data: {collectionNS: "admin.system.version"},
- mode: "off"
- }));
-
- // featureCompatibilityVersion can be downgraded to 'lastStableFCV'.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(adminDB, lastStableFCV);
-
- // setFeatureCompatibilityVersion fails to upgrade to 'latestFCV' if the write fails.
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCollectionUpdates",
- data: {collectionNS: "admin.system.version"},
- mode: "alwaysOn"
- }));
- jsTestLog(
- "EXPECTED TO FAIL: setFeatureCompatibilityVersion fails to upgrade to 'latestFCV' if the write fails");
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, lastStableFCV);
- assert.commandWorked(adminDB.runCommand({
- configureFailPoint: "failCollectionUpdates",
- data: {collectionNS: "admin.system.version"},
- mode: "off"
- }));
-
- // featureCompatibilityVersion can be upgraded to 'latestFCV'.
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, latestFCV);
-
- MongoRunner.stopMongod(conn);
-
- // featureCompatibilityVersion is durable.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
- assert.neq(
- null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
- adminDB = conn.getDB("admin");
- checkFCV(adminDB, latestFCV);
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(adminDB, lastStableFCV);
- MongoRunner.stopMongod(conn);
-
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, noCleanData: true});
- assert.neq(null,
- conn,
- "mongod was unable to start up with binary version=" + latest +
- " and last-stable featureCompatibilityVersion");
- adminDB = conn.getDB("admin");
- checkFCV(adminDB, lastStableFCV);
- MongoRunner.stopMongod(conn);
-
- // If you upgrade from 'lastStable' binary to 'latest' binary and have non-local databases, FCV
- // remains 'lastStableFCV'.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: lastStable});
- assert.neq(null,
- conn,
- "mongod was unable to start up with version=" + lastStable + " and no data files");
- assert.writeOK(conn.getDB("test").coll.insert({a: 5}));
- adminDB = conn.getDB("admin");
- checkFCV(adminDB, lastStableFCV);
- MongoRunner.stopMongod(conn);
-
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, noCleanData: true});
- assert.neq(null,
- conn,
- "mongod was unable to start up with binary version=" + latest +
- " and featureCompatibilityVersion=" + lastStableFCV);
- adminDB = conn.getDB("admin");
- checkFCV(adminDB, lastStableFCV);
- MongoRunner.stopMongod(conn);
-
- // A 'latest' binary mongod started with --shardsvr and clean data files defaults to
- // 'lastStableFCV'.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, shardsvr: ""});
- assert.neq(
- null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
- adminDB = conn.getDB("admin");
- checkFCV(adminDB, lastStableFCV);
- MongoRunner.stopMongod(conn);
-
- //
- // Replica set tests.
- //
-
- let rst;
- let rstConns;
- let replSetConfig;
- let primaryAdminDB;
- let secondaryAdminDB;
-
- // 'latest' binary replica set.
- rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}});
- rst.startSet();
- rst.initiate();
- primaryAdminDB = rst.getPrimary().getDB("admin");
- secondaryAdminDB = rst.getSecondary().getDB("admin");
-
- // FCV should default to 'latestFCV' on primary and secondary in a 'latest' binary replica set.
- checkFCV(primaryAdminDB, latestFCV);
- rst.awaitReplication();
- checkFCV(secondaryAdminDB, latestFCV);
-
- // featureCompatibilityVersion propagates to secondary.
- assert.commandWorked(
- primaryAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(primaryAdminDB, lastStableFCV);
- rst.awaitReplication();
- checkFCV(secondaryAdminDB, lastStableFCV);
-
- jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion cannot be run on secondary");
- assert.commandFailed(secondaryAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
-
- rst.stopSet();
-
- // A 'latest' binary secondary with a 'lastStable' binary primary will have 'lastStableFCV'
- rst = new ReplSetTest({nodes: [{binVersion: lastStable}, {binVersion: latest}]});
- rstConns = rst.startSet();
- replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- replSetConfig.members[1].votes = 0;
- rst.initiate(replSetConfig);
- rst.waitForState(rstConns[0], ReplSetTest.State.PRIMARY);
- secondaryAdminDB = rst.getSecondary().getDB("admin");
- checkFCV(secondaryAdminDB, lastStableFCV);
- rst.stopSet();
-
- // Test that a 'lastStable' secondary can successfully perform initial sync from a 'latest'
- // primary with 'lastStableFCV'.
- rst = new ReplSetTest({
- nodes: [{binVersion: latest}, {binVersion: latest, rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- });
- rst.startSet();
- rst.initiate();
-
- let primary = rst.getPrimary();
- primaryAdminDB = primary.getDB("admin");
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
- let secondary = rst.getSecondary();
-
- // The command should fail because wtimeout expires before a majority responds.
- stopServerReplication(secondary);
- res = primary.adminCommand(
- {setFeatureCompatibilityVersion: latestFCV, writeConcern: {wtimeout: 1000}});
- assert.eq(0, res.ok);
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
- restartServerReplication(secondary);
-
- // Because the failed setFCV command left the primary in an intermediary state, complete the
- // upgrade then reset back to the lastStable version.
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
- secondary = rst.add({binVersion: lastStable});
- secondaryAdminDB = secondary.getDB("admin");
-
- // Rig the election so that the first node running latest version remains the primary after the
- // 'lastStable' secondary is added to the replica set.
- replSetConfig = rst.getReplSetConfig();
- replSetConfig.version = 4;
- replSetConfig.members[2].priority = 0;
- reconfig(rst, replSetConfig);
-
- // Verify that the 'lastStable' secondary successfully performed its initial sync.
- assert.writeOK(
- primaryAdminDB.getSiblingDB("test").coll.insert({awaitRepl: true}, {writeConcern: {w: 3}}));
-
- // Test that a 'lastStable' secondary can no longer replicate from the primary after the FCV is
- // upgraded to 'latestFCV'.
- // Note: the 'lastStable' secondary must stop replicating during the upgrade to ensure it has no
- // chance of seeing the 'upgrading to latest' message in the oplog, whereupon it would crash.
- stopServerReplication(secondary);
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- restartServerReplication(secondary);
- checkFCV(secondaryAdminDB, lastStableFCV);
- assert.writeOK(primaryAdminDB.getSiblingDB("test").coll.insert({shouldReplicate: false}));
- assert.eq(secondaryAdminDB.getSiblingDB("test").coll.find({shouldReplicate: false}).itcount(),
- 0);
- rst.stopSet();
-
- // Test idempotency for setFeatureCompatibilityVersion.
- rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}});
- rst.startSet();
- rst.initiate();
-
- // Set FCV to 'lastStableFCV' so that a 'lastStable' binary node can join the set.
- primary = rst.getPrimary();
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- rst.awaitReplication();
-
- // Add a 'lastStable' binary node to the set.
- secondary = rst.add({binVersion: lastStable});
- rst.reInitiate();
-
- // Ensure the 'lastStable' binary node succeeded its initial sync.
- assert.writeOK(primary.getDB("test").coll.insert({awaitRepl: true}, {writeConcern: {w: 3}}));
-
- // Run {setFCV: lastStableFCV}. This should be idempotent.
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- rst.awaitReplication();
-
- // Ensure the secondary is still running.
- rst.stopSet();
-
- //
- // Sharding tests.
- //
-
- let st;
- let mongosAdminDB;
- let configPrimaryAdminDB;
- let shardPrimaryAdminDB;
-
- // A 'latest' binary cluster started with clean data files will set FCV to 'latestFCV'.
- st = new ShardingTest({
- shards: {rs0: {nodes: [{binVersion: latest}, {binVersion: latest}]}},
- other: {useBridge: true}
- });
- mongosAdminDB = st.s.getDB("admin");
- configPrimaryAdminDB = st.configRS.getPrimary().getDB("admin");
- shardPrimaryAdminDB = st.rs0.getPrimary().getDB("admin");
-
- checkFCV(configPrimaryAdminDB, latestFCV);
- checkFCV(shardPrimaryAdminDB, latestFCV);
-
- jsTestLog(
- "EXPECTED TO FAIL: featureCompatibilityVersion cannot be set to invalid value on mongos");
- assert.commandFailed(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: 5}));
- assert.commandFailed(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: "3.2"}));
- assert.commandFailed(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: "4.4"}));
-
- jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion rejects unknown fields on mongos");
- assert.commandFailed(
- mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV, unknown: 1}));
-
- jsTestLog(
- "EXPECTED TO FAIL: setFeatureCompatibilityVersion can only be run on the admin database on mongos");
- assert.commandFailed(
- st.s.getDB("test").runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
- jsTestLog(
- "EXPECTED TO FAIL: featureCompatibilityVersion cannot be set via setParameter on mongos");
- assert.commandFailed(
- mongosAdminDB.runCommand({setParameter: 1, featureCompatibilityVersion: lastStableFCV}));
-
- // Prevent the shard primary from receiving messages from the config server primary. When we try
- // to set FCV to 'lastStableFCV', the command should fail because the shard cannot be contacted.
- st.rs0.getPrimary().discardMessagesFrom(st.configRS.getPrimary(), 1.0);
- jsTestLog(
- "EXPECTED TO FAIL: setFeatureCompatibilityVersion cannot be set because the shard primary is not reachable");
- assert.commandFailed(
- mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV, maxTimeMS: 1000}));
- checkFCV(
- configPrimaryAdminDB, lastStableFCV, lastStableFCV /* indicates downgrade in progress */);
- st.rs0.getPrimary().discardMessagesFrom(st.configRS.getPrimary(), 0.0);
-
- // FCV can be set to 'lastStableFCV' on mongos.
- // This is run through assert.soon() because we've just caused a network interruption
- // by discarding messages in the bridge.
- assert.soon(function() {
- res = mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV});
- if (res.ok == 0) {
- print("Failed to set feature compatibility version: " + tojson(res));
- return false;
- }
- return true;
- });
-
- // featureCompatibilityVersion propagates to config and shard.
- checkFCV(configPrimaryAdminDB, lastStableFCV);
- checkFCV(shardPrimaryAdminDB, lastStableFCV);
-
- // A 'latest' binary replica set started as a shard server defaults to 'lastStableFCV'.
- let latestShard = new ReplSetTest({
- name: "latestShard",
- nodes: [{binVersion: latest}, {binVersion: latest}],
- nodeOptions: {shardsvr: ""},
- useHostName: true
- });
- latestShard.startSet();
- latestShard.initiate();
- let latestShardPrimaryAdminDB = latestShard.getPrimary().getDB("admin");
- checkFCV(latestShardPrimaryAdminDB, lastStableFCV);
- assert.commandWorked(mongosAdminDB.runCommand({addShard: latestShard.getURL()}));
- checkFCV(latestShardPrimaryAdminDB, lastStableFCV);
-
- // FCV can be set to 'latestFCV' on mongos.
- assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
- checkFCV(shardPrimaryAdminDB, latestFCV);
- checkFCV(latestShardPrimaryAdminDB, latestFCV);
-
- // Call ShardingTest.stop before shutting down latestShard, so that the UUID check in
- // ShardingTest.stop can talk to latestShard.
- st.stop();
- latestShard.stopSet();
-
- // Create cluster with a 'lastStable' binary mongos so that we can add 'lastStable' binary
- // shards.
- st = new ShardingTest({shards: 0, other: {mongosOptions: {binVersion: lastStable}}});
- mongosAdminDB = st.s.getDB("admin");
- configPrimaryAdminDB = st.configRS.getPrimary().getDB("admin");
- checkFCV(configPrimaryAdminDB, lastStableFCV);
-
- // Adding a 'lastStable' binary shard to a cluster with 'lastStableFCV' succeeds.
- let lastStableShard = new ReplSetTest({
- name: "lastStableShard",
- nodes: [{binVersion: lastStable}, {binVersion: lastStable}],
- nodeOptions: {shardsvr: ""},
- useHostName: true
- });
- lastStableShard.startSet();
- lastStableShard.initiate();
- assert.commandWorked(mongosAdminDB.runCommand({addShard: lastStableShard.getURL()}));
- checkFCV(lastStableShard.getPrimary().getDB("admin"), lastStableFCV);
-
- // call ShardingTest.stop before shutting down lastStableShard, so that the UUID check in
- // ShardingTest.stop can talk to lastStableShard.
- st.stop();
- lastStableShard.stopSet();
+"use strict";
+
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/write_concern_util.js");
+load("jstests/replsets/rslib.js");
+
+let dbpath = MongoRunner.dataPath + "feature_compatibility_version";
+resetDbpath(dbpath);
+let res;
+
+const latest = "latest";
+const lastStable = "last-stable";
+
+//
+// Standalone tests.
+//
+
+let conn;
+let adminDB;
+
+// A 'latest' binary standalone should default to 'latestFCV'.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
+assert.neq(
+ null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
+adminDB = conn.getDB("admin");
+checkFCV(adminDB, latestFCV);
+
+jsTestLog("EXPECTED TO FAIL: featureCompatibilityVersion cannot be set to an invalid value");
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: 5}));
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: "3.2"}));
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: "4.4"}));
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: "3.4"}));
+
+jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion rejects unknown fields.");
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: lastStable, unknown: 1}));
+
+jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion can only be run on the admin database");
+assert.commandFailed(conn.getDB("test").runCommand({setFeatureCompatibilityVersion: lastStable}));
+
+jsTestLog("EXPECTED TO FAIL: featureCompatibilityVersion cannot be set via setParameter");
+assert.commandFailed(
+ adminDB.runCommand({setParameter: 1, featureCompatibilityVersion: lastStable}));
+
+// setFeatureCompatibilityVersion fails to downgrade FCV if the write fails.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCollectionUpdates",
+ data: {collectionNS: "admin.system.version"},
+ mode: "alwaysOn"
+}));
+jsTestLog(
+ "EXPECTED TO FAIL: setFeatureCompatibilityVersion fails to downgrade FCV if the write fails");
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(adminDB, latestFCV);
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCollectionUpdates",
+ data: {collectionNS: "admin.system.version"},
+ mode: "off"
+}));
+
+// featureCompatibilityVersion can be downgraded to 'lastStableFCV'.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(adminDB, lastStableFCV);
+
+// setFeatureCompatibilityVersion fails to upgrade to 'latestFCV' if the write fails.
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCollectionUpdates",
+ data: {collectionNS: "admin.system.version"},
+ mode: "alwaysOn"
+}));
+jsTestLog(
+ "EXPECTED TO FAIL: setFeatureCompatibilityVersion fails to upgrade to 'latestFCV' if the write fails");
+assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(adminDB, lastStableFCV);
+assert.commandWorked(adminDB.runCommand({
+ configureFailPoint: "failCollectionUpdates",
+ data: {collectionNS: "admin.system.version"},
+ mode: "off"
+}));
+
+// featureCompatibilityVersion can be upgraded to 'latestFCV'.
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(adminDB, latestFCV);
+
+MongoRunner.stopMongod(conn);
+
+// featureCompatibilityVersion is durable.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest});
+assert.neq(
+ null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
+adminDB = conn.getDB("admin");
+checkFCV(adminDB, latestFCV);
+assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(adminDB, lastStableFCV);
+MongoRunner.stopMongod(conn);
+
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, noCleanData: true});
+assert.neq(null,
+ conn,
+ "mongod was unable to start up with binary version=" + latest +
+ " and last-stable featureCompatibilityVersion");
+adminDB = conn.getDB("admin");
+checkFCV(adminDB, lastStableFCV);
+MongoRunner.stopMongod(conn);
+
+// If you upgrade from 'lastStable' binary to 'latest' binary and have non-local databases, FCV
+// remains 'lastStableFCV'.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: lastStable});
+assert.neq(
+ null, conn, "mongod was unable to start up with version=" + lastStable + " and no data files");
+assert.writeOK(conn.getDB("test").coll.insert({a: 5}));
+adminDB = conn.getDB("admin");
+checkFCV(adminDB, lastStableFCV);
+MongoRunner.stopMongod(conn);
+
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, noCleanData: true});
+assert.neq(null,
+ conn,
+ "mongod was unable to start up with binary version=" + latest +
+ " and featureCompatibilityVersion=" + lastStableFCV);
+adminDB = conn.getDB("admin");
+checkFCV(adminDB, lastStableFCV);
+MongoRunner.stopMongod(conn);
+
+// A 'latest' binary mongod started with --shardsvr and clean data files defaults to
+// 'lastStableFCV'.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latest, shardsvr: ""});
+assert.neq(
+ null, conn, "mongod was unable to start up with version=" + latest + " and no data files");
+adminDB = conn.getDB("admin");
+checkFCV(adminDB, lastStableFCV);
+MongoRunner.stopMongod(conn);
+
+//
+// Replica set tests.
+//
+
+let rst;
+let rstConns;
+let replSetConfig;
+let primaryAdminDB;
+let secondaryAdminDB;
+
+// 'latest' binary replica set.
+rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}});
+rst.startSet();
+rst.initiate();
+primaryAdminDB = rst.getPrimary().getDB("admin");
+secondaryAdminDB = rst.getSecondary().getDB("admin");
+
+// FCV should default to 'latestFCV' on primary and secondary in a 'latest' binary replica set.
+checkFCV(primaryAdminDB, latestFCV);
+rst.awaitReplication();
+checkFCV(secondaryAdminDB, latestFCV);
+
+// featureCompatibilityVersion propagates to secondary.
+assert.commandWorked(primaryAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(primaryAdminDB, lastStableFCV);
+rst.awaitReplication();
+checkFCV(secondaryAdminDB, lastStableFCV);
+
+jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion cannot be run on secondary");
+assert.commandFailed(secondaryAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+
+rst.stopSet();
+
+// A 'latest' binary secondary with a 'lastStable' binary primary will have 'lastStableFCV'
+rst = new ReplSetTest({nodes: [{binVersion: lastStable}, {binVersion: latest}]});
+rstConns = rst.startSet();
+replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+replSetConfig.members[1].votes = 0;
+rst.initiate(replSetConfig);
+rst.waitForState(rstConns[0], ReplSetTest.State.PRIMARY);
+secondaryAdminDB = rst.getSecondary().getDB("admin");
+checkFCV(secondaryAdminDB, lastStableFCV);
+rst.stopSet();
+
+// Test that a 'lastStable' secondary can successfully perform initial sync from a 'latest'
+// primary with 'lastStableFCV'.
+rst = new ReplSetTest({
+ nodes: [{binVersion: latest}, {binVersion: latest, rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+});
+rst.startSet();
+rst.initiate();
+
+let primary = rst.getPrimary();
+primaryAdminDB = primary.getDB("admin");
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+let secondary = rst.getSecondary();
+
+// The command should fail because wtimeout expires before a majority responds.
+stopServerReplication(secondary);
+res = primary.adminCommand(
+ {setFeatureCompatibilityVersion: latestFCV, writeConcern: {wtimeout: 1000}});
+assert.eq(0, res.ok);
+assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
+restartServerReplication(secondary);
+
+// Because the failed setFCV command left the primary in an intermediary state, complete the
+// upgrade then reset back to the lastStable version.
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+secondary = rst.add({binVersion: lastStable});
+secondaryAdminDB = secondary.getDB("admin");
+
+// Rig the election so that the first node running latest version remains the primary after the
+// 'lastStable' secondary is added to the replica set.
+replSetConfig = rst.getReplSetConfig();
+replSetConfig.version = 4;
+replSetConfig.members[2].priority = 0;
+reconfig(rst, replSetConfig);
+
+// Verify that the 'lastStable' secondary successfully performed its initial sync.
+assert.writeOK(
+ primaryAdminDB.getSiblingDB("test").coll.insert({awaitRepl: true}, {writeConcern: {w: 3}}));
+
+// Test that a 'lastStable' secondary can no longer replicate from the primary after the FCV is
+// upgraded to 'latestFCV'.
+// Note: the 'lastStable' secondary must stop replicating during the upgrade to ensure it has no
+// chance of seeing the 'upgrading to latest' message in the oplog, whereupon it would crash.
+stopServerReplication(secondary);
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+restartServerReplication(secondary);
+checkFCV(secondaryAdminDB, lastStableFCV);
+assert.writeOK(primaryAdminDB.getSiblingDB("test").coll.insert({shouldReplicate: false}));
+assert.eq(secondaryAdminDB.getSiblingDB("test").coll.find({shouldReplicate: false}).itcount(), 0);
+rst.stopSet();
+
+// Test idempotency for setFeatureCompatibilityVersion.
+rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}});
+rst.startSet();
+rst.initiate();
+
+// Set FCV to 'lastStableFCV' so that a 'lastStable' binary node can join the set.
+primary = rst.getPrimary();
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+rst.awaitReplication();
+
+// Add a 'lastStable' binary node to the set.
+secondary = rst.add({binVersion: lastStable});
+rst.reInitiate();
+
+// Ensure the 'lastStable' binary node succeeded its initial sync.
+assert.writeOK(primary.getDB("test").coll.insert({awaitRepl: true}, {writeConcern: {w: 3}}));
+
+// Run {setFCV: lastStableFCV}. This should be idempotent.
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+rst.awaitReplication();
+
+// Ensure the secondary is still running.
+rst.stopSet();
+
+//
+// Sharding tests.
+//
+
+let st;
+let mongosAdminDB;
+let configPrimaryAdminDB;
+let shardPrimaryAdminDB;
+
+// A 'latest' binary cluster started with clean data files will set FCV to 'latestFCV'.
+st = new ShardingTest({
+ shards: {rs0: {nodes: [{binVersion: latest}, {binVersion: latest}]}},
+ other: {useBridge: true}
+});
+mongosAdminDB = st.s.getDB("admin");
+configPrimaryAdminDB = st.configRS.getPrimary().getDB("admin");
+shardPrimaryAdminDB = st.rs0.getPrimary().getDB("admin");
+
+checkFCV(configPrimaryAdminDB, latestFCV);
+checkFCV(shardPrimaryAdminDB, latestFCV);
+
+jsTestLog("EXPECTED TO FAIL: featureCompatibilityVersion cannot be set to invalid value on mongos");
+assert.commandFailed(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: 5}));
+assert.commandFailed(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: "3.2"}));
+assert.commandFailed(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: "4.4"}));
+
+jsTestLog("EXPECTED TO FAIL: setFeatureCompatibilityVersion rejects unknown fields on mongos");
+assert.commandFailed(
+ mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV, unknown: 1}));
+
+jsTestLog(
+ "EXPECTED TO FAIL: setFeatureCompatibilityVersion can only be run on the admin database on mongos");
+assert.commandFailed(
+ st.s.getDB("test").runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+jsTestLog("EXPECTED TO FAIL: featureCompatibilityVersion cannot be set via setParameter on mongos");
+assert.commandFailed(
+ mongosAdminDB.runCommand({setParameter: 1, featureCompatibilityVersion: lastStableFCV}));
+
+// Prevent the shard primary from receiving messages from the config server primary. When we try
+// to set FCV to 'lastStableFCV', the command should fail because the shard cannot be contacted.
+st.rs0.getPrimary().discardMessagesFrom(st.configRS.getPrimary(), 1.0);
+jsTestLog(
+ "EXPECTED TO FAIL: setFeatureCompatibilityVersion cannot be set because the shard primary is not reachable");
+assert.commandFailed(
+ mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV, maxTimeMS: 1000}));
+checkFCV(configPrimaryAdminDB, lastStableFCV, lastStableFCV /* indicates downgrade in progress */);
+st.rs0.getPrimary().discardMessagesFrom(st.configRS.getPrimary(), 0.0);
+
+// FCV can be set to 'lastStableFCV' on mongos.
+// This is run through assert.soon() because we've just caused a network interruption
+// by discarding messages in the bridge.
+assert.soon(function() {
+ res = mongosAdminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV});
+ if (res.ok == 0) {
+ print("Failed to set feature compatibility version: " + tojson(res));
+ return false;
+ }
+ return true;
+});
+
+// featureCompatibilityVersion propagates to config and shard.
+checkFCV(configPrimaryAdminDB, lastStableFCV);
+checkFCV(shardPrimaryAdminDB, lastStableFCV);
+
+// A 'latest' binary replica set started as a shard server defaults to 'lastStableFCV'.
+let latestShard = new ReplSetTest({
+ name: "latestShard",
+ nodes: [{binVersion: latest}, {binVersion: latest}],
+ nodeOptions: {shardsvr: ""},
+ useHostName: true
+});
+latestShard.startSet();
+latestShard.initiate();
+let latestShardPrimaryAdminDB = latestShard.getPrimary().getDB("admin");
+checkFCV(latestShardPrimaryAdminDB, lastStableFCV);
+assert.commandWorked(mongosAdminDB.runCommand({addShard: latestShard.getURL()}));
+checkFCV(latestShardPrimaryAdminDB, lastStableFCV);
+
+// FCV can be set to 'latestFCV' on mongos.
+assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
+checkFCV(shardPrimaryAdminDB, latestFCV);
+checkFCV(latestShardPrimaryAdminDB, latestFCV);
+
+// Call ShardingTest.stop before shutting down latestShard, so that the UUID check in
+// ShardingTest.stop can talk to latestShard.
+st.stop();
+latestShard.stopSet();
+
+// Create cluster with a 'lastStable' binary mongos so that we can add 'lastStable' binary
+// shards.
+st = new ShardingTest({shards: 0, other: {mongosOptions: {binVersion: lastStable}}});
+mongosAdminDB = st.s.getDB("admin");
+configPrimaryAdminDB = st.configRS.getPrimary().getDB("admin");
+checkFCV(configPrimaryAdminDB, lastStableFCV);
+
+// Adding a 'lastStable' binary shard to a cluster with 'lastStableFCV' succeeds.
+let lastStableShard = new ReplSetTest({
+ name: "lastStableShard",
+ nodes: [{binVersion: lastStable}, {binVersion: lastStable}],
+ nodeOptions: {shardsvr: ""},
+ useHostName: true
+});
+lastStableShard.startSet();
+lastStableShard.initiate();
+assert.commandWorked(mongosAdminDB.runCommand({addShard: lastStableShard.getURL()}));
+checkFCV(lastStableShard.getPrimary().getDB("admin"), lastStableFCV);
+
+// call ShardingTest.stop before shutting down lastStableShard, so that the UUID check in
+// ShardingTest.stop can talk to lastStableShard.
+st.stop();
+lastStableShard.stopSet();
})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js
index ba675fbae67..e80e36eb624 100644
--- a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js
+++ b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_while_creating_collection.js
@@ -2,75 +2,72 @@
* Tests that upgrade/downgrade works correctly even while creating a new collection.
*/
(function() {
- "use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/libs/parallel_shell_helpers.js");
+"use strict";
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/parallel_shell_helpers.js");
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
- // Rig the election so that the first node is always primary and that modifying the
- // featureCompatibilityVersion document doesn't need to wait for data to replicate.
- var replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- replSetConfig.members[1].votes = 0;
+// Rig the election so that the first node is always primary and that modifying the
+// featureCompatibilityVersion document doesn't need to wait for data to replicate.
+var replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+replSetConfig.members[1].votes = 0;
- rst.initiate(replSetConfig);
+rst.initiate(replSetConfig);
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB("test");
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB("test");
- for (let versions
- of[{from: lastStableFCV, to: latestFCV}, {from: latestFCV, to: lastStableFCV}]) {
- jsTestLog("Changing FeatureCompatibilityVersion from " + versions.from + " to " +
- versions.to + " while creating a collection");
- assert.commandWorked(
- primaryDB.adminCommand({setFeatureCompatibilityVersion: versions.from}));
+for (let versions of [{from: lastStableFCV, to: latestFCV}, {from: latestFCV, to: lastStableFCV}]) {
+ jsTestLog("Changing FeatureCompatibilityVersion from " + versions.from + " to " + versions.to +
+ " while creating a collection");
+ assert.commandWorked(primaryDB.adminCommand({setFeatureCompatibilityVersion: versions.from}));
- assert.commandWorked(primaryDB.adminCommand(
- {configureFailPoint: "hangBeforeLoggingCreateCollection", mode: "alwaysOn"}));
- primaryDB.mycoll.drop();
+ assert.commandWorked(primaryDB.adminCommand(
+ {configureFailPoint: "hangBeforeLoggingCreateCollection", mode: "alwaysOn"}));
+ primaryDB.mycoll.drop();
- let awaitCreateCollection;
- let awaitUpgradeFCV;
+ let awaitCreateCollection;
+ let awaitUpgradeFCV;
- try {
- awaitCreateCollection = startParallelShell(function() {
- assert.commandWorked(db.runCommand({create: "mycoll"}));
- }, primary.port);
+ try {
+ awaitCreateCollection = startParallelShell(function() {
+ assert.commandWorked(db.runCommand({create: "mycoll"}));
+ }, primary.port);
- assert.soon(function() {
- return rawMongoProgramOutput().match("createCollection: test.mycoll");
- });
+ assert.soon(function() {
+ return rawMongoProgramOutput().match("createCollection: test.mycoll");
+ });
- awaitUpgradeFCV = startParallelShell(
- funWithArgs(function(version) {
- assert.commandWorked(
- db.adminCommand({setFeatureCompatibilityVersion: version}));
- }, versions.to), primary.port);
+ awaitUpgradeFCV = startParallelShell(
+ funWithArgs(function(version) {
+ assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: version}));
+ }, versions.to), primary.port);
- {
- let res;
- assert.soon(
- function() {
- res = assert.commandWorked(primaryDB.adminCommand(
- {getParameter: 1, featureCompatibilityVersion: 1}));
- return res.featureCompatibilityVersion.version === versions.from &&
- res.featureCompatibilityVersion.targetVersion === versions.new;
- },
- function() {
- return "targetVersion of featureCompatibilityVersion document wasn't " +
- "updated on primary: " + tojson(res);
- });
- }
- } finally {
- assert.commandWorked(primaryDB.adminCommand(
- {configureFailPoint: "hangBeforeLoggingCreateCollection", mode: "off"}));
+ {
+ let res;
+ assert.soon(
+ function() {
+ res = assert.commandWorked(
+ primaryDB.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}));
+ return res.featureCompatibilityVersion.version === versions.from &&
+ res.featureCompatibilityVersion.targetVersion === versions.new;
+ },
+ function() {
+ return "targetVersion of featureCompatibilityVersion document wasn't " +
+ "updated on primary: " + tojson(res);
+ });
}
-
- awaitCreateCollection();
- awaitUpgradeFCV();
- rst.checkReplicatedDataHashes();
+ } finally {
+ assert.commandWorked(primaryDB.adminCommand(
+ {configureFailPoint: "hangBeforeLoggingCreateCollection", mode: "off"}));
}
- rst.stopSet();
+
+ awaitCreateCollection();
+ awaitUpgradeFCV();
+ rst.checkReplicatedDataHashes();
+}
+rst.stopSet();
})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js
index c9d23d4a228..be72a724237 100644
--- a/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js
+++ b/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js
@@ -7,175 +7,174 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/feature_compatibility_version.js");
-
- const testName = "view_definition_feature_compatibility_version_multiversion";
- const dbpath = MongoRunner.dataPath + testName;
-
- // The 'pipelinesWithNewFeatures' array should be populated with aggregation pipelines that use
- // aggregation features new in the latest version of mongod. This test ensures that a view
- // definition accepts the new aggregation feature when the feature compatibility version is the
- // latest version, and rejects it when the feature compatibility version is the last-stable
- // version.
- const pipelinesWithNewFeatures = [];
-
- let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
- assert.neq(null, conn, "mongod was unable to start up");
- let testDB = conn.getDB(testName);
-
- // Explicitly set feature compatibility version to the latest version.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
- // Test that we are able to create a new view with any of the new features.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandWorked(
- testDB.createView("firstView" + i, "coll", pipe),
- `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
- ` ${latestFCV}`));
-
+"use strict";
+
+load("jstests/libs/feature_compatibility_version.js");
+
+const testName = "view_definition_feature_compatibility_version_multiversion";
+const dbpath = MongoRunner.dataPath + testName;
+
+// The 'pipelinesWithNewFeatures' array should be populated with aggregation pipelines that use
+// aggregation features new in the latest version of mongod. This test ensures that a view
+// definition accepts the new aggregation feature when the feature compatibility version is the
+// latest version, and rejects it when the feature compatibility version is the last-stable
+// version.
+const pipelinesWithNewFeatures = [];
+
+let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
+assert.neq(null, conn, "mongod was unable to start up");
+let testDB = conn.getDB(testName);
+
+// Explicitly set feature compatibility version to the latest version.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+
+// Test that we are able to create a new view with any of the new features.
+pipelinesWithNewFeatures.forEach(
+ (pipe, i) => assert.commandWorked(
+ testDB.createView("firstView" + i, "coll", pipe),
+ `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
+ ` ${latestFCV}`));
+
+// Test that we are able to create a new view with any of the new features.
+pipelinesWithNewFeatures.forEach(function(pipe, i) {
+ assert(testDB["firstView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
+ assert.commandWorked(testDB.createView("firstView" + i, "coll", []));
+ assert.commandWorked(
+ testDB.runCommand({collMod: "firstView" + i, viewOn: "coll", pipeline: pipe}),
+ `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` +
+ ` ${latestFCV}`);
+});
+
+// Create an empty view which we will attempt to update to use new query features while the
+// feature compatibility version is the last-stable version.
+assert.commandWorked(testDB.createView("emptyView", "coll", []));
+
+// Set the feature compatibility version to the last-stable version.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+// Read against an existing view using new query features should not fail.
+pipelinesWithNewFeatures.forEach(
+ (pipe, i) => assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
+ `Failed to query view with pipeline ${tojson(pipe)}`));
+
+// Trying to create a new view using new query features should fail.
+pipelinesWithNewFeatures.forEach(
+ (pipe, i) => assert.commandFailedWithCode(
+ testDB.createView("view_fail" + i, "coll", pipe),
+ ErrorCodes.QueryFeatureNotAllowed,
+ `Expected *not* to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
+ ` ${lastStableFCV}`));
+
+// Trying to update existing view to use new query features should also fail.
+pipelinesWithNewFeatures.forEach(
+ (pipe, i) => assert.commandFailedWithCode(
+ testDB.runCommand({collMod: "emptyView", viewOn: "coll", pipeline: pipe}),
+ ErrorCodes.QueryFeatureNotAllowed,
+ `Expected *not* to be able to modify view to use pipeline ${tojson(pipe)} while in` +
+ `FCV ${lastStableFCV}`));
+
+MongoRunner.stopMongod(conn);
+
+// Starting up the last-stable version of mongod with new query features will succeed.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "last-stable", noCleanData: true});
+assert.neq(null,
+ conn,
+ `version ${MongoRunner.getBinVersionFor("last-stable")} of mongod was` +
+ " unable to start up");
+testDB = conn.getDB(testName);
+
+// Reads will fail against views with new query features when running the last-stable version.
+// Not checking the code returned on failure as it is not uniform across the various
+// 'pipeline' arguments tested.
+pipelinesWithNewFeatures.forEach(
+ (pipe, i) => assert.commandFailed(
+ testDB.runCommand({find: "firstView" + i}),
+ `Expected read against view with pipeline ${tojson(pipe)} to fail on version` +
+ ` ${MongoRunner.getBinVersionFor("last-stable")}`));
+
+// Test that a read against a view that does not contain new query features succeeds.
+assert.commandWorked(testDB.runCommand({find: "emptyView"}));
+
+MongoRunner.stopMongod(conn);
+
+// Starting up the latest version of mongod should succeed, even though the feature
+// compatibility version is still set to the last-stable version.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
+assert.neq(null, conn, "mongod was unable to start up");
+testDB = conn.getDB(testName);
+
+// Read against an existing view using new query features should not fail.
+pipelinesWithNewFeatures.forEach(
+ (pipe, i) => assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
+ `Failed to query view with pipeline ${tojson(pipe)}`));
+
+// Set the feature compatibility version back to the latest version.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+
+pipelinesWithNewFeatures.forEach(function(pipe, i) {
+ assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
+ `Failed to query view with pipeline ${tojson(pipe)}`);
// Test that we are able to create a new view with any of the new features.
- pipelinesWithNewFeatures.forEach(function(pipe, i) {
- assert(testDB["firstView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
- assert.commandWorked(testDB.createView("firstView" + i, "coll", []));
- assert.commandWorked(
- testDB.runCommand({collMod: "firstView" + i, viewOn: "coll", pipeline: pipe}),
- `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` +
- ` ${latestFCV}`);
- });
-
- // Create an empty view which we will attempt to update to use new query features while the
- // feature compatibility version is the last-stable version.
- assert.commandWorked(testDB.createView("emptyView", "coll", []));
-
- // Set the feature compatibility version to the last-stable version.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
- // Read against an existing view using new query features should not fail.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
- `Failed to query view with pipeline ${tojson(pipe)}`));
-
- // Trying to create a new view using new query features should fail.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandFailedWithCode(
- testDB.createView("view_fail" + i, "coll", pipe),
- ErrorCodes.QueryFeatureNotAllowed,
- `Expected *not* to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
- ` ${lastStableFCV}`));
-
- // Trying to update existing view to use new query features should also fail.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandFailedWithCode(
- testDB.runCommand({collMod: "emptyView", viewOn: "coll", pipeline: pipe}),
- ErrorCodes.QueryFeatureNotAllowed,
- `Expected *not* to be able to modify view to use pipeline ${tojson(pipe)} while in` +
- `FCV ${lastStableFCV}`));
-
- MongoRunner.stopMongod(conn);
-
- // Starting up the last-stable version of mongod with new query features will succeed.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "last-stable", noCleanData: true});
- assert.neq(null,
- conn,
- `version ${MongoRunner.getBinVersionFor("last-stable")} of mongod was` +
- " unable to start up");
- testDB = conn.getDB(testName);
-
- // Reads will fail against views with new query features when running the last-stable version.
- // Not checking the code returned on failure as it is not uniform across the various
- // 'pipeline' arguments tested.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandFailed(
- testDB.runCommand({find: "firstView" + i}),
- `Expected read against view with pipeline ${tojson(pipe)} to fail on version` +
- ` ${MongoRunner.getBinVersionFor("last-stable")}`));
-
- // Test that a read against a view that does not contain new query features succeeds.
- assert.commandWorked(testDB.runCommand({find: "emptyView"}));
-
- MongoRunner.stopMongod(conn);
-
- // Starting up the latest version of mongod should succeed, even though the feature
- // compatibility version is still set to the last-stable version.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
- assert.neq(null, conn, "mongod was unable to start up");
- testDB = conn.getDB(testName);
-
- // Read against an existing view using new query features should not fail.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
- `Failed to query view with pipeline ${tojson(pipe)}`));
-
- // Set the feature compatibility version back to the latest version.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
- pipelinesWithNewFeatures.forEach(function(pipe, i) {
- assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
- `Failed to query view with pipeline ${tojson(pipe)}`);
- // Test that we are able to create a new view with any of the new features.
- assert.commandWorked(
- testDB.createView("secondView" + i, "coll", pipe),
- `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
- ` ${latestFCV}`);
-
- // Test that we are able to update an existing view to use any of the new features.
- assert(testDB["secondView" + i].drop(),
- `Drop of view with pipeline ${tojson(pipe)} failed`);
- assert.commandWorked(testDB.createView("secondView" + i, "coll", []));
- assert.commandWorked(
- testDB.runCommand({collMod: "secondView" + i, viewOn: "coll", pipeline: pipe}),
- `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` +
- ` ${latestFCV}`);
- });
-
- // Set the feature compatibility version to the last-stable version and then restart with
- // internalValidateFeaturesAsMaster=false.
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- binVersion: "latest",
- noCleanData: true,
- setParameter: "internalValidateFeaturesAsMaster=false"
- });
- assert.neq(null, conn, "mongod was unable to start up");
- testDB = conn.getDB(testName);
-
- pipelinesWithNewFeatures.forEach(function(pipe, i) {
- // Even though the feature compatibility version is the last-stable version, we should still
- // be able to create a view using new query features, because
- // internalValidateFeaturesAsMaster is false.
- assert.commandWorked(
- testDB.createView("thirdView" + i, "coll", pipe),
- `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
- ` ${lastStableFCV} with internalValidateFeaturesAsMaster=false`);
-
- // We should also be able to modify a view to use new query features.
- assert(testDB["thirdView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
- assert.commandWorked(testDB.createView("thirdView" + i, "coll", []));
- assert.commandWorked(
- testDB.runCommand({collMod: "thirdView" + i, viewOn: "coll", pipeline: pipe}),
- `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` +
- ` ${lastStableFCV} with internalValidateFeaturesAsMaster=false`);
- });
-
- MongoRunner.stopMongod(conn);
-
- // Starting up the last-stable version of mongod with new query features should succeed.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "last-stable", noCleanData: true});
- assert.neq(null,
- conn,
- `version ${MongoRunner.getBinVersionFor("last-stable")} of mongod was` +
- " unable to start up");
- testDB = conn.getDB(testName);
-
- // Existing views with new query features can be dropped.
- pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert(testDB["firstView" + i].drop(),
- `Drop of view with pipeline ${tojson(pipe)} failed`));
- assert(testDB.system.views.drop(), "Drop of system.views collection failed");
-
- MongoRunner.stopMongod(conn);
+ assert.commandWorked(
+ testDB.createView("secondView" + i, "coll", pipe),
+ `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
+ ` ${latestFCV}`);
+
+ // Test that we are able to update an existing view to use any of the new features.
+ assert(testDB["secondView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
+ assert.commandWorked(testDB.createView("secondView" + i, "coll", []));
+ assert.commandWorked(
+ testDB.runCommand({collMod: "secondView" + i, viewOn: "coll", pipeline: pipe}),
+ `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` +
+ ` ${latestFCV}`);
+});
+
+// Set the feature compatibility version to the last-stable version and then restart with
+// internalValidateFeaturesAsMaster=false.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ binVersion: "latest",
+ noCleanData: true,
+ setParameter: "internalValidateFeaturesAsMaster=false"
+});
+assert.neq(null, conn, "mongod was unable to start up");
+testDB = conn.getDB(testName);
+
+pipelinesWithNewFeatures.forEach(function(pipe, i) {
+ // Even though the feature compatibility version is the last-stable version, we should still
+ // be able to create a view using new query features, because
+ // internalValidateFeaturesAsMaster is false.
+ assert.commandWorked(
+ testDB.createView("thirdView" + i, "coll", pipe),
+ `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
+ ` ${lastStableFCV} with internalValidateFeaturesAsMaster=false`);
+
+ // We should also be able to modify a view to use new query features.
+ assert(testDB["thirdView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
+ assert.commandWorked(testDB.createView("thirdView" + i, "coll", []));
+ assert.commandWorked(
+ testDB.runCommand({collMod: "thirdView" + i, viewOn: "coll", pipeline: pipe}),
+ `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` +
+ ` ${lastStableFCV} with internalValidateFeaturesAsMaster=false`);
+});
+
+MongoRunner.stopMongod(conn);
+
+// Starting up the last-stable version of mongod with new query features should succeed.
+conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "last-stable", noCleanData: true});
+assert.neq(null,
+ conn,
+ `version ${MongoRunner.getBinVersionFor("last-stable")} of mongod was` +
+ " unable to start up");
+testDB = conn.getDB(testName);
+
+// Existing views with new query features can be dropped.
+pipelinesWithNewFeatures.forEach((pipe, i) =>
+ assert(testDB["firstView" + i].drop(),
+ `Drop of view with pipeline ${tojson(pipe)} failed`));
+assert(testDB.system.views.drop(), "Drop of system.views collection failed");
+
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/multiVersion/index_value_empty_string_repl.js b/jstests/multiVersion/index_value_empty_string_repl.js
index be9db1db0af..1a638d92086 100644
--- a/jstests/multiVersion/index_value_empty_string_repl.js
+++ b/jstests/multiVersion/index_value_empty_string_repl.js
@@ -4,32 +4,32 @@
*/
(function() {
- "use strict";
- load('./jstests/multiVersion/libs/multi_rs.js');
+"use strict";
+load('./jstests/multiVersion/libs/multi_rs.js');
- const newVersion = "latest";
- const oldVersion = "last-stable";
+const newVersion = "latest";
+const oldVersion = "last-stable";
- const name = "index_value_empty_string_repl";
- let nodes = {
- n1: {binVersion: oldVersion},
- n2: {binVersion: newVersion, rsConfig: {priority: 0}},
- };
+const name = "index_value_empty_string_repl";
+let nodes = {
+ n1: {binVersion: oldVersion},
+ n2: {binVersion: newVersion, rsConfig: {priority: 0}},
+};
- const rst = new ReplSetTest({name: name, nodes: nodes, waitForKeys: true});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({name: name, nodes: nodes, waitForKeys: true});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
- assert.commandWorked(testDB.testColl.createIndex({x: ""}));
- rst.awaitReplication();
+assert.commandWorked(testDB.testColl.createIndex({x: ""}));
+rst.awaitReplication();
- rst.add({binVersion: newVersion, rsConfig: {priority: 0}});
- rst.reInitiate();
+rst.add({binVersion: newVersion, rsConfig: {priority: 0}});
+rst.reInitiate();
- rst.awaitSecondaryNodes();
- rst.awaitReplication();
- rst.stopSet();
+rst.awaitSecondaryNodes();
+rst.awaitReplication();
+rst.stopSet();
})();
diff --git a/jstests/multiVersion/index_value_empty_string_upgrade.js b/jstests/multiVersion/index_value_empty_string_upgrade.js
index 5996620b31e..aa9ebf3b4d6 100644
--- a/jstests/multiVersion/index_value_empty_string_upgrade.js
+++ b/jstests/multiVersion/index_value_empty_string_upgrade.js
@@ -4,53 +4,55 @@
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/get_index_helpers.js');
+load('jstests/libs/get_index_helpers.js');
- const dbpath = MongoRunner.dataPath + 'empty_string_index_value';
- resetDbpath(dbpath);
+const dbpath = MongoRunner.dataPath + 'empty_string_index_value';
+resetDbpath(dbpath);
- const oldVersion = '4.0';
- const newVersion = 'latest';
+const oldVersion = '4.0';
+const newVersion = 'latest';
- // We set noCleanData to true in order to preserve the data files across mongod restart.
- const mongodOptions = {dbpath: dbpath, noCleanData: true, binVersion: oldVersion};
+// We set noCleanData to true in order to preserve the data files across mongod restart.
+const mongodOptions = {
+ dbpath: dbpath,
+ noCleanData: true,
+ binVersion: oldVersion
+};
- // Start up an old binary version mongod.
- let conn = MongoRunner.runMongod(mongodOptions);
+// Start up an old binary version mongod.
+let conn = MongoRunner.runMongod(mongodOptions);
- assert.neq(null, conn, `mongod was unable able to start with version ${oldVersion}`);
+assert.neq(null, conn, `mongod was unable able to start with version ${oldVersion}`);
- // Set up a collection on a 4.0 binary version node with one document and an index with
- // an empty string as index value, and then shut it down.
- let testDB = conn.getDB('test');
- assert.commandWorked(testDB.createCollection('testColl'));
- assert.commandWorked(testDB.testColl.insert({a: 1}));
- assert.commandWorked(testDB.testColl.createIndex({a: ""}));
- MongoRunner.stopMongod(conn);
+// Set up a collection on a 4.0 binary version node with one document and an index with
+// an empty string as index value, and then shut it down.
+let testDB = conn.getDB('test');
+assert.commandWorked(testDB.createCollection('testColl'));
+assert.commandWorked(testDB.testColl.insert({a: 1}));
+assert.commandWorked(testDB.testColl.createIndex({a: ""}));
+MongoRunner.stopMongod(conn);
- // Restart the mongod with the latest binary version and the 4.0 version data files.
- mongodOptions.binVersion = newVersion;
- conn = MongoRunner.runMongod(mongodOptions);
- assert.neq(null, conn);
+// Restart the mongod with the latest binary version and the 4.0 version data files.
+mongodOptions.binVersion = newVersion;
+conn = MongoRunner.runMongod(mongodOptions);
+assert.neq(null, conn);
- // Confirm that mongod startup does not fail due to the index specification
- // containing an empty string.
- testDB = conn.getDB('test');
- testDB.testColl.find();
- assert.eq(1,
- testDB.testColl.count({}, {hint: {a: ""}}),
- `data from ${oldVersion} should be available; options: ` + tojson(mongodOptions));
+// Confirm that mongod startup does not fail due to the index specification
+// containing an empty string.
+testDB = conn.getDB('test');
+testDB.testColl.find();
+assert.eq(1,
+ testDB.testColl.count({}, {hint: {a: ""}}),
+ `data from ${oldVersion} should be available; options: ` + tojson(mongodOptions));
- assert.neq(null,
- GetIndexHelpers.findByKeyPattern(testDB.testColl.getIndexes(), {a: ""}),
- `index from ${oldVersion} should be available; options: ` + tojson(mongodOptions));
+assert.neq(null,
+ GetIndexHelpers.findByKeyPattern(testDB.testColl.getIndexes(), {a: ""}),
+ `index from ${oldVersion} should be available; options: ` + tojson(mongodOptions));
- // Verify that indexes with empty string values cannot be created
- assert.commandFailedWithCode(testDB.testColl.createIndex({x: ""}),
- ErrorCodes.CannotCreateIndex);
-
- MongoRunner.stopMongod(conn);
+// Verify that indexes with empty string values cannot be created
+assert.commandFailedWithCode(testDB.testColl.createIndex({x: ""}), ErrorCodes.CannotCreateIndex);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/multiVersion/initialize_from_old_node.js b/jstests/multiVersion/initialize_from_old_node.js
index 0335ec9bf56..ea89a9c0adc 100644
--- a/jstests/multiVersion/initialize_from_old_node.js
+++ b/jstests/multiVersion/initialize_from_old_node.js
@@ -4,22 +4,22 @@
*/
(function() {
- "use strict";
- var name = "initialize_from_old";
- var oldVersion = 'last-stable';
- var newVersion = 'latest';
- var nodes = {
- n0: {binVersion: oldVersion},
- n1: {binVersion: newVersion},
- n2: {binVersion: newVersion}
- };
- var rst = new ReplSetTest({nodes: nodes, name: name});
- var conns = rst.startSet();
- var oldNode = conns[0];
- var config = rst.getReplSetConfig();
- var response = oldNode.getDB("admin").runCommand({replSetInitiate: config});
- assert.commandWorked(response);
- // Wait for secondaries to finish their initial sync before shutting down the cluster.
- rst.awaitSecondaryNodes();
- rst.stopSet();
+"use strict";
+var name = "initialize_from_old";
+var oldVersion = 'last-stable';
+var newVersion = 'latest';
+var nodes = {
+ n0: {binVersion: oldVersion},
+ n1: {binVersion: newVersion},
+ n2: {binVersion: newVersion}
+};
+var rst = new ReplSetTest({nodes: nodes, name: name});
+var conns = rst.startSet();
+var oldNode = conns[0];
+var config = rst.getReplSetConfig();
+var response = oldNode.getDB("admin").runCommand({replSetInitiate: config});
+assert.commandWorked(response);
+// Wait for secondaries to finish their initial sync before shutting down the cluster.
+rst.awaitSecondaryNodes();
+rst.stopSet();
})();
diff --git a/jstests/multiVersion/keystring_index.js b/jstests/multiVersion/keystring_index.js
index 42028486f84..15a6c8cc281 100644
--- a/jstests/multiVersion/keystring_index.js
+++ b/jstests/multiVersion/keystring_index.js
@@ -29,228 +29,225 @@
* is considered except for hashed and wildcard, which only consider the v2 non-unique case.
*/
(function() {
- 'use strict';
- load('jstests/hooks/validate_collections.js');
-
- // ----- Config
- // The number of documents created for each collection
- const numDocs = 100;
-
- const indexTypes = [
- {
- // an indicator of what the index is
- indexName: "BTreeIndex",
- // This function is called to create documents, which are then inserted into the
- // collection.
- createDoc: i => ({a: i}),
- // the options given to the .createIndex method
- // i.e. collection.createIndex(creationOptions)
- creationOptions: {a: 1},
- // This optional parameter specifies extra options to give to createIndex.
- // In the code, collection.createIndexes(creationOptions, createIndexOptions)
- // is called.
- createIndexOptions: {}
- },
- {indexName: "2d", createDoc: i => ({loc: [i, i]}), creationOptions: {loc: "2d"}},
- {
- indexName: "hayStack",
- createDoc: i => ({loc: {lng: (i / 2.0) * (i / 2.0), lat: (i / 2.0)}, a: i}),
- creationOptions: {loc: "geoHaystack", a: 1},
- createIndexOptions: {bucketSize: 1}
+'use strict';
+load('jstests/hooks/validate_collections.js');
+
+// ----- Config
+// The number of documents created for each collection
+const numDocs = 100;
+
+const indexTypes = [
+ {
+ // an indicator of what the index is
+ indexName: "BTreeIndex",
+ // This function is called to create documents, which are then inserted into the
+ // collection.
+ createDoc: i => ({a: i}),
+ // the options given to the .createIndex method
+ // i.e. collection.createIndex(creationOptions)
+ creationOptions: {a: 1},
+ // This optional parameter specifies extra options to give to createIndex.
+ // In the code, collection.createIndexes(creationOptions, createIndexOptions)
+ // is called.
+ createIndexOptions: {}
+ },
+ {indexName: "2d", createDoc: i => ({loc: [i, i]}), creationOptions: {loc: "2d"}},
+ {
+ indexName: "hayStack",
+ createDoc: i => ({loc: {lng: (i / 2.0) * (i / 2.0), lat: (i / 2.0)}, a: i}),
+ creationOptions: {loc: "geoHaystack", a: 1},
+ createIndexOptions: {bucketSize: 1}
+ },
+ {
+ indexName: "2dSphere",
+ createDoc: i => {
+ if (i == 0)
+ return {
+ "loc": {
+ "type": "Polygon",
+ "coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
+ }
+ };
+ else
+ return ({loc: {type: "Point", coordinates: [(i / 10.0) * (i / 10.0), (i / 10.0)]}});
},
- {
- indexName: "2dSphere",
- createDoc: i => {
- if (i == 0)
- return {
- "loc": {
- "type": "Polygon",
- "coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
- }
- };
- else
- return (
- {loc: {type: "Point", coordinates: [(i / 10.0) * (i / 10.0), (i / 10.0)]}});
- },
- creationOptions: {loc: "2dsphere"}
+ creationOptions: {loc: "2dsphere"}
+ },
+ {indexName: "text", createDoc: i => ({a: "a".repeat(i + 1)}), creationOptions: {a: "text"}},
+ {indexName: "hashed", createDoc: i => ({a: i}), creationOptions: {a: "hashed"}},
+ {
+ indexName: "wildCard",
+ createDoc: i => {
+ if (i == 0)
+ return {};
+ else if (i == 1)
+ return {a: null};
+ else if (i == 2)
+ return {a: {}};
+ else if (i % 2 == 0)
+ return {a: {b: i}};
+ else
+ return {a: [i]};
},
- {indexName: "text", createDoc: i => ({a: "a".repeat(i + 1)}), creationOptions: {a: "text"}},
- {indexName: "hashed", createDoc: i => ({a: i}), creationOptions: {a: "hashed"}},
- {
- indexName: "wildCard",
- createDoc: i => {
- if (i == 0)
- return {};
- else if (i == 1)
- return {a: null};
- else if (i == 2)
- return {a: {}};
- else if (i % 2 == 0)
- return {a: {b: i}};
- else
- return {a: [i]};
- },
- creationOptions: {"$**": 1}
- }
- ];
- // -----
-
- const dbpath = MongoRunner.dataPath + 'keystring_index';
- resetDbpath(dbpath);
-
- const defaultOptions = {dbpath};
-
- const version42 = {binVersion: '4.2', testCollection: 'testdb'};
- let mongodOptions42 = Object.extend({binVersion: version42.binVersion}, defaultOptions);
- let mongodOptionsCurrent = Object.extend({binVersion: 'latest'}, defaultOptions);
-
- // We will first start up an old binary version database, populate the database,
- // then upgrade and validate.
-
- // Start up an old binary version mongod.
- let conn = MongoRunner.runMongod(mongodOptions42);
-
- assert.neq(
- null, conn, 'mongod was unable able to start with version ' + tojson(mongodOptions42));
-
- let testDb = conn.getDB('test');
- assert.neq(null, testDb, 'testDb not found. conn.getDB(\'test\') returned null');
-
- populateDb(testDb);
- MongoRunner.stopMongod(conn);
-
- // Restart the mongod with the latest binary version on the old version's data files.
- conn = MongoRunner.runMongod(mongodOptionsCurrent);
- assert.neq(null, conn, 'mongod was unable to start with the latest version');
- testDb = conn.getDB('test');
-
- // Validate all the indexes.
- validateCollections(testDb, {full: true});
-
- // Next, we will repopulate the database with the latest version then downgrade and run
- // validate.
- dropAllUserCollections(testDb);
- populateDb(testDb);
- MongoRunner.stopMongod(conn);
-
- conn = MongoRunner.runMongod(mongodOptions42);
- assert.neq(
- null, conn, 'mongod was unable able to start with version ' + tojson(mongodOptions42));
-
- testDb = conn.getDB('test');
- assert.neq(null, testDb, 'testDb not found. conn.getDB(\'test\') returned null');
-
- validateCollections(testDb, {full: true});
- MongoRunner.stopMongod(conn);
-
- // ----------------- Utilities
-
- // Populate the database using the config specified by the indexTypes array.
- function populateDb(testDb) {
- // Create a new collection and index for each indexType in the array.
- for (let i = 0; i < indexTypes.length; i++) {
- const indexOptions = indexTypes[i];
- // Try unique and non-unique.
- for (const unique in [true, false]) {
- // Try index-version 1 and 2.
- for (let indexVersion = 1; indexVersion <= 2; indexVersion++) {
- let indexName = indexOptions.indexName;
-
- // We only run V2 non-unique for hashed and wildCard because they don't exist in
- // v1.
- if ((indexName == "hashed" || indexName == "wildCard") &&
- (unique == true || indexVersion == 1))
- continue;
-
- indexName += unique == true ? "Unique" : "NotUnique";
- indexName += `Version${indexVersion}`;
- let collectionName = version42.testCollection + indexName;
- print(`${indexName}: Creating Collection`);
- assert.commandWorked(testDb.createCollection(collectionName));
-
- print(`${indexName}: Inserting Documents`);
- if (unique)
- insertDocumentsUnique(
- testDb[collectionName], numDocs, indexOptions.createDoc);
- else
- insertDocumentsNotUnique(
- testDb[collectionName], numDocs, indexOptions.createDoc);
-
- let extraCreateIndexOptions = {
- name: indexName,
- v: indexVersion,
- unique: unique == true
- };
-
- if ("createIndexOptions" in indexOptions)
- extraCreateIndexOptions =
- Object.extend(extraCreateIndexOptions, indexOptions.createIndexOptions);
- print(JSON.stringify(extraCreateIndexOptions));
- print(`${indexName}: Creating Index`);
- assert.commandWorked(testDb[collectionName].createIndex(
- indexOptions.creationOptions, extraCreateIndexOptions));
-
- // Assert that the correct index type was created.
- let indexSpec = getIndexSpecByName(testDb[collectionName], indexName);
- assert.eq(indexVersion, indexSpec.v, tojson(indexSpec));
- }
- }
- }
+ creationOptions: {"$**": 1}
}
-
- // Drop all user created collections in a database.
- function dropAllUserCollections(testDb) {
- testDb.getCollectionNames().forEach((collName) => {
- if (!collName.startsWith("system.")) {
- testDb[collName].drop();
+];
+// -----
+
+const dbpath = MongoRunner.dataPath + 'keystring_index';
+resetDbpath(dbpath);
+
+const defaultOptions = {dbpath};
+
+const version42 = {
+ binVersion: '4.2',
+ testCollection: 'testdb'
+};
+let mongodOptions42 = Object.extend({binVersion: version42.binVersion}, defaultOptions);
+let mongodOptionsCurrent = Object.extend({binVersion: 'latest'}, defaultOptions);
+
+// We will first start up an old binary version database, populate the database,
+// then upgrade and validate.
+
+// Start up an old binary version mongod.
+let conn = MongoRunner.runMongod(mongodOptions42);
+
+assert.neq(null, conn, 'mongod was unable able to start with version ' + tojson(mongodOptions42));
+
+let testDb = conn.getDB('test');
+assert.neq(null, testDb, 'testDb not found. conn.getDB(\'test\') returned null');
+
+populateDb(testDb);
+MongoRunner.stopMongod(conn);
+
+// Restart the mongod with the latest binary version on the old version's data files.
+conn = MongoRunner.runMongod(mongodOptionsCurrent);
+assert.neq(null, conn, 'mongod was unable to start with the latest version');
+testDb = conn.getDB('test');
+
+// Validate all the indexes.
+validateCollections(testDb, {full: true});
+
+// Next, we will repopulate the database with the latest version then downgrade and run
+// validate.
+dropAllUserCollections(testDb);
+populateDb(testDb);
+MongoRunner.stopMongod(conn);
+
+conn = MongoRunner.runMongod(mongodOptions42);
+assert.neq(null, conn, 'mongod was unable able to start with version ' + tojson(mongodOptions42));
+
+testDb = conn.getDB('test');
+assert.neq(null, testDb, 'testDb not found. conn.getDB(\'test\') returned null');
+
+validateCollections(testDb, {full: true});
+MongoRunner.stopMongod(conn);
+
+// ----------------- Utilities
+
+// Populate the database using the config specified by the indexTypes array.
+function populateDb(testDb) {
+ // Create a new collection and index for each indexType in the array.
+ for (let i = 0; i < indexTypes.length; i++) {
+ const indexOptions = indexTypes[i];
+ // Try unique and non-unique.
+ for (const unique in [true, false]) {
+ // Try index-version 1 and 2.
+ for (let indexVersion = 1; indexVersion <= 2; indexVersion++) {
+ let indexName = indexOptions.indexName;
+
+ // We only run V2 non-unique for hashed and wildCard because they don't exist in
+ // v1.
+ if ((indexName == "hashed" || indexName == "wildCard") &&
+ (unique == true || indexVersion == 1))
+ continue;
+
+ indexName += unique == true ? "Unique" : "NotUnique";
+ indexName += `Version${indexVersion}`;
+ let collectionName = version42.testCollection + indexName;
+ print(`${indexName}: Creating Collection`);
+ assert.commandWorked(testDb.createCollection(collectionName));
+
+ print(`${indexName}: Inserting Documents`);
+ if (unique)
+ insertDocumentsUnique(testDb[collectionName], numDocs, indexOptions.createDoc);
+ else
+ insertDocumentsNotUnique(
+ testDb[collectionName], numDocs, indexOptions.createDoc);
+
+ let extraCreateIndexOptions = {
+ name: indexName,
+ v: indexVersion,
+ unique: unique == true
+ };
+
+ if ("createIndexOptions" in indexOptions)
+ extraCreateIndexOptions =
+ Object.extend(extraCreateIndexOptions, indexOptions.createIndexOptions);
+ print(JSON.stringify(extraCreateIndexOptions));
+ print(`${indexName}: Creating Index`);
+ assert.commandWorked(testDb[collectionName].createIndex(
+ indexOptions.creationOptions, extraCreateIndexOptions));
+
+ // Assert that the correct index type was created.
+ let indexSpec = getIndexSpecByName(testDb[collectionName], indexName);
+ assert.eq(indexVersion, indexSpec.v, tojson(indexSpec));
}
- });
+ }
}
+}
- function getIndexSpecByName(coll, indexName) {
- const indexes = coll.getIndexes();
- const indexesFilteredByName = indexes.filter(spec => spec.name === indexName);
- assert.eq(1,
- indexesFilteredByName.length,
- "index '" + indexName + "' not found: " + tojson(indexes));
- return indexesFilteredByName[0];
- }
+// Drop all user created collections in a database.
+function dropAllUserCollections(testDb) {
+ testDb.getCollectionNames().forEach((collName) => {
+ if (!collName.startsWith("system.")) {
+ testDb[collName].drop();
+ }
+ });
+}
- function fibonacci(num, memo) {
- memo = memo || {};
+function getIndexSpecByName(coll, indexName) {
+ const indexes = coll.getIndexes();
+ const indexesFilteredByName = indexes.filter(spec => spec.name === indexName);
+ assert.eq(
+ 1, indexesFilteredByName.length, "index '" + indexName + "' not found: " + tojson(indexes));
+ return indexesFilteredByName[0];
+}
- if (memo[num])
- return memo[num];
- if (num <= 1)
- return 1;
+function fibonacci(num, memo) {
+ memo = memo || {};
- memo[num] = fibonacci(num - 1, memo) + fibonacci(num - 2, memo);
+ if (memo[num])
return memo[num];
- }
-
- // Insert numDocs documents into the collection by calling getDoc.
- // NOTE: Documents returned from getDoc are inserted more than once.
- function insertDocumentsNotUnique(collection, numDocs, getDoc) {
- let fibNum = 0;
- // fibonacci numbers are used because the fibonnaci sequence is a
- // exponentially growing sequence that allows us to create documents
- // that are duplicated X number of times, for many small values of X and
- // a few large values of X.
- for (let i = 0; i < numDocs; i += fibonacci(fibNum++)) {
- let doc = getDoc(i);
- for (let j = 0; j < fibonacci(fibNum); j++) {
- assert.commandWorked(collection.insert(doc));
- }
- }
- }
-
- // Inserts numDocs into the collection by calling getDoc.
- // NOTE: getDoc is called exactly numDocs times.
- function insertDocumentsUnique(collection, numDocs, getDoc) {
- for (let i = 0; i < numDocs; i++) {
- let doc = getDoc(i);
+ if (num <= 1)
+ return 1;
+
+ memo[num] = fibonacci(num - 1, memo) + fibonacci(num - 2, memo);
+ return memo[num];
+}
+
+// Insert numDocs documents into the collection by calling getDoc.
+// NOTE: Documents returned from getDoc are inserted more than once.
+function insertDocumentsNotUnique(collection, numDocs, getDoc) {
+ let fibNum = 0;
+ // fibonacci numbers are used because the fibonnaci sequence is a
+ // exponentially growing sequence that allows us to create documents
+ // that are duplicated X number of times, for many small values of X and
+ // a few large values of X.
+ for (let i = 0; i < numDocs; i += fibonacci(fibNum++)) {
+ let doc = getDoc(i);
+ for (let j = 0; j < fibonacci(fibNum); j++) {
assert.commandWorked(collection.insert(doc));
}
}
-
+}
+
+// Inserts numDocs into the collection by calling getDoc.
+// NOTE: getDoc is called exactly numDocs times.
+function insertDocumentsUnique(collection, numDocs, getDoc) {
+ for (let i = 0; i < numDocs; i++) {
+ let doc = getDoc(i);
+ assert.commandWorked(collection.insert(doc));
+ }
+}
})();
diff --git a/jstests/multiVersion/libs/data_generators.js b/jstests/multiVersion/libs/data_generators.js
index edf358b8319..5f3ccf20e10 100644
--- a/jstests/multiVersion/libs/data_generators.js
+++ b/jstests/multiVersion/libs/data_generators.js
@@ -612,7 +612,7 @@ function CollectionMetadataGenerator(options) {
for (var option in options) {
if (options.hasOwnProperty(option)) {
if (option === 'capped') {
- if (typeof(options['capped']) !== 'boolean') {
+ if (typeof (options['capped']) !== 'boolean') {
throw Error(
"\"capped\" options must be boolean in CollectionMetadataGenerator");
}
diff --git a/jstests/multiVersion/libs/dumprestore_helpers.js b/jstests/multiVersion/libs/dumprestore_helpers.js
index c62c817332b..5ee3bac4306 100644
--- a/jstests/multiVersion/libs/dumprestore_helpers.js
+++ b/jstests/multiVersion/libs/dumprestore_helpers.js
@@ -55,10 +55,9 @@ function multiVersionDumpRestoreTest(configObj) {
var shardingTestConfig = {
name: testBaseName + "_sharded_source",
mongos: [{binVersion: configObj.serverSourceVersion}],
- shards: [{
- binVersion: configObj.serverSourceVersion,
- storageEngine: configObj.storageEngine
- }],
+ shards: [
+ {binVersion: configObj.serverSourceVersion, storageEngine: configObj.storageEngine}
+ ],
config: [{binVersion: configObj.serverSourceVersion}]
};
var shardingTest = new ShardingTest(shardingTestConfig);
diff --git a/jstests/multiVersion/libs/global_snapshot_reads_helpers.js b/jstests/multiVersion/libs/global_snapshot_reads_helpers.js
index be7730fdc99..407da4bbc7a 100644
--- a/jstests/multiVersion/libs/global_snapshot_reads_helpers.js
+++ b/jstests/multiVersion/libs/global_snapshot_reads_helpers.js
@@ -41,11 +41,11 @@ function runCommandAndVerifyResponse(sessionDb, txnNumber, cmdObj, expectSuccess
return true;
});
} else {
- assert.commandFailedWithCode(sessionDb.runCommand(cmdObj),
- expectedCode,
- "command did not fail with expected error code, cmd: " +
- tojson(cmdObj) + ", expectedCode: " +
- tojson(expectedCode));
+ assert.commandFailedWithCode(
+ sessionDb.runCommand(cmdObj),
+ expectedCode,
+ "command did not fail with expected error code, cmd: " + tojson(cmdObj) +
+ ", expectedCode: " + tojson(expectedCode));
}
return txnNumber;
}
@@ -73,10 +73,10 @@ function verifyGlobalSnapshotReads(conn, expectSuccess, expectedCode) {
txnNumber = runCommandAndVerifyResponse(shardedDb,
txnNumber,
{
- find: "sharded",
- filter: {x: 1},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber)
+ find: "sharded",
+ filter: {x: 1},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber)
},
expectSuccess,
expectedCode);
diff --git a/jstests/multiVersion/libs/initial_sync.js b/jstests/multiVersion/libs/initial_sync.js
index 4999a6a2405..329602f0c4b 100644
--- a/jstests/multiVersion/libs/initial_sync.js
+++ b/jstests/multiVersion/libs/initial_sync.js
@@ -12,7 +12,6 @@ load("./jstests/replsets/rslib.js");
*/
var multversionInitialSyncTest = function(
name, replSetVersion, newNodeVersion, configSettings, fcv) {
-
var nodes = {n1: {binVersion: replSetVersion}, n2: {binVersion: replSetVersion}};
jsTestLog("Starting up a two-node '" + replSetVersion + "' version replica set.");
diff --git a/jstests/multiVersion/libs/multi_cluster.js b/jstests/multiVersion/libs/multi_cluster.js
index 2937d0d4d6d..e611e541c3f 100644
--- a/jstests/multiVersion/libs/multi_cluster.js
+++ b/jstests/multiVersion/libs/multi_cluster.js
@@ -89,7 +89,6 @@ ShardingTest.prototype.upgradeCluster = function(binVersion, options) {
};
ShardingTest.prototype.restartMongoses = function() {
-
var numMongoses = this._mongos.length;
for (var i = 0; i < numMongoses; i++) {
diff --git a/jstests/multiVersion/libs/multi_rs.js b/jstests/multiVersion/libs/multi_rs.js
index 60bc253d877..ce67a5ed157 100644
--- a/jstests/multiVersion/libs/multi_rs.js
+++ b/jstests/multiVersion/libs/multi_rs.js
@@ -15,7 +15,6 @@ ReplSetTest.prototype.upgradeSet = function(options, user, pwd) {
// Then upgrade the primary after stepping down.
this.upgradePrimary(primary, options, user, pwd);
-
};
ReplSetTest.prototype.upgradeSecondaries = function(primary, options, user, pwd) {
@@ -125,7 +124,7 @@ ReplSetTest.prototype.reconnect = function(node) {
this.nodes[nodeId] = new Mongo(node.host);
var except = {};
for (var i in node) {
- if (typeof(node[i]) == "function")
+ if (typeof (node[i]) == "function")
continue;
this.nodes[nodeId][i] = node[i];
}
diff --git a/jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js b/jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js
index 0f77983adbf..2c74d3c632e 100644
--- a/jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js
+++ b/jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js
@@ -46,7 +46,9 @@ function runTxn(testDB, collName, {lsid, txnNumber}, {multiShard}) {
insert: collName,
documents: docs,
txnNumber: NumberLong(txnNumber),
- startTransaction: true, lsid, autocommit,
+ startTransaction: true,
+ lsid,
+ autocommit,
});
if (!startTransactionRes.ok) {
return startTransactionRes;
@@ -55,7 +57,9 @@ function runTxn(testDB, collName, {lsid, txnNumber}, {multiShard}) {
const secondStatementRes = testDB.runCommand({
insert: collName,
documents: docs,
- txnNumber: NumberLong(txnNumber), lsid, autocommit,
+ txnNumber: NumberLong(txnNumber),
+ lsid,
+ autocommit,
});
if (!secondStatementRes.ok) {
return secondStatementRes;
@@ -82,7 +86,8 @@ function assertMultiShardRetryableWriteWorked(testDB, collName, {lsid, txnNumber
assert.commandWorked(testDB.runCommand({
insert: collName,
documents: [{skey: -1, fromRetryableWrite: true}, {skey: 1, fromRetryableWrite: true}],
- txnNumber: NumberLong(txnNumber), lsid
+ txnNumber: NumberLong(txnNumber),
+ lsid
}));
}
@@ -93,7 +98,8 @@ function assertMultiShardRetryableWriteCanBeRetried(testDB, collName, {lsid, txn
assert.commandWorked(testDB.runCommand({
insert: collName,
documents: [{skey: -1, fromRetryableWrite: true}, {skey: 1, fromRetryableWrite: true}],
- txnNumber: NumberLong(txnNumber), lsid
+ txnNumber: NumberLong(txnNumber),
+ lsid
}));
assert.eq(numMultiShardRetryableWrites * 2, // Each write inserts 2 documents.
testDB[collName].find({fromRetryableWrite: true}).itcount());
diff --git a/jstests/multiVersion/libs/verify_collection_data.js b/jstests/multiVersion/libs/verify_collection_data.js
index b8eeca557f7..10b26534ccc 100644
--- a/jstests/multiVersion/libs/verify_collection_data.js
+++ b/jstests/multiVersion/libs/verify_collection_data.js
@@ -22,7 +22,6 @@ load('./jstests/multiVersion/libs/data_generators.js');
// Function to actually add the data generated by the given dataGenerator to a collection
createCollectionWithData = function(db, collectionName, dataGenerator) {
-
// Drop collection if exists
// TODO: add ability to control this
db.getCollection(collectionName).drop();
@@ -104,7 +103,6 @@ function CollectionDataValidator() {
this.validateCollectionData = function(
collection, dbVersionForCollection, options = {indexSpecFieldsToSkip: []}) {
-
if (!_initialized) {
throw Error("validateCollectionWithAllData called, but data is not initialized");
}
diff --git a/jstests/multiVersion/libs/verify_versions.js b/jstests/multiVersion/libs/verify_versions.js
index f20da90de80..fcc9345e276 100644
--- a/jstests/multiVersion/libs/verify_versions.js
+++ b/jstests/multiVersion/libs/verify_versions.js
@@ -4,39 +4,38 @@
var Mongo, assert;
(function() {
- "use strict";
- Mongo.prototype.getBinVersion = function() {
- var result = this.getDB("admin").runCommand({serverStatus: 1});
- return result.version;
- };
+"use strict";
+Mongo.prototype.getBinVersion = function() {
+ var result = this.getDB("admin").runCommand({serverStatus: 1});
+ return result.version;
+};
- // Checks that our mongodb process is of a certain version
- assert.binVersion = function(mongo, version) {
- var currVersion = mongo.getBinVersion();
- assert(MongoRunner.areBinVersionsTheSame(MongoRunner.getBinVersionFor(currVersion),
- MongoRunner.getBinVersionFor(version)),
- "version " + version + " (" + MongoRunner.getBinVersionFor(version) + ")" +
- " is not the same as " + MongoRunner.getBinVersionFor(currVersion));
- };
+// Checks that our mongodb process is of a certain version
+assert.binVersion = function(mongo, version) {
+ var currVersion = mongo.getBinVersion();
+ assert(MongoRunner.areBinVersionsTheSame(MongoRunner.getBinVersionFor(currVersion),
+ MongoRunner.getBinVersionFor(version)),
+ "version " + version + " (" + MongoRunner.getBinVersionFor(version) + ")" +
+ " is not the same as " + MongoRunner.getBinVersionFor(currVersion));
+};
- // Compares an array of desired versions and an array of found versions,
- // looking for versions not found
- assert.allBinVersions = function(versionsWanted, versionsFound) {
-
- for (var i = 0; i < versionsWanted.length; i++) {
- var version = versionsWanted[i];
- var found = false;
- for (var j = 0; j < versionsFound.length; j++) {
- if (MongoRunner.areBinVersionsTheSame(version, versionsFound[j])) {
- found = true;
- break;
- }
+// Compares an array of desired versions and an array of found versions,
+// looking for versions not found
+assert.allBinVersions = function(versionsWanted, versionsFound) {
+ for (var i = 0; i < versionsWanted.length; i++) {
+ var version = versionsWanted[i];
+ var found = false;
+ for (var j = 0; j < versionsFound.length; j++) {
+ if (MongoRunner.areBinVersionsTheSame(version, versionsFound[j])) {
+ found = true;
+ break;
}
-
- assert(found,
- "could not find version " + version + " (" +
- MongoRunner.getBinVersionFor(version) + ")" + " in " + versionsFound);
}
- };
+ assert(found,
+ "could not find version " + version + " (" + MongoRunner.getBinVersionFor(version) +
+ ")" +
+ " in " + versionsFound);
+ }
+};
}());
diff --git a/jstests/multiVersion/migration_between_mixed_version_mongods.js b/jstests/multiVersion/migration_between_mixed_version_mongods.js
index a2539d87ddd..bd6b41b0e16 100644
--- a/jstests/multiVersion/migration_between_mixed_version_mongods.js
+++ b/jstests/multiVersion/migration_between_mixed_version_mongods.js
@@ -10,101 +10,99 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
load("./jstests/multiVersion/libs/verify_versions.js");
(function() {
- "use strict";
-
- var options = {
- shards: [
- {binVersion: "last-stable"},
- {binVersion: "last-stable"},
- {binVersion: "latest"},
- {binVersion: "latest"}
- ],
- mongos: 1,
- other: {mongosOptions: {binVersion: "last-stable"}, shardAsReplicaSet: false}
- };
-
- var st = new ShardingTest(options);
- st.stopBalancer();
-
- assert.binVersion(st.shard0, "last-stable");
- assert.binVersion(st.shard1, "last-stable");
- assert.binVersion(st.shard2, "latest");
- assert.binVersion(st.shard3, "latest");
- assert.binVersion(st.s0, "last-stable");
-
- var mongos = st.s0, admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
-
- fooDB = "fooTest", fooNS = fooDB + ".foo", fooColl = mongos.getCollection(fooNS),
- fooDonor = st.shard0, fooRecipient = st.shard2,
- fooDonorColl = fooDonor.getCollection(fooNS),
- fooRecipientColl = fooRecipient.getCollection(fooNS),
-
- barDB = "barTest", barNS = barDB + ".foo", barColl = mongos.getCollection(barNS),
- barDonor = st.shard3, barRecipient = st.shard1,
- barDonorColl = barDonor.getCollection(barNS),
- barRecipientColl = barRecipient.getCollection(barNS);
-
- assert.commandWorked(admin.runCommand({enableSharding: fooDB}));
- assert.commandWorked(admin.runCommand({enableSharding: barDB}));
- st.ensurePrimaryShard(fooDB, shards[0]._id);
- st.ensurePrimaryShard(barDB, shards[3]._id);
-
- assert.commandWorked(admin.runCommand({shardCollection: fooNS, key: {a: 1}}));
- assert.commandWorked(admin.runCommand({split: fooNS, middle: {a: 10}}));
- assert.commandWorked(admin.runCommand({shardCollection: barNS, key: {a: 1}}));
- assert.commandWorked(admin.runCommand({split: barNS, middle: {a: 10}}));
-
- fooColl.insert({a: 0});
- assert.eq(null, fooColl.getDB().getLastError());
- fooColl.insert({a: 10});
- assert.eq(null, fooColl.getDB().getLastError());
- assert.eq(0, fooRecipientColl.count());
- assert.eq(2, fooDonorColl.count());
- assert.eq(2, fooColl.count());
-
- barColl.insert({a: 0});
- assert.eq(null, barColl.getDB().getLastError());
- barColl.insert({a: 10});
- assert.eq(null, barColl.getDB().getLastError());
- assert.eq(0, barRecipientColl.count());
- assert.eq(2, barDonorColl.count());
- assert.eq(2, barColl.count());
-
- /**
- * Perform two migrations:
- * shard0 (last-stable) -> foo chunk -> shard2 (latest)
- * shard3 (latest) -> bar chunk -> shard1 (last-stable)
- */
-
- assert.commandWorked(admin.runCommand(
- {moveChunk: fooNS, find: {a: 10}, to: shards[2]._id, _waitForDelete: true}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: barNS, find: {a: 10}, to: shards[1]._id, _waitForDelete: true}));
- assert.eq(1,
- fooRecipientColl.count(),
- "Foo collection migration failed. " +
- "Last-stable -> latest mongod version migration failure.");
- assert.eq(1,
- fooDonorColl.count(),
- "Foo donor lost its document. " +
- "Last-stable -> latest mongod version migration failure.");
- assert.eq(2,
- fooColl.count(),
- "Incorrect number of documents in foo collection. " +
- "Last-stable -> latest mongod version migration failure.");
- assert.eq(1,
- barRecipientColl.count(),
- "Bar collection migration failed. " +
- "Latest -> last-stable mongod version migration failure.");
- assert.eq(1,
- barDonorColl.count(),
- "Bar donor lost its document. " +
- "Latest -> last-stable mongod version migration failure.");
- assert.eq(2,
- barColl.count(),
- "Incorrect number of documents in bar collection. " +
- "Latest -> last-stable mongod version migration failure.");
-
- st.stop();
+"use strict";
+
+var options = {
+ shards: [
+ {binVersion: "last-stable"},
+ {binVersion: "last-stable"},
+ {binVersion: "latest"},
+ {binVersion: "latest"}
+ ],
+ mongos: 1,
+ other: {mongosOptions: {binVersion: "last-stable"}, shardAsReplicaSet: false}
+};
+
+var st = new ShardingTest(options);
+st.stopBalancer();
+
+assert.binVersion(st.shard0, "last-stable");
+assert.binVersion(st.shard1, "last-stable");
+assert.binVersion(st.shard2, "latest");
+assert.binVersion(st.shard3, "latest");
+assert.binVersion(st.s0, "last-stable");
+
+var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(),
+
+ fooDB = "fooTest", fooNS = fooDB + ".foo", fooColl = mongos.getCollection(fooNS),
+ fooDonor = st.shard0, fooRecipient = st.shard2, fooDonorColl = fooDonor.getCollection(fooNS),
+ fooRecipientColl = fooRecipient.getCollection(fooNS),
+
+ barDB = "barTest", barNS = barDB + ".foo", barColl = mongos.getCollection(barNS),
+ barDonor = st.shard3, barRecipient = st.shard1, barDonorColl = barDonor.getCollection(barNS),
+ barRecipientColl = barRecipient.getCollection(barNS);
+
+assert.commandWorked(admin.runCommand({enableSharding: fooDB}));
+assert.commandWorked(admin.runCommand({enableSharding: barDB}));
+st.ensurePrimaryShard(fooDB, shards[0]._id);
+st.ensurePrimaryShard(barDB, shards[3]._id);
+
+assert.commandWorked(admin.runCommand({shardCollection: fooNS, key: {a: 1}}));
+assert.commandWorked(admin.runCommand({split: fooNS, middle: {a: 10}}));
+assert.commandWorked(admin.runCommand({shardCollection: barNS, key: {a: 1}}));
+assert.commandWorked(admin.runCommand({split: barNS, middle: {a: 10}}));
+
+fooColl.insert({a: 0});
+assert.eq(null, fooColl.getDB().getLastError());
+fooColl.insert({a: 10});
+assert.eq(null, fooColl.getDB().getLastError());
+assert.eq(0, fooRecipientColl.count());
+assert.eq(2, fooDonorColl.count());
+assert.eq(2, fooColl.count());
+
+barColl.insert({a: 0});
+assert.eq(null, barColl.getDB().getLastError());
+barColl.insert({a: 10});
+assert.eq(null, barColl.getDB().getLastError());
+assert.eq(0, barRecipientColl.count());
+assert.eq(2, barDonorColl.count());
+assert.eq(2, barColl.count());
+
+/**
+ * Perform two migrations:
+ * shard0 (last-stable) -> foo chunk -> shard2 (latest)
+ * shard3 (latest) -> bar chunk -> shard1 (last-stable)
+ */
+
+assert.commandWorked(
+ admin.runCommand({moveChunk: fooNS, find: {a: 10}, to: shards[2]._id, _waitForDelete: true}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: barNS, find: {a: 10}, to: shards[1]._id, _waitForDelete: true}));
+assert.eq(1,
+ fooRecipientColl.count(),
+ "Foo collection migration failed. " +
+ "Last-stable -> latest mongod version migration failure.");
+assert.eq(1,
+ fooDonorColl.count(),
+ "Foo donor lost its document. " +
+ "Last-stable -> latest mongod version migration failure.");
+assert.eq(2,
+ fooColl.count(),
+ "Incorrect number of documents in foo collection. " +
+ "Last-stable -> latest mongod version migration failure.");
+assert.eq(1,
+ barRecipientColl.count(),
+ "Bar collection migration failed. " +
+ "Latest -> last-stable mongod version migration failure.");
+assert.eq(1,
+ barDonorColl.count(),
+ "Bar donor lost its document. " +
+ "Latest -> last-stable mongod version migration failure.");
+assert.eq(2,
+ barColl.count(),
+ "Incorrect number of documents in bar collection. " +
+ "Latest -> last-stable mongod version migration failure.");
+
+st.stop();
})();
diff --git a/jstests/multiVersion/minor_version_tags_new_old_new.js b/jstests/multiVersion/minor_version_tags_new_old_new.js
index 29daf24e8c8..eaae74c8810 100644
--- a/jstests/multiVersion/minor_version_tags_new_old_new.js
+++ b/jstests/multiVersion/minor_version_tags_new_old_new.js
@@ -1,16 +1,16 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/tags.js");
+load("jstests/replsets/libs/tags.js");
- var oldVersion = "last-stable";
- var newVersion = "latest";
- let nodes = [
- {binVersion: newVersion},
- {binVersion: oldVersion},
- {binVersion: newVersion},
- {binVersion: oldVersion},
- {binVersion: newVersion}
- ];
- new TagsTest({nodes: nodes, forceWriteMode: 'commands'}).run();
+var oldVersion = "last-stable";
+var newVersion = "latest";
+let nodes = [
+ {binVersion: newVersion},
+ {binVersion: oldVersion},
+ {binVersion: newVersion},
+ {binVersion: oldVersion},
+ {binVersion: newVersion}
+];
+new TagsTest({nodes: nodes, forceWriteMode: 'commands'}).run();
}());
diff --git a/jstests/multiVersion/minor_version_tags_old_new_old.js b/jstests/multiVersion/minor_version_tags_old_new_old.js
index ffbf838e2d7..22ce71964f9 100644
--- a/jstests/multiVersion/minor_version_tags_old_new_old.js
+++ b/jstests/multiVersion/minor_version_tags_old_new_old.js
@@ -1,16 +1,16 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/tags.js");
+load("jstests/replsets/libs/tags.js");
- var oldVersion = "last-stable";
- var newVersion = "latest";
- let nodes = [
- {binVersion: oldVersion},
- {binVersion: newVersion},
- {binVersion: oldVersion},
- {binVersion: newVersion},
- {binVersion: oldVersion}
- ];
- new TagsTest({nodes: nodes, forceWriteMode: 'commands'}).run();
+var oldVersion = "last-stable";
+var newVersion = "latest";
+let nodes = [
+ {binVersion: oldVersion},
+ {binVersion: newVersion},
+ {binVersion: oldVersion},
+ {binVersion: newVersion},
+ {binVersion: oldVersion}
+];
+new TagsTest({nodes: nodes, forceWriteMode: 'commands'}).run();
}());
diff --git a/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js b/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js
index 9bd702323f4..ee9b5e65032 100644
--- a/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js
+++ b/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js
@@ -15,51 +15,50 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- /* Start a ShardingTest with a 'last-stable' mongos so that a 'last-stable'
- * shard can be added. (A 'last-stable' shard cannot be added from a
- * current mongos because the wire protocol must be presumed different.)
- */
- var st = new ShardingTest({
- shards: 1,
- other: {
- mongosOptions: {binVersion: 'last-stable'},
- shardOptions: {binVersion: 'last-stable'},
- shardAsReplicaSet: false
- }
- });
-
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- assert.commandWorked(st.s.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
+/* Start a ShardingTest with a 'last-stable' mongos so that a 'last-stable'
+ * shard can be added. (A 'last-stable' shard cannot be added from a
+ * current mongos because the wire protocol must be presumed different.)
+ */
+var st = new ShardingTest({
+ shards: 1,
+ other: {
+ mongosOptions: {binVersion: 'last-stable'},
+ shardOptions: {binVersion: 'last-stable'},
+ shardAsReplicaSet: false
+ }
+});
- // Start a current-version mongos.
- var newMongos = MongoRunner.runMongos({configdb: st._configDB});
+assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
- // Write commands report failure by returning writeError:
+// Start a current-version mongos.
+var newMongos = MongoRunner.runMongos({configdb: st._configDB});
- assert.writeErrorWithCode(newMongos.getDB('test').foo.insert({x: 1}),
- ErrorCodes.IncompatibleServerVersion);
+// Write commands report failure by returning writeError:
- assert.writeErrorWithCode(newMongos.getDB('test').foo.update({x: 1}, {x: 1, y: 2}),
- ErrorCodes.IncompatibleServerVersion);
+assert.writeErrorWithCode(newMongos.getDB('test').foo.insert({x: 1}),
+ ErrorCodes.IncompatibleServerVersion);
- assert.writeErrorWithCode(newMongos.getDB('test').foo.remove({x: 1}),
- ErrorCodes.IncompatibleServerVersion);
+assert.writeErrorWithCode(newMongos.getDB('test').foo.update({x: 1}, {x: 1, y: 2}),
+ ErrorCodes.IncompatibleServerVersion);
- // Query commands, on failure, throw instead:
+assert.writeErrorWithCode(newMongos.getDB('test').foo.remove({x: 1}),
+ ErrorCodes.IncompatibleServerVersion);
- let res;
- res = newMongos.getDB('test').runCommand({find: 'foo'});
- assert.eq(res.code, ErrorCodes.IncompatibleServerVersion);
+// Query commands, on failure, throw instead:
- res = newMongos.getDB('test').runCommand({find: 'foo', filter: {x: 1}});
- assert.eq(res.code, ErrorCodes.IncompatibleServerVersion);
+let res;
+res = newMongos.getDB('test').runCommand({find: 'foo'});
+assert.eq(res.code, ErrorCodes.IncompatibleServerVersion);
- res = newMongos.getDB('test').runCommand({aggregate: 'foo', pipeline: [], cursor: {}});
- assert.eq(res.code, ErrorCodes.IncompatibleServerVersion);
+res = newMongos.getDB('test').runCommand({find: 'foo', filter: {x: 1}});
+assert.eq(res.code, ErrorCodes.IncompatibleServerVersion);
- MongoRunner.stopMongos(newMongos);
- st.stop();
+res = newMongos.getDB('test').runCommand({aggregate: 'foo', pipeline: [], cursor: {}});
+assert.eq(res.code, ErrorCodes.IncompatibleServerVersion);
+MongoRunner.stopMongos(newMongos);
+st.stop();
})();
diff --git a/jstests/multiVersion/remove_feature_compatibility_version.js b/jstests/multiVersion/remove_feature_compatibility_version.js
index 7c73d2847a1..b4c67b77a0f 100644
--- a/jstests/multiVersion/remove_feature_compatibility_version.js
+++ b/jstests/multiVersion/remove_feature_compatibility_version.js
@@ -3,17 +3,16 @@
* or removing the FCV document should not be allowed.
*/
(function() {
- 'use strict';
+'use strict';
- let standalone = MongoRunner.runMongod();
- assert.neq(null, standalone, 'mongod was unable to start up');
- let adminDB = standalone.getDB('admin');
+let standalone = MongoRunner.runMongod();
+assert.neq(null, standalone, 'mongod was unable to start up');
+let adminDB = standalone.getDB('admin');
- // Renaming the collection or deleting the document should fail.
- assert.commandFailedWithCode(
- adminDB.runCommand(
- {renameCollection: 'admin.system.version', to: 'admin.dummy.collection'}),
- ErrorCodes.IllegalOperation);
- assert.writeErrorWithCode(adminDB.system.version.remove({}), 40670);
- MongoRunner.stopMongod(standalone);
+// Renaming the collection or deleting the document should fail.
+assert.commandFailedWithCode(
+ adminDB.runCommand({renameCollection: 'admin.system.version', to: 'admin.dummy.collection'}),
+ ErrorCodes.IllegalOperation);
+assert.writeErrorWithCode(adminDB.system.version.remove({}), 40670);
+MongoRunner.stopMongod(standalone);
})();
diff --git a/jstests/multiVersion/sharded_txn_downgrade_cluster.js b/jstests/multiVersion/sharded_txn_downgrade_cluster.js
index 394cb89903f..892cfb3ded4 100644
--- a/jstests/multiVersion/sharded_txn_downgrade_cluster.js
+++ b/jstests/multiVersion/sharded_txn_downgrade_cluster.js
@@ -10,87 +10,87 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/multiVersion/libs/multi_rs.js");
- load("jstests/multiVersion/libs/multi_cluster.js");
- load("jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/multiVersion/libs/multi_rs.js");
+load("jstests/multiVersion/libs/multi_cluster.js");
+load("jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js");
- const dbName = "test";
- const collName = "sharded_txn_downgrade_cluster";
+const dbName = "test";
+const collName = "sharded_txn_downgrade_cluster";
- // Start a cluster with two shards at the latest version.
- const st = setUpTwoShardClusterWithBinVersion(dbName, collName, "latest");
+// Start a cluster with two shards at the latest version.
+const st = setUpTwoShardClusterWithBinVersion(dbName, collName, "latest");
- const txnIds = {
- commit: {lsid: {id: UUID()}, txnNumber: 0},
- commitMulti: {lsid: {id: UUID()}, txnNumber: 0},
- write: {lsid: {id: UUID()}, txnNumber: 0},
- };
+const txnIds = {
+ commit: {lsid: {id: UUID()}, txnNumber: 0},
+ commitMulti: {lsid: {id: UUID()}, txnNumber: 0},
+ write: {lsid: {id: UUID()}, txnNumber: 0},
+};
- let testDB = st.s.getDB(dbName);
+let testDB = st.s.getDB(dbName);
- // Retryable writes and transactions with and without prepare should work.
- assert.commandWorked(runTxn(testDB, collName, txnIds.commit, {multiShard: false}));
- assert.commandWorked(runTxn(testDB, collName, txnIds.commitMulti, {multiShard: true}));
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
+// Retryable writes and transactions with and without prepare should work.
+assert.commandWorked(runTxn(testDB, collName, txnIds.commit, {multiShard: false}));
+assert.commandWorked(runTxn(testDB, collName, txnIds.commitMulti, {multiShard: true}));
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
- // commitTransaction for both transactions and the retryable write should be retryable.
- assert.commandWorked(retryCommit(testDB, txnIds.commit));
- assert.commandWorked(retryCommit(testDB, txnIds.commitMulti));
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
+// commitTransaction for both transactions and the retryable write should be retryable.
+assert.commandWorked(retryCommit(testDB, txnIds.commit));
+assert.commandWorked(retryCommit(testDB, txnIds.commitMulti));
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
- // Downgrade featureCompatibilityVersion.
- assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+// Downgrade featureCompatibilityVersion.
+assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
- // Only the retryable write can be retried. Can't retry the multi shard transaction because it
- // uses coordinateCommit, which is not allowed in FCV 4.0.
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
- assert.commandFailedWithCode(retryCommit(testDB, txnIds.commit), ErrorCodes.NoSuchTransaction);
- assert.commandFailedWithCode(retryCommit(testDB, txnIds.commitMulti),
- ErrorCodes.CommandNotSupported);
+// Only the retryable write can be retried. Can't retry the multi shard transaction because it
+// uses coordinateCommit, which is not allowed in FCV 4.0.
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
+assert.commandFailedWithCode(retryCommit(testDB, txnIds.commit), ErrorCodes.NoSuchTransaction);
+assert.commandFailedWithCode(retryCommit(testDB, txnIds.commitMulti),
+ ErrorCodes.CommandNotSupported);
- downgradeUniqueIndexesScript(st.s.getDB("test"));
+downgradeUniqueIndexesScript(st.s.getDB("test"));
- // Downgrade the mongos servers first.
- jsTestLog("Downgrading mongos servers.");
- st.upgradeCluster("last-stable",
- {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
+// Downgrade the mongos servers first.
+jsTestLog("Downgrading mongos servers.");
+st.upgradeCluster("last-stable",
+ {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
- // Then downgrade the shard servers.
- jsTestLog("Downgrading shard servers.");
- st.upgradeCluster("last-stable",
- {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
+// Then downgrade the shard servers.
+jsTestLog("Downgrading shard servers.");
+st.upgradeCluster("last-stable",
+ {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
- // Then downgrade the config servers.
- jsTestLog("Downgrading config servers.");
- st.upgradeCluster("last-stable",
- {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
- checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+// Then downgrade the config servers.
+jsTestLog("Downgrading config servers.");
+st.upgradeCluster("last-stable",
+ {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
- testDB = st.s.getDB(dbName);
+testDB = st.s.getDB(dbName);
- // Can still retry the retryable write.
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
+// Can still retry the retryable write.
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
- // The txnIds used for the earlier commits should be re-usable because their history was
- // removed.
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commit);
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.commit);
+// The txnIds used for the earlier commits should be re-usable because their history was
+// removed.
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commit);
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.commit);
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commitMulti);
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.commitMulti);
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commitMulti);
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.commitMulti);
- // Can perform a new operation on each session.
- Object.keys(txnIds).forEach((txnIdKey) => {
- txnIds[txnIdKey].txnNumber += 1;
- });
+// Can perform a new operation on each session.
+Object.keys(txnIds).forEach((txnIdKey) => {
+ txnIds[txnIdKey].txnNumber += 1;
+});
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commit);
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commitMulti);
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commit);
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.commitMulti);
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
- st.stop();
+st.stop();
})();
diff --git a/jstests/multiVersion/sharded_txn_upgrade_cluster.js b/jstests/multiVersion/sharded_txn_upgrade_cluster.js
index 04c3bddfde7..6b89679aaab 100644
--- a/jstests/multiVersion/sharded_txn_upgrade_cluster.js
+++ b/jstests/multiVersion/sharded_txn_upgrade_cluster.js
@@ -10,73 +10,72 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
- load("jstests/multiVersion/libs/multi_rs.js");
- load("jstests/multiVersion/libs/multi_cluster.js");
- load("jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js");
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/multiVersion/libs/multi_rs.js");
+load("jstests/multiVersion/libs/multi_cluster.js");
+load("jstests/multiVersion/libs/sharded_txn_upgrade_downgrade_cluster_shared.js");
- const dbName = "test";
- const collName = "sharded_txn_upgrade_cluster";
+const dbName = "test";
+const collName = "sharded_txn_upgrade_cluster";
- // Start a cluster with two shards at the last stable version.
- const st = setUpTwoShardClusterWithBinVersion(dbName, collName, "last-stable");
+// Start a cluster with two shards at the last stable version.
+const st = setUpTwoShardClusterWithBinVersion(dbName, collName, "last-stable");
- const txnIds = {
- commit: {lsid: {id: UUID()}, txnNumber: 0},
- commitMulti: {lsid: {id: UUID()}, txnNumber: 0},
- write: {lsid: {id: UUID()}, txnNumber: 0},
- };
+const txnIds = {
+ commit: {lsid: {id: UUID()}, txnNumber: 0},
+ commitMulti: {lsid: {id: UUID()}, txnNumber: 0},
+ write: {lsid: {id: UUID()}, txnNumber: 0},
+};
- let testDB = st.s.getDB(dbName);
+let testDB = st.s.getDB(dbName);
- // Only retryable writes work and they are retryable.
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
+// Only retryable writes work and they are retryable.
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
- // Upgrade the config servers.
- jsTestLog("Upgrading config servers.");
- st.upgradeCluster("latest", {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
+// Upgrade the config servers.
+jsTestLog("Upgrading config servers.");
+st.upgradeCluster("latest", {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
- // Then upgrade the shard servers.
- jsTestLog("Upgrading shard servers.");
- st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
+// Then upgrade the shard servers.
+jsTestLog("Upgrading shard servers.");
+st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
- // Then upgrade mongos servers.
- jsTestLog("Upgrading mongos servers.");
- st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
- checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+// Then upgrade mongos servers.
+jsTestLog("Upgrading mongos servers.");
+st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
- testDB = st.s.getDB(dbName);
+testDB = st.s.getDB(dbName);
- // Can still retry the retryable write.
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
+// Can still retry the retryable write.
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
- // Transactions that don't use prepare are allowed in FCV 4.0 with a 4.2 binary mongos.
- assert.commandWorked(runTxn(testDB, collName, txnIds.commit, {multiShard: false}));
+// Transactions that don't use prepare are allowed in FCV 4.0 with a 4.2 binary mongos.
+assert.commandWorked(runTxn(testDB, collName, txnIds.commit, {multiShard: false}));
- // Multi shard transactions will fail because coordinateCommit is not allowed in FCV 4.0.
- assert.commandFailedWithCode(runTxn(testDB, collName, txnIds.commitMulti, {multiShard: true}),
- ErrorCodes.CommandNotSupported);
+// Multi shard transactions will fail because coordinateCommit is not allowed in FCV 4.0.
+assert.commandFailedWithCode(runTxn(testDB, collName, txnIds.commitMulti, {multiShard: true}),
+ ErrorCodes.CommandNotSupported);
- // Upgrade the cluster's feature compatibility version to the latest.
- assert.commandWorked(
- st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
+// Upgrade the cluster's feature compatibility version to the latest.
+assert.commandWorked(st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
- // Can still retry the retryable write and the committed transaction.
- assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
- assert.commandWorked(retryCommit(testDB, txnIds.commit));
+// Can still retry the retryable write and the committed transaction.
+assertMultiShardRetryableWriteCanBeRetried(testDB, collName, txnIds.write);
+assert.commandWorked(retryCommit(testDB, txnIds.commit));
- // Can perform a new operation on each session.
- Object.keys(txnIds).forEach((txnIdKey) => {
- txnIds[txnIdKey].txnNumber += 1;
- });
+// Can perform a new operation on each session.
+Object.keys(txnIds).forEach((txnIdKey) => {
+ txnIds[txnIdKey].txnNumber += 1;
+});
- assert.commandWorked(runTxn(testDB, collName, txnIds.commit, {multiShard: false}));
- assert.commandWorked(runTxn(testDB, collName, txnIds.commitMulti, {multiShard: true}));
- assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
+assert.commandWorked(runTxn(testDB, collName, txnIds.commit, {multiShard: false}));
+assert.commandWorked(runTxn(testDB, collName, txnIds.commitMulti, {multiShard: true}));
+assertMultiShardRetryableWriteWorked(testDB, collName, txnIds.write);
- st.stop();
+st.stop();
})();
diff --git a/jstests/multiVersion/skip_level_upgrade.js b/jstests/multiVersion/skip_level_upgrade.js
index 61c4904b0c7..6f268be451a 100644
--- a/jstests/multiVersion/skip_level_upgrade.js
+++ b/jstests/multiVersion/skip_level_upgrade.js
@@ -14,79 +14,78 @@
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/get_index_helpers.js');
+load('jstests/libs/get_index_helpers.js');
- const dbpath = MongoRunner.dataPath + 'skip_level_upgrade';
- resetDbpath(dbpath);
+const dbpath = MongoRunner.dataPath + 'skip_level_upgrade';
+resetDbpath(dbpath);
- // We set noCleanData to true in order to preserve the data files within an iteration.
- const defaultOptions = {
- dbpath: dbpath,
- noCleanData: true,
- };
+// We set noCleanData to true in order to preserve the data files within an iteration.
+const defaultOptions = {
+ dbpath: dbpath,
+ noCleanData: true,
+};
- // This lists all binary versions older than the last-stable version.
- // TODO SERVER-26792: In the future, we should have a common place from which both the
- // multiversion setup procedure and this test get information about supported major releases.
- const versions = [
- {binVersion: '3.2', testCollection: 'three_two'},
- {binVersion: '3.4', testCollection: 'three_four'},
- {binVersion: '3.6', testCollection: 'three_six'}
- ];
+// This lists all binary versions older than the last-stable version.
+// TODO SERVER-26792: In the future, we should have a common place from which both the
+// multiversion setup procedure and this test get information about supported major releases.
+const versions = [
+ {binVersion: '3.2', testCollection: 'three_two'},
+ {binVersion: '3.4', testCollection: 'three_four'},
+ {binVersion: '3.6', testCollection: 'three_six'}
+];
- // Iterate through versions specified in the versions list, and follow the steps outlined at
- // the top of this test file.
- for (let i = 0; i < versions.length; i++) {
- let version = versions[i];
- let mongodOptions = Object.extend({binVersion: version.binVersion}, defaultOptions);
+// Iterate through versions specified in the versions list, and follow the steps outlined at
+// the top of this test file.
+for (let i = 0; i < versions.length; i++) {
+ let version = versions[i];
+ let mongodOptions = Object.extend({binVersion: version.binVersion}, defaultOptions);
- // Start up an old binary version mongod.
- let conn = MongoRunner.runMongod(mongodOptions);
- let port = conn.port;
+ // Start up an old binary version mongod.
+ let conn = MongoRunner.runMongod(mongodOptions);
+ let port = conn.port;
- assert.neq(null,
- conn,
- 'mongod was unable able to start with version ' + tojson(version.binVersion));
+ assert.neq(
+ null, conn, 'mongod was unable able to start with version ' + tojson(version.binVersion));
- // Set up a collection on an old binary version node with one document and an index, and
- // then shut it down.
- let testDB = conn.getDB('test');
- assert.commandWorked(testDB.createCollection(version.testCollection));
- assert.writeOK(testDB[version.testCollection].insert({a: 1}));
- assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
- MongoRunner.stopMongod(conn);
+ // Set up a collection on an old binary version node with one document and an index, and
+ // then shut it down.
+ let testDB = conn.getDB('test');
+ assert.commandWorked(testDB.createCollection(version.testCollection));
+ assert.writeOK(testDB[version.testCollection].insert({a: 1}));
+ assert.commandWorked(testDB[version.testCollection].createIndex({a: 1}));
+ MongoRunner.stopMongod(conn);
- // Restart the mongod with the latest binary version on the old version's data files.
- // Should fail due to being a skip level upgrade.
- mongodOptions = Object.extend({binVersion: 'latest'}, defaultOptions);
- conn = MongoRunner.runMongod(mongodOptions);
- assert.eq(null, conn);
+ // Restart the mongod with the latest binary version on the old version's data files.
+ // Should fail due to being a skip level upgrade.
+ mongodOptions = Object.extend({binVersion: 'latest'}, defaultOptions);
+ conn = MongoRunner.runMongod(mongodOptions);
+ assert.eq(null, conn);
- // Restart the mongod with the latest version with --repair. Should fail due to being a
- // skip level upgrade.
- let returnCode = runMongoProgram("mongod", "--port", port, "--repair", "--dbpath", dbpath);
- assert.neq(returnCode, 0, "expected mongod --repair to fail with a skip level upgrade");
+ // Restart the mongod with the latest version with --repair. Should fail due to being a
+ // skip level upgrade.
+ let returnCode = runMongoProgram("mongod", "--port", port, "--repair", "--dbpath", dbpath);
+ assert.neq(returnCode, 0, "expected mongod --repair to fail with a skip level upgrade");
- // Restart the mongod in the originally specified version. Should succeed.
- mongodOptions = Object.extend({binVersion: version.binVersion}, defaultOptions);
- conn = MongoRunner.runMongod(mongodOptions);
+ // Restart the mongod in the originally specified version. Should succeed.
+ mongodOptions = Object.extend({binVersion: version.binVersion}, defaultOptions);
+ conn = MongoRunner.runMongod(mongodOptions);
- // Verify that the data and indices from previous iterations are still accessible.
- testDB = conn.getDB('test');
- assert.eq(1,
- testDB[version.testCollection].count(),
- `data from ${version.testCollection} should be available; options: ` +
- tojson(mongodOptions));
- assert.neq(
- null,
- GetIndexHelpers.findByKeyPattern(testDB[version.testCollection].getIndexes(), {a: 1}),
- `index from ${version.testCollection} should be available; options: ` +
- tojson(mongodOptions));
+ // Verify that the data and indices from previous iterations are still accessible.
+ testDB = conn.getDB('test');
+ assert.eq(1,
+ testDB[version.testCollection].count(),
+ `data from ${version.testCollection} should be available; options: ` +
+ tojson(mongodOptions));
+ assert.neq(
+ null,
+ GetIndexHelpers.findByKeyPattern(testDB[version.testCollection].getIndexes(), {a: 1}),
+ `index from ${version.testCollection} should be available; options: ` +
+ tojson(mongodOptions));
- MongoRunner.stopMongod(conn);
+ MongoRunner.stopMongod(conn);
- resetDbpath(dbpath);
- }
+ resetDbpath(dbpath);
+}
})();
diff --git a/jstests/multiVersion/unique_index_empty_collmod.js b/jstests/multiVersion/unique_index_empty_collmod.js
index 732c6e1ef63..cd303aefde8 100644
--- a/jstests/multiVersion/unique_index_empty_collmod.js
+++ b/jstests/multiVersion/unique_index_empty_collmod.js
@@ -4,40 +4,40 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
+'use strict';
- const newIndexFormatVersion = 12;
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
- const nodes = rst.startSet();
- rst.initiate();
+const newIndexFormatVersion = 12;
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
+const nodes = rst.startSet();
+rst.initiate();
- let dbName = 'test';
- let collName = 't';
- const primary = rst.getPrimary();
- const primaryDb = primary.getDB(dbName);
- const secondary = rst.getSecondary();
- const coll = primaryDb.getCollection(collName);
+let dbName = 'test';
+let collName = 't';
+const primary = rst.getPrimary();
+const primaryDb = primary.getDB(dbName);
+const secondary = rst.getSecondary();
+const coll = primaryDb.getCollection(collName);
- assert.commandWorked(coll.createIndex({a: 1}, {unique: true}));
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- assert.commandWorked(primaryDb.adminCommand({setFeatureCompatibilityVersion: '4.0'}));
- assert.commandWorked(primaryDb.runCommand({collMod: coll.getName()}));
+assert.commandWorked(coll.createIndex({a: 1}, {unique: true}));
+assert.writeOK(coll.insert({_id: 0, a: 1}));
+assert.commandWorked(primaryDb.adminCommand({setFeatureCompatibilityVersion: '4.0'}));
+assert.commandWorked(primaryDb.runCommand({collMod: coll.getName()}));
- // Wait for replication of the index creation.
- rst.awaitReplication();
- const secondaryDb = secondary.getDB(dbName);
- const coll_secondary = secondaryDb.getCollection(collName);
- const index = coll_secondary.getIndexes();
- assert.eq(index[1].unique, true, "Expected a unique index: " + tojson(index[1]));
- // Validate that the unique index is not updated on the secondary after an empty collMod
- // command.
- const indexFormatVersion = coll_secondary.aggregate({$collStats: {storageStats: {}}})
- .next()
- .storageStats.indexDetails[index[1].name]
- .metadata.formatVersion;
- assert.eq(indexFormatVersion,
- newIndexFormatVersion,
- "Expected index format version 12 for the unique index: " + tojson(index[1]));
+// Wait for replication of the index creation.
+rst.awaitReplication();
+const secondaryDb = secondary.getDB(dbName);
+const coll_secondary = secondaryDb.getCollection(collName);
+const index = coll_secondary.getIndexes();
+assert.eq(index[1].unique, true, "Expected a unique index: " + tojson(index[1]));
+// Validate that the unique index is not updated on the secondary after an empty collMod
+// command.
+const indexFormatVersion = coll_secondary.aggregate({$collStats: {storageStats: {}}})
+ .next()
+ .storageStats.indexDetails[index[1].name]
+ .metadata.formatVersion;
+assert.eq(indexFormatVersion,
+ newIndexFormatVersion,
+ "Expected index format version 12 for the unique index: " + tojson(index[1]));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/multiVersion/upgrade_downgrade_cluster.js b/jstests/multiVersion/upgrade_downgrade_cluster.js
index 0801ae57986..7e450c87b71 100644
--- a/jstests/multiVersion/upgrade_downgrade_cluster.js
+++ b/jstests/multiVersion/upgrade_downgrade_cluster.js
@@ -18,173 +18,173 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- /**
- * @param isRSCluster {bool} use replica set shards.
- */
- var runTest = function(isRSCluster) {
- "use strict";
-
- const kMinVersion = 5;
- const kCurrentVerion = 6;
-
- jsTest.log("Starting" + (isRSCluster ? " (replica set)" : "") + " cluster" + "...");
-
- var testCRUDAndAgg = function(db) {
- assert.writeOK(db.foo.insert({x: 1}));
- assert.writeOK(db.foo.insert({x: -1}));
- assert.writeOK(db.foo.update({x: 1}, {$set: {y: 1}}));
- assert.writeOK(db.foo.update({x: -1}, {$set: {y: 1}}));
- var doc1 = db.foo.findOne({x: 1});
- assert.eq(1, doc1.y);
- var doc2 = db.foo.findOne({x: -1});
- assert.eq(1, doc2.y);
-
- // Make sure a user can always do an aggregation with an $out using the 4.0-style
- // syntax.
- // TODO SERVER-36930 This immediately invoked function can be removed when we are sure
- // all nodes in the cluster understand both the new and the old $out syntax.
- (function testAggOut() {
- db.sanity_check.drop();
- assert.eq(0, db.foo.aggregate([{$out: "sanity_check"}]).itcount());
- assert.eq(2, db.sanity_check.find().itcount());
- }());
-
- assert.writeOK(db.foo.remove({x: 1}, true));
- assert.writeOK(db.foo.remove({x: -1}, true));
- assert.eq(null, db.foo.findOne());
- };
-
- var st = new ShardingTest({
- shards: 2,
- mongos: 1,
- other: {
- mongosOptions: {binVersion: "last-stable"},
- configOptions: {binVersion: "last-stable"},
- shardOptions: {binVersion: "last-stable"},
-
- rsOptions: {binVersion: "last-stable"},
- rs: isRSCluster,
- shardAsReplicaSet: false
- }
- });
- st.configRS.awaitReplication();
-
- // check that config.version document gets initialized properly
- var version = st.s.getCollection('config.version').findOne();
- assert.eq(version.minCompatibleVersion, kMinVersion);
- assert.eq(version.currentVersion, kCurrentVerion);
- var clusterID = version.clusterId;
- assert.neq(null, clusterID);
- assert.eq(version.excluding, undefined);
-
- // Setup sharded collection
- assert.commandWorked(st.s.adminCommand({enableSharding: 'sharded'}));
- st.ensurePrimaryShard('sharded', st.shard0.shardName);
-
- assert.commandWorked(st.s.adminCommand({shardCollection: 'sharded.foo', key: {x: 1}}));
- assert.commandWorked(st.s.adminCommand({split: 'sharded.foo', middle: {x: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: 'sharded.foo', find: {x: 1}, to: st.shard1.shardName}));
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // upgrade the config servers first
- jsTest.log('upgrading config servers');
- st.upgradeCluster("latest", {upgradeMongos: false, upgradeShards: false});
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // Then upgrade the shards.
- jsTest.log('upgrading shard servers');
- st.upgradeCluster("latest", {upgradeMongos: false, upgradeConfigs: false});
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // Finally, upgrade mongos
- jsTest.log('upgrading mongos servers');
- st.upgradeCluster("latest", {upgradeConfigs: false, upgradeShards: false});
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
-
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
-
- // Check that version document is unmodified.
- version = st.s.getCollection('config.version').findOne();
- assert.eq(version.minCompatibleVersion, kMinVersion);
- assert.eq(version.currentVersion, kCurrentVerion);
- assert.eq(clusterID, version.clusterId);
- assert.eq(version.excluding, undefined);
-
- ///////////////////////////////////////////////////////////////////////////////////////////
- // Downgrade back
+/**
+ * @param isRSCluster {bool} use replica set shards.
+ */
+var runTest = function(isRSCluster) {
+ "use strict";
+
+ const kMinVersion = 5;
+ const kCurrentVerion = 6;
+
+ jsTest.log("Starting" + (isRSCluster ? " (replica set)" : "") + " cluster" +
+ "...");
+
+ var testCRUDAndAgg = function(db) {
+ assert.writeOK(db.foo.insert({x: 1}));
+ assert.writeOK(db.foo.insert({x: -1}));
+ assert.writeOK(db.foo.update({x: 1}, {$set: {y: 1}}));
+ assert.writeOK(db.foo.update({x: -1}, {$set: {y: 1}}));
+ var doc1 = db.foo.findOne({x: 1});
+ assert.eq(1, doc1.y);
+ var doc2 = db.foo.findOne({x: -1});
+ assert.eq(1, doc2.y);
+
+ // Make sure a user can always do an aggregation with an $out using the 4.0-style
+ // syntax.
+ // TODO SERVER-36930 This immediately invoked function can be removed when we are sure
+ // all nodes in the cluster understand both the new and the old $out syntax.
+ (function testAggOut() {
+ db.sanity_check.drop();
+ assert.eq(0, db.foo.aggregate([{$out: "sanity_check"}]).itcount());
+ assert.eq(2, db.sanity_check.find().itcount());
+ }());
+
+ assert.writeOK(db.foo.remove({x: 1}, true));
+ assert.writeOK(db.foo.remove({x: -1}, true));
+ assert.eq(null, db.foo.findOne());
+ };
- jsTest.log('downgrading mongos servers');
- st.upgradeCluster("last-stable", {upgradeConfigs: false, upgradeShards: false});
+ var st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ other: {
+ mongosOptions: {binVersion: "last-stable"},
+ configOptions: {binVersion: "last-stable"},
+ shardOptions: {binVersion: "last-stable"},
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
+ rsOptions: {binVersion: "last-stable"},
+ rs: isRSCluster,
+ shardAsReplicaSet: false
+ }
+ });
+ st.configRS.awaitReplication();
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
+ // check that config.version document gets initialized properly
+ var version = st.s.getCollection('config.version').findOne();
+ assert.eq(version.minCompatibleVersion, kMinVersion);
+ assert.eq(version.currentVersion, kCurrentVerion);
+ var clusterID = version.clusterId;
+ assert.neq(null, clusterID);
+ assert.eq(version.excluding, undefined);
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
+    // Set up sharded collection
+ assert.commandWorked(st.s.adminCommand({enableSharding: 'sharded'}));
+ st.ensurePrimaryShard('sharded', st.shard0.shardName);
- jsTest.log('downgrading shard servers');
- st.upgradeCluster("last-stable", {upgradeMongos: false, upgradeConfigs: false});
+ assert.commandWorked(st.s.adminCommand({shardCollection: 'sharded.foo', key: {x: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: 'sharded.foo', middle: {x: 0}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'sharded.foo', find: {x: 1}, to: st.shard1.shardName}));
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
+ // upgrade the config servers first
+ jsTest.log('upgrading config servers');
+ st.upgradeCluster("latest", {upgradeMongos: false, upgradeShards: false});
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
- jsTest.log('downgrading config servers');
- st.upgradeCluster("last-stable", {upgradeMongos: false, upgradeShards: false});
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
+ // Then upgrade the shards.
+ jsTest.log('upgrading shard servers');
+ st.upgradeCluster("latest", {upgradeMongos: false, upgradeConfigs: false});
- testCRUDAndAgg(st.s.getDB('unsharded'));
- testCRUDAndAgg(st.s.getDB('sharded'));
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
- // Check that version document is unmodified.
- version = st.s.getCollection('config.version').findOne();
- assert.eq(version.minCompatibleVersion, kMinVersion);
- assert.eq(version.currentVersion, kCurrentVerion);
- assert.eq(clusterID, version.clusterId);
- assert.eq(version.excluding, undefined);
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
- st.stop();
- };
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Finally, upgrade mongos
+ jsTest.log('upgrading mongos servers');
+ st.upgradeCluster("latest", {upgradeConfigs: false, upgradeShards: false});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Check that version document is unmodified.
+ version = st.s.getCollection('config.version').findOne();
+ assert.eq(version.minCompatibleVersion, kMinVersion);
+ assert.eq(version.currentVersion, kCurrentVerion);
+ assert.eq(clusterID, version.clusterId);
+ assert.eq(version.excluding, undefined);
+
+ ///////////////////////////////////////////////////////////////////////////////////////////
+ // Downgrade back
+
+ jsTest.log('downgrading mongos servers');
+ st.upgradeCluster("last-stable", {upgradeConfigs: false, upgradeShards: false});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ jsTest.log('downgrading shard servers');
+ st.upgradeCluster("last-stable", {upgradeMongos: false, upgradeConfigs: false});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ jsTest.log('downgrading config servers');
+ st.upgradeCluster("last-stable", {upgradeMongos: false, upgradeShards: false});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+
+ // Check that version document is unmodified.
+ version = st.s.getCollection('config.version').findOne();
+ assert.eq(version.minCompatibleVersion, kMinVersion);
+ assert.eq(version.currentVersion, kCurrentVerion);
+ assert.eq(clusterID, version.clusterId);
+ assert.eq(version.excluding, undefined);
- runTest(false);
- runTest(true);
+ st.stop();
+};
+runTest(false);
+runTest(true);
})();
diff --git a/jstests/multiVersion/verify_versions_test.js b/jstests/multiVersion/verify_versions_test.js
index b1421d1c10e..e592f154f40 100644
--- a/jstests/multiVersion/verify_versions_test.js
+++ b/jstests/multiVersion/verify_versions_test.js
@@ -10,81 +10,81 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- "use strict";
-
- function assertBinVersionsEqual(v1, v2) {
- assert(MongoRunner.areBinVersionsTheSame(v1, v2),
- "Expected \"" + v1 + "\" to equal \"" + v2 + "\"");
- }
-
- function assertBinVersionsNotEqual(v1, v2) {
- assert(!MongoRunner.areBinVersionsTheSame(v1, v2),
- "Expected \"" + v1 + "\" not to equal \"" + v2 + "\"");
- }
-
- function assertBinVersionComparesHigher(v1, v2) {
- assert.eq(1,
- MongoRunner.compareBinVersions(v1, v2),
- "Expected \"" + v1 + "\" to compare higher than \"" + v2 + "\"");
- }
-
- function assertBinVersionComparesLower(v1, v2) {
- assert.eq(-1,
- MongoRunner.compareBinVersions(v1, v2),
- "Expected \"" + v1 + "\" to compare lower than \"" + v2 + "\"");
- }
-
- function assertBinVersionComparesEqual(v1, v2) {
- assert.eq(0,
- MongoRunner.compareBinVersions(v1, v2),
- "Expected \"" + v1 + "\" to compare equal to \"" + v2 + "\"");
- }
-
- // The current version is in the 4.3 series. This has to be changed very time we bump
- // the major version pair, but it provides a useful test of assumptions.
- assertBinVersionsEqual("4.3", version());
- assertBinVersionComparesEqual("4.3", version());
-
- // "latest" is the same version as the shell, "last-stable" is not.
- assertBinVersionsEqual("latest", version());
- assertBinVersionsEqual("", "latest");
- assertBinVersionsEqual("", version());
-
- assertBinVersionComparesEqual("latest", version());
- assertBinVersionComparesEqual("", "latest");
- assertBinVersionComparesEqual("", version());
-
- assertBinVersionsNotEqual("latest", "last-stable");
- assertBinVersionsNotEqual("last-stable", version());
-
- assertBinVersionComparesHigher("latest", "last-stable");
- assertBinVersionComparesLower("last-stable", version());
-
- // 3.2 means 3.2.z for any value of z. It does not mean 3.0 or 3.0.w.
- assertBinVersionsEqual("3.2", "3.2.4");
- assertBinVersionsEqual("3.2.4", "3.2");
- assertBinVersionsNotEqual("3.2", "3.0");
- assertBinVersionsNotEqual("3.0.9", "3.2.9");
-
- assertBinVersionComparesEqual("3.2", "3.2.4");
- assertBinVersionComparesEqual("3.2.4", "3.2");
- assertBinVersionComparesHigher("3.2", "3.0");
- assertBinVersionComparesLower("3.0.9", "3.2.9");
-
- assertBinVersionsEqual("3.4", "3.4.0-abcd");
- assertBinVersionsEqual("3.4.0", "3.4.0-abcd");
-
- assertBinVersionComparesEqual("3.4", "3.4.0-abcd");
- assertBinVersionComparesEqual("3.4.0", "3.4.0-abcd");
- assertBinVersionComparesHigher("3.6.0", "3.4.0-abcd");
- assertBinVersionComparesHigher("4.0.0", "3.6.99-abcd");
- assertBinVersionComparesHigher("3.4.1", "3.4.0-abcd");
- assertBinVersionComparesLower("3.4.0-abc", "3.4.1-xyz");
-
- // Prohibit versions that don't have at least two components (3 is no good, 3.2 is).
- assert.throws(MongoRunner.areBinVersionsTheSame, ["3", "3.2"]);
- assert.throws(MongoRunner.areBinVersionsTheSame, ["3.2", "3"]);
-
- // Throw an error when versions differ only by githash.
- assert.throws(MongoRunner.compareBinVersions, ["3.4.1-abc", "3.4.1-xyz"]);
+"use strict";
+
+function assertBinVersionsEqual(v1, v2) {
+ assert(MongoRunner.areBinVersionsTheSame(v1, v2),
+ "Expected \"" + v1 + "\" to equal \"" + v2 + "\"");
+}
+
+function assertBinVersionsNotEqual(v1, v2) {
+ assert(!MongoRunner.areBinVersionsTheSame(v1, v2),
+ "Expected \"" + v1 + "\" not to equal \"" + v2 + "\"");
+}
+
+function assertBinVersionComparesHigher(v1, v2) {
+ assert.eq(1,
+ MongoRunner.compareBinVersions(v1, v2),
+ "Expected \"" + v1 + "\" to compare higher than \"" + v2 + "\"");
+}
+
+function assertBinVersionComparesLower(v1, v2) {
+ assert.eq(-1,
+ MongoRunner.compareBinVersions(v1, v2),
+ "Expected \"" + v1 + "\" to compare lower than \"" + v2 + "\"");
+}
+
+function assertBinVersionComparesEqual(v1, v2) {
+ assert.eq(0,
+ MongoRunner.compareBinVersions(v1, v2),
+ "Expected \"" + v1 + "\" to compare equal to \"" + v2 + "\"");
+}
+
+// The current version is in the 4.3 series. This has to be changed every time we bump
+// the major version pair, but it provides a useful test of assumptions.
+assertBinVersionsEqual("4.3", version());
+assertBinVersionComparesEqual("4.3", version());
+
+// "latest" is the same version as the shell, "last-stable" is not.
+assertBinVersionsEqual("latest", version());
+assertBinVersionsEqual("", "latest");
+assertBinVersionsEqual("", version());
+
+assertBinVersionComparesEqual("latest", version());
+assertBinVersionComparesEqual("", "latest");
+assertBinVersionComparesEqual("", version());
+
+assertBinVersionsNotEqual("latest", "last-stable");
+assertBinVersionsNotEqual("last-stable", version());
+
+assertBinVersionComparesHigher("latest", "last-stable");
+assertBinVersionComparesLower("last-stable", version());
+
+// 3.2 means 3.2.z for any value of z. It does not mean 3.0 or 3.0.w.
+assertBinVersionsEqual("3.2", "3.2.4");
+assertBinVersionsEqual("3.2.4", "3.2");
+assertBinVersionsNotEqual("3.2", "3.0");
+assertBinVersionsNotEqual("3.0.9", "3.2.9");
+
+assertBinVersionComparesEqual("3.2", "3.2.4");
+assertBinVersionComparesEqual("3.2.4", "3.2");
+assertBinVersionComparesHigher("3.2", "3.0");
+assertBinVersionComparesLower("3.0.9", "3.2.9");
+
+assertBinVersionsEqual("3.4", "3.4.0-abcd");
+assertBinVersionsEqual("3.4.0", "3.4.0-abcd");
+
+assertBinVersionComparesEqual("3.4", "3.4.0-abcd");
+assertBinVersionComparesEqual("3.4.0", "3.4.0-abcd");
+assertBinVersionComparesHigher("3.6.0", "3.4.0-abcd");
+assertBinVersionComparesHigher("4.0.0", "3.6.99-abcd");
+assertBinVersionComparesHigher("3.4.1", "3.4.0-abcd");
+assertBinVersionComparesLower("3.4.0-abc", "3.4.1-xyz");
+
+// Prohibit versions that don't have at least two components (3 is no good, 3.2 is).
+assert.throws(MongoRunner.areBinVersionsTheSame, ["3", "3.2"]);
+assert.throws(MongoRunner.areBinVersionsTheSame, ["3.2", "3"]);
+
+// Throw an error when versions differ only by githash.
+assert.throws(MongoRunner.compareBinVersions, ["3.4.1-abc", "3.4.1-xyz"]);
}());