author     Robert Guo <robertguo@me.com>  2020-03-17 16:04:08 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-03-18 04:07:46 +0000
commit     4d87339d4b427966b9b9fcf879c17bef08437f94
tree       d49e4cde09275c67c6a6698afcb47ac91c519c93 /jstests/multiVersion
parent     5adb80de95ab4f7784eb2905f82e4d8712578e3a
download   mongo-4d87339d4b427966b9b9fcf879c17bef08437f94.tar.gz
SERVER-46326 update last-stable to 4.3
Diffstat (limited to 'jstests/multiVersion')
-rw-r--r--  jstests/multiVersion/add_invalid_shard.js  2
-rw-r--r--  jstests/multiVersion/agg_with_comment_during_upgrade.js  87
-rw-r--r--  jstests/multiVersion/clone_helper.js  66
-rw-r--r--  jstests/multiVersion/copydb_helper.js  50
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js  272
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js  1
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js  2
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js  2
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js  2
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js  2
-rw-r--r--  jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js  235
-rw-r--r--  jstests/multiVersion/hashed_index_bad_keys_cleanup.js  134
-rw-r--r--  jstests/multiVersion/index_initial_sync_latest_primary.js  23
-rw-r--r--  jstests/multiVersion/index_initial_sync_latest_secondary.js  24
-rw-r--r--  jstests/multiVersion/initial_sync_drop_against_last_stable.js  179
-rw-r--r--  jstests/multiVersion/keystring_index.js  290
-rw-r--r--  jstests/multiVersion/long_collection_names.js  123
-rw-r--r--  jstests/multiVersion/migrations_with_mixed_fcv.js  1
-rw-r--r--  jstests/multiVersion/mixed_replica_set_with_latest_primary.js  1
-rw-r--r--  jstests/multiVersion/mr_multiversion_check_uuid.js  197
-rw-r--r--  jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js  64
-rw-r--r--  jstests/multiVersion/pending_chunk.js  1
-rw-r--r--  jstests/multiVersion/remove_ns_field_in_index_spec.js  119
-rw-r--r--  jstests/multiVersion/sortkey_meta.js  57
-rw-r--r--  jstests/multiVersion/unionWith_fcv.js  78
25 files changed, 14 insertions, 1998 deletions
diff --git a/jstests/multiVersion/add_invalid_shard.js b/jstests/multiVersion/add_invalid_shard.js
index 1f9dfc9c40f..005d32d4c01 100644
--- a/jstests/multiVersion/add_invalid_shard.js
+++ b/jstests/multiVersion/add_invalid_shard.js
@@ -1,5 +1,7 @@
/**
* Test that adding invalid or duplicate shards will fail.
+ *
+ * @tags: [fix_for_fcv_46]
*/
(function() {
diff --git a/jstests/multiVersion/agg_with_comment_during_upgrade.js b/jstests/multiVersion/agg_with_comment_during_upgrade.js
deleted file mode 100644
index aefccc55f41..00000000000
--- a/jstests/multiVersion/agg_with_comment_during_upgrade.js
+++ /dev/null
@@ -1,87 +0,0 @@
-// Test that aggregations with the "comment" field succeed during the upgrade, in particular when
-// there are mixed version shards. When a 4.4 shard is nominated as the merger during upgrade, it
-// cannot propagate the comment on getMore commands. This is because any 4.2 nodes involved in the
-// query are unprepared to handle the "comment" field.
-//
-// This is designed as a regression test for SERVER-45002.
-//
-// TODO SERVER-45579: Remove this test after branching for 4.5, since this is specific to the
-// 4.2/4.4 upgrade/downgrade process.
-//
-// Checking UUID consistency uses cached connections, which are not valid across the server restarts
-// done during upgrade.
-TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-
-(function() {
-"use strict";
-
-load("jstests/multiVersion/libs/multi_cluster.js"); // For upgradeCluster().
-load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet().
-
-// Start with a last-stable cluster with two shards.
-const st = new ShardingTest({
- shards: 2,
- mongos: 1,
- rs: {nodes: 3},
- other: {
- mongosOptions: {binVersion: "last-stable"},
- configOptions: {binVersion: "last-stable"},
- rsOptions: {binVersion: "last-stable"},
- }
-});
-
-let testDb = st.s.getDB("testDb");
-assert.commandWorked(testDb.source.insert({_id: -1}));
-assert.commandWorked(testDb.source.insert({_id: 1}));
-
-// Shard a collection and ensure that there are chunks on both shards.
-st.ensurePrimaryShard("testDb", st.shard1.shardName);
-st.shardColl("source", {_id: 1}, {_id: 0}, {_id: -1}, "testDb", true);
-
-// Runs a $merge which will use the 4.4 node as the merger, specifying the "comment" parameter.
-// Ensures that the command succeeds and that the correct results are written to the output
-// collection. Cleans up afterwards by dropping the $merge destination collection.
-const runAggregateWithPrimaryShardMerger = function() {
- testDb = st.s.getDB("testDb");
- assert.eq(
- 0,
- testDb.source
- .aggregate(
- [
- {$_internalInhibitOptimization: {}},
- {
- $merge:
- {into: "destination", whenMatched: "replace", whenNotMatched: "insert"}
- }
- ],
- {comment: "my comment"})
- .itcount());
- assert.eq(2, testDb.destination.find().itcount());
-
- assert(testDb.destination.drop());
-};
-
-runAggregateWithPrimaryShardMerger();
-
-// Upgrade the primary shard to "latest", and verify that the agg command still works correctly.
-st.rs1.upgradeSet({binVersion: "latest"});
-runAggregateWithPrimaryShardMerger();
-
-// Upgrade the other shard and repeat the test.
-st.rs0.upgradeSet({binVersion: "latest"});
-runAggregateWithPrimaryShardMerger();
-
-// Upgrade the config servers and repeat the test.
-st.upgradeCluster("latest", {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
-runAggregateWithPrimaryShardMerger();
-
-// Upgrade the mongos and repeat the test.
-st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
-runAggregateWithPrimaryShardMerger();
-
-// Set the FCV to "4.4" to complete the upgrade and repeat the test.
-assert.commandWorked(st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: "4.4"}));
-runAggregateWithPrimaryShardMerger();
-
-st.stop();
-})();
diff --git a/jstests/multiVersion/clone_helper.js b/jstests/multiVersion/clone_helper.js
deleted file mode 100644
index 893993239bf..00000000000
--- a/jstests/multiVersion/clone_helper.js
+++ /dev/null
@@ -1,66 +0,0 @@
-// SERVER-36438 Ensure the 4.2 cloneDatabase() shell helper still successfully executes the clone
-// command on a 4.0 server, now that the clone command has been removed as of 4.2.
-(function() {
-"use strict";
-const oldVersion = "4.0";
-
-let numDocs = 2000;
-
-// 1kb string
-let str = new Array(1000).toString();
-
-let replsetDBName = "cloneDBreplset";
-let standaloneDBName = "cloneDBstandalone";
-let testColName = "foo";
-let testViewName = "view";
-
-jsTest.log("Create replica set");
-let replTest = new ReplSetTest({name: "testSet", nodes: 3, nodeOptions: {binVersion: oldVersion}});
-replTest.startSet();
-replTest.initiate();
-let master = replTest.getPrimary();
-let masterDB = master.getDB(replsetDBName);
-masterDB.dropDatabase();
-
-jsTest.log("Create standalone server");
-let standalone = MongoRunner.runMongod({binVersion: oldVersion});
-let standaloneDB = standalone.getDB(replsetDBName);
-standaloneDB.dropDatabase();
-
-jsTest.log("Insert data into replica set");
-let bulk = masterDB[testColName].initializeUnorderedBulkOp();
-for (let i = 0; i < numDocs; i++) {
- bulk.insert({x: i, text: str});
-}
-assert.commandWorked(bulk.execute({w: 3}));
-
-jsTest.log("Create view on replica set");
-assert.commandWorked(masterDB.runCommand({create: testViewName, viewOn: testColName}));
-
-// Make sure all writes have replicated to secondary.
-replTest.awaitReplication();
-
-jsTest.log("Clone db from replica set to standalone server");
-standaloneDB.cloneDatabase(replTest.getURL());
-assert.eq(numDocs,
- standaloneDB[testColName].find().itcount(),
- "cloneDatabase from replset to standalone failed (document counts do not match)");
-assert.eq(numDocs,
- standaloneDB[testViewName].find().itcount(),
- "cloneDatabase from replset to standalone failed (count on view incorrect)");
-
-jsTest.log("Clone db from replica set PRIMARY to standalone server");
-standaloneDB.dropDatabase();
-standaloneDB.cloneDatabase(master.host);
-assert.eq(numDocs,
- standaloneDB[testColName].find().itcount(),
- "cloneDatabase from PRIMARY to standalone failed (document counts do not match)");
-assert.eq(numDocs,
- standaloneDB[testViewName].find().itcount(),
- "cloneDatabase from PRIMARY to standalone failed (count on view incorrect)");
-
-jsTest.log("Shut down replica set and standalone server");
-MongoRunner.stopMongod(standalone);
-
-replTest.stopSet();
-})();
diff --git a/jstests/multiVersion/copydb_helper.js b/jstests/multiVersion/copydb_helper.js
deleted file mode 100644
index 25c60f2552a..00000000000
--- a/jstests/multiVersion/copydb_helper.js
+++ /dev/null
@@ -1,50 +0,0 @@
-// SERVER-36438 Ensure the 4.2 copyDatabase() shell helper still successfully executes the copyDB
-// command on a 4.0 server, now that the copyDB command has been removed as of 4.2.
-(function() {
-"use strict";
-const oldVersion = "4.0";
-
-let runTest = function(useAuth) {
- let conn;
- if (useAuth) {
- conn = MongoRunner.runMongod({auth: "", binVersion: oldVersion});
- } else {
- conn = MongoRunner.runMongod({binVersion: oldVersion});
- }
-
- let fromDB = conn.getDB("copydb2-test-a");
- let toDB = conn.getDB("copydb2-test-b");
- let adminDB = conn.getDB("admin");
-
- if (useAuth) {
- adminDB.createUser({user: "root", pwd: "root", roles: ["root"]});
- adminDB.auth("root", "root");
- fromDB.createUser(
- {user: "chevy", pwd: "chase", roles: ["read", {role: "readWrite", db: toDB._name}]});
- }
-
- assert.commandWorked(fromDB.foo.insert({a: 1}));
- assert.commandWorked(fromDB.foo.createIndex({a: 1}));
-
- if (useAuth) {
- assert.commandWorked(toDB.getSiblingDB("admin").logout());
- fromDB.auth("chevy", "chase");
- }
-
- assert.eq(1, fromDB.foo.count());
- assert.eq(0, toDB.foo.count());
-
- assert.commandWorked(fromDB.copyDatabase(fromDB._name, toDB._name));
- assert.eq(1, fromDB.foo.count());
- assert.eq(1, toDB.foo.count());
- assert.eq(fromDB.foo.getIndexes().length, toDB.foo.getIndexes().length);
- MongoRunner.stopMongod(conn);
-};
-
-runTest(/*useAuth*/ false);
-
-// Authenticating as multiple users on multiple databases results in an error.
-if (!jsTest.options().auth) {
- runTest(/*useAuth*/ true);
-}
-})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js
deleted file mode 100644
index 630a405529a..00000000000
--- a/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js
+++ /dev/null
@@ -1,272 +0,0 @@
-/**
- * Test that mongod will not allow creation of collection validators using new query features when
- * the feature compatibility version is older than the latest version.
- *
- * We restart mongod during the test and expect it to have the same data after restarting.
- * @tags: [requires_persistence]
- */
-
-(function() {
-"use strict";
-
-const testName = "collection_validator_feature_compatibility_version";
-const dbpath = MongoRunner.dataPath + testName;
-
-// The 'testCases' array should be populated with
-//
-// { validator: { ... }, nonMatchingDocument: { ... }, lastStableErrCode }
-//
-// objects that use query features new in the latest version of mongod. Note that this also
-// includes new aggregation expressions able to be used with the $expr match expression. This
-// test ensures that a collection validator accepts the new query feature when the feature
-// compatibility version is the latest version, and rejects it when the feature compatibility
-// version is the last-stable version.
-// The 'lastStableErrCode' field indicates what error the last stable version would throw when
-// parsing the validator.
-const testCases = [
- {
- validator: {$expr: {$eq: [{$meta: "indexKey"}, 'foobar']}},
- nonMatchingDocument: {a: 1},
- lastStableErrCode: 17308
- },
- {
- validator: {$expr: {$eq: [{$meta: "sortKey"}, 'foobar']}},
- nonMatchingDocument: {a: 1},
- lastStableErrCode: 17308
- },
- {
- validator: {$expr: {$eq: [{$meta: "recordId"}, 'foobar']}},
- nonMatchingDocument: {a: 1},
- lastStableErrCode: 17308
- },
- {
- validator: {$expr: {$eq: [{$meta: "geoNearPoint"}, 'foobar']}},
- nonMatchingDocument: {a: 1},
- lastStableErrCode: 17308
- },
- {
- validator: {$expr: {$eq: [{$meta: "geoNearDistance"}, 'foobar']}},
- nonMatchingDocument: {a: 1},
- lastStableErrCode: 17308
- },
- {validator: {$expr: {$isNumber: {}}}, nonMatchingDocument: {a: 1}, lastStableErrCode: 168},
- {
- validator: {$expr: {$eq: [{$bsonSize: {}}, 'foobar']}},
- nonMatchingDocument: {a: 1},
- lastStableErrCode: 168
- },
- {
- validator: {$expr: {$eq: [{$binarySize: ''}, 'foobar']}},
- nonMatchingDocument: {a: 1},
- lastStableErrCode: 168
- },
- {
- validator:
- {$expr: {$eq: [{$replaceOne: {input: '', find: '', replacement: ''}}, 'foobar']}},
- nonMatchingDocument: {a: 1},
- lastStableErrCode: 168
- },
- {
- validator:
- {$expr: {$eq: [{$replaceAll: {input: '', find: '', replacement: ''}}, 'foobar']}},
- nonMatchingDocument: {a: 1},
- lastStableErrCode: 168
- },
- {
- validator: {$expr: {$eq: [{$first: {$literal: ['a']}}, 'foobar']}},
- nonMatchingDocument: {a: 1},
- lastStableErrCode: 168
- },
- {
- validator: {$expr: {$eq: [{$last: {$literal: ['a']}}, 'foobar']}},
- nonMatchingDocument: {a: 1},
- lastStableErrCode: 168
- },
-];
-
-let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
-assert.neq(null, conn, "mongod was unable to start up");
-
-let testDB = conn.getDB(testName);
-
-let adminDB = conn.getDB("admin");
-
-// Explicitly set the feature compatibility version to the latest version.
-assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
-
-testCases.forEach(function(test, i) {
- // Create a collection with a validator using new query features.
- const coll = testDB["coll" + i];
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {validator: test.validator}),
- `Expected to be able to create collection with validator ${tojson(test.validator)}`);
-
- // The validator should cause this insert to fail.
- assert.writeErrorWithCode(
- coll.insert(test.nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure,
- `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
- `collection with validator ${tojson(test.validator)}`);
-
- // Set a validator using new query features on an existing collection.
- coll.drop();
- assert.commandWorked(testDB.createCollection(coll.getName()));
- assert.commandWorked(
- testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
- `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
-
-    // Another failing insert.
- assert.writeErrorWithCode(
- coll.insert(test.nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure,
- `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
- `collection with validator ${tojson(test.validator)}`);
-});
-
-// Set the feature compatibility version to the last-stable version.
-assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
-testCases.forEach(function(test, i) {
- // The validator is already in place, so it should still cause this insert to fail.
- const coll = testDB["coll" + i];
- assert.writeErrorWithCode(
- coll.insert(test.nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure,
- `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
- `collection with validator ${tojson(test.validator)}`);
-
- // Trying to create a new collection with a validator using new query features should fail
- // while feature compatibility version is the last-stable version.
- let res = testDB.createCollection("other", {validator: test.validator});
- assert.commandFailedWithCode(
- res,
- ErrorCodes.QueryFeatureNotAllowed,
- 'Expected *not* to be able to create collection with validator ' + tojson(test.validator));
- assert(res.errmsg.match(/feature compatibility version/),
- `Expected error message from createCollection with validator ` +
- `${tojson(test.validator)} to reference 'feature compatibility version' but got: ` +
- res.errmsg);
-
- // Trying to update a collection with a validator using new query features should also fail.
- res = testDB.runCommand({collMod: coll.getName(), validator: test.validator});
- assert.commandFailedWithCode(
- res,
- ErrorCodes.QueryFeatureNotAllowed,
-        `Expected *not* to be able to modify collection validator to be ${tojson(test.validator)}`);
-    assert(res.errmsg.match(/feature compatibility version/),
-           `Expected error message from collMod with validator ` +
- `${tojson(test.validator)} to reference 'feature compatibility version' but got: ` +
- res.errmsg);
-});
-
-MongoRunner.stopMongod(conn);
-
-if (testCases.length > 0) {
- // Versions of mongod 4.2 and later are able to start up with a collection validator that's
- // considered invalid. However, any writes to the collection will fail.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "last-stable", noCleanData: true});
- assert.neq(null, conn, "last stable mongod was unable to start up with invalid validator");
- const testDB = conn.getDB(testName);
-
- // Check that writes fail to all collections with validators using new query features.
- testCases.forEach(function(test, i) {
- const coll = testDB["coll" + i];
- assert.commandFailedWithCode(coll.insert({foo: 1}), test.lastStableErrCode);
- });
-
- MongoRunner.stopMongod(conn);
-}
-
-// Starting up the latest version of mongod, however, should succeed, even though the feature
-// compatibility version is still set to the last-stable version.
-conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
-assert.neq(null, conn, "mongod was unable to start up");
-
-adminDB = conn.getDB("admin");
-testDB = conn.getDB(testName);
-
-// And the validator should still work.
-testCases.forEach(function(test, i) {
- const coll = testDB["coll" + i];
- assert.writeErrorWithCode(
- coll.insert(test.nonMatchingDocument),
- ErrorCodes.DocumentValidationFailure,
- `Expected document ${tojson(test.nonMatchingDocument)} to fail validation for ` +
- `collection with validator ${tojson(test.validator)}`);
-
- // Remove the validator.
- assert.commandWorked(testDB.runCommand({collMod: coll.getName(), validator: {}}));
-});
-
-MongoRunner.stopMongod(conn);
-
-// Now, we should be able to start up the last-stable version of mongod.
-conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "last-stable", noCleanData: true});
-assert.neq(
- null,
- conn,
- `version ${MongoRunner.getBinVersionFor("last-stable")} of mongod failed to start, even` +
- " after we removed the validator using new query features");
-
-MongoRunner.stopMongod(conn);
-
-// The rest of the test uses the latest version of mongod.
-conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
-assert.neq(null, conn, "mongod was unable to start up");
-
-adminDB = conn.getDB("admin");
-testDB = conn.getDB(testName);
-
-// Set the feature compatibility version back to the latest version.
-assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
-
-testCases.forEach(function(test, i) {
- const coll = testDB["coll2" + i];
-
- // Now we should be able to create a collection with a validator using new query features
- // again.
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {validator: test.validator}),
- `Expected to be able to create collection with validator ${tojson(test.validator)}`);
-
- // And we should be able to modify a collection to have a validator using new query
- // features.
- assert.commandWorked(
- testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
- `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
-});
-
-// Set the feature compatibility version to the last-stable version and then restart with
-// internalValidateFeaturesAsMaster=false.
-assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-MongoRunner.stopMongod(conn);
-conn = MongoRunner.runMongod({
- dbpath: dbpath,
- binVersion: "latest",
- noCleanData: true,
- setParameter: "internalValidateFeaturesAsMaster=false"
-});
-assert.neq(null, conn, "mongod was unable to start up");
-
-testDB = conn.getDB(testName);
-
-testCases.forEach(function(test, i) {
- const coll = testDB["coll3" + i];
- // Even though the feature compatibility version is the last-stable version, we should still
- // be able to add a validator using new query features, because
- // internalValidateFeaturesAsMaster is false.
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {validator: test.validator}),
- `Expected to be able to create collection with validator ${tojson(test.validator)}`);
-
- // We should also be able to modify a collection to have a validator using new query
- // features.
- coll.drop();
- assert.commandWorked(testDB.createCollection(coll.getName()));
- assert.commandWorked(
- testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
- `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
-});
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js b/jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js
index 0d06214c7b9..fdfd3ab72a1 100644
--- a/jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js
+++ b/jstests/multiVersion/genericSetFCVUsage/feature_compatibility_version_lagging_secondary.js
@@ -1,5 +1,6 @@
// Tests that a primary with upgrade featureCompatibilityVersion cannot connect with a secondary
// with a lower binary version.
+// @tags: [fix_for_fcv_46]
(function() {
"use strict";
diff --git a/jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js b/jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js
index fb8764c68eb..4ce4db19ef4 100644
--- a/jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js
+++ b/jstests/multiVersion/genericSetFCVUsage/migration_between_mixed_FCV_mixed_version_mongods.js
@@ -1,6 +1,8 @@
/**
* Test that it is not possible to move a chunk from an upgrade featureCompatibilityVersion node to
* a downgrade binary version node.
+ *
+ * @tags: [fix_for_fcv_46]
*/
(function() {
diff --git a/jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js b/jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js
index 9d3c1a60172..792f8f339cd 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rollback_last_stable_to_latest.js
@@ -1,6 +1,8 @@
/**
* Multiversion rollback test. Checks that rollback succeeds between a
* 'last-stable' version rollback node and a 'latest' version sync source.
+ *
+ * @tags: [fix_for_fcv_46]
*/
(function() {
diff --git a/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js b/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js
index 546065ecc5f..8fe1123dca2 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_last_stable.js
@@ -1,6 +1,8 @@
/**
* Multiversion rollback test. Checks that rollback succeeds between a
* 'latest' version rollback node and a 'last-stable' version sync source.
+ *
+ * @tags: [fix_for_fcv_46]
*/
(function() {
diff --git a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
index 02e6fbb9d40..de82799715c 100644
--- a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
+++ b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
@@ -1,5 +1,7 @@
/**
* Tests setFeatureCompatibilityVersion.
+ *
+ * @tags: [fix_for_fcv_46]
*/
// Checking UUID consistency involves talking to a shard node, which in this test is shut down
diff --git a/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js
deleted file mode 100644
index b6aa9f4a921..00000000000
--- a/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js
+++ /dev/null
@@ -1,235 +0,0 @@
-/**
- * Test that mongod will not allow creation of a view using new aggregation features when the
- * feature compatibility version is older than the latest version.
- *
- * We restart mongod during the test and expect it to have the same data after restarting.
- * @tags: [requires_persistence]
- */
-
-(function() {
-"use strict";
-
-const testName = "view_definition_feature_compatibility_version_multiversion";
-const dbpath = MongoRunner.dataPath + testName;
-
-// The 'pipelinesWithNewFeatures' array should be populated with aggregation pipelines that use
-// aggregation features new in the latest version of mongod. This test ensures that a view
-// definition accepts the new aggregation feature when the feature compatibility version is the
-// latest version, and rejects it when the feature compatibility version is the last-stable
-// version.
-const pipelinesWithNewFeatures = [
- [{$project: {x: {$meta: "indexKey"}}}],
- [{$project: {x: {$meta: "recordId"}}}],
- [{$sort: {a: 1}}, {$project: {x: {$meta: "sortKey"}}}],
- [
- {$geoNear: {near: {type: "Point", coordinates: [0, 0]}, distanceField: "loc"}},
- {$project: {m: {$meta: "geoNearPoint"}}}
- ],
- [
- {$geoNear: {near: {type: "Point", coordinates: [0, 0]}, distanceField: "loc"}},
- {$project: {m: {$meta: "geoNearDistance"}}}
- ],
- [{$project: {x: {$isNumber: {}}}}],
- [{$project: {x: {$bsonSize: {}}}}],
- [{$project: {x: {$binarySize: ''}}}],
- [{$project: {x: {$replaceOne: {input: '', find: '', replacement: ''}}}}],
- [{$project: {x: {$replaceAll: {input: '', find: '', replacement: ''}}}}],
- [{$project: {x: {$first: {$literal: ['a']}}}}],
- [{$project: {x: {$last: {$literal: ['a']}}}}],
- [{$unionWith: "A"}],
- [{$unionWith: {coll: "A", pipeline: [{$match: {b: 1}}]}}],
- [{$lookup: {from: "A", pipeline: [{$unionWith: "B"}], as: "result"}}],
- [{$facet: {sub_pipe_invalid: [{$unionWith: "B"}], sub_pipe_valid: [{$match: {b: 1}}]}}],
- [{
- $group: {
- _id: 1,
- v: {
- $accumulator: {
- init: function() {},
- accumulate: function() {},
- accumulateArgs: [],
- merge: function() {},
- lang: "js"
- }
- }
- }
- }],
- [{$group: {_id: 1, v: {$_internalJsReduce: {eval: function() {}, data: {}}}}}],
- [{$project: {v: {$_internalJsEmit: {eval: function() {}, this: {}}}}}],
- [{$project: {v: {$function: {body: function() {}, args: [], lang: "js"}}}}],
-];
-
-let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
-assert.neq(null, conn, "mongod was unable to start up");
-let testDB = conn.getDB(testName);
-
-// We need a GeoSpatial index to test $geoNear queries.
-assert.commandWorked(testDB.coll.createIndex({loc: "2dsphere"}));
-
-// Explicitly set feature compatibility version to the latest version.
-assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
-// Test that we are able to create a new view with any of the new features.
-pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandWorked(
- testDB.createView("firstView" + i, "coll", pipe),
- `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
- ` ${latestFCV}`));
-
-// Test that we are able to update an existing view with any of the new features.
-pipelinesWithNewFeatures.forEach(function(pipe, i) {
- assert(testDB["firstView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
- assert.commandWorked(testDB.createView("firstView" + i, "coll", []));
- assert.commandWorked(
- testDB.runCommand({collMod: "firstView" + i, viewOn: "coll", pipeline: pipe}),
- `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` +
- ` ${latestFCV}`);
-});
-
-// Create an empty view which we will attempt to update to use new query features while the
-// feature compatibility version is the last-stable version.
-assert.commandWorked(testDB.createView("emptyView", "coll", []));
-
-// Set the feature compatibility version to the last-stable version.
-assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
-// Read against an existing view using new query features should not fail.
-pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
- `Failed to query view with pipeline ${tojson(pipe)}`));
-
-// Trying to create a new view in the same database as existing invalid view should fail,
-// even if the new view doesn't use any new query features.
-assert.commandFailedWithCode(
- testDB.createView("newViewOldFeatures", "coll", [{$project: {_id: 1}}]),
- ErrorCodes.QueryFeatureNotAllowed,
- `Expected *not* to be able to create view on database ${testDB} while in FCV ${lastStableFCV}`);
-
-// Trying to create a new view succeeds if it's on a separate database.
-const testDB2 = conn.getDB(testName + '2');
-assert.commandWorked(testDB2.dropDatabase());
-assert.commandWorked(testDB2.createView("newViewOldFeatures", "coll", [{$project: {_id: 1}}]));
-
-// Trying to create a new view using new query features should fail.
-// (We use a separate DB to ensure this can only fail because of the view we're trying to create,
-// as opposed to an existing view.)
-pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandFailedWithCode(
- testDB2.createView("view_fail" + i, "coll", pipe),
- ErrorCodes.QueryFeatureNotAllowed,
- `Expected *not* to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
- ` ${lastStableFCV}`));
-
-// Trying to update existing view to use new query features should also fail.
-pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandFailedWithCode(
- testDB.runCommand({collMod: "emptyView", viewOn: "coll", pipeline: pipe}),
- ErrorCodes.QueryFeatureNotAllowed,
- `Expected *not* to be able to modify view to use pipeline ${tojson(pipe)} while in` +
-        ` FCV ${lastStableFCV}`));
-
-MongoRunner.stopMongod(conn);
-
-// Starting up the last-stable version of mongod with new query features will succeed.
-conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "last-stable", noCleanData: true});
-assert.neq(null,
- conn,
- `version ${MongoRunner.getBinVersionFor("last-stable")} of mongod was` +
- " unable to start up");
-testDB = conn.getDB(testName);
-
-// Reads will fail against views with new query features when running the last-stable version.
-// Not checking the code returned on failure as it is not uniform across the various
-// 'pipeline' arguments tested.
-pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandFailed(
- testDB.runCommand({find: "firstView" + i}),
- `Expected read against view with pipeline ${tojson(pipe)} to fail on version` +
- ` ${MongoRunner.getBinVersionFor("last-stable")}`));
-
-// Test that a read against a view that does not contain new query features succeeds.
-assert.commandWorked(testDB.runCommand({find: "emptyView"}));
-
-MongoRunner.stopMongod(conn);
-
-// Starting up the latest version of mongod should succeed, even though the feature
-// compatibility version is still set to the last-stable version.
-conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true});
-assert.neq(null, conn, "mongod was unable to start up");
-testDB = conn.getDB(testName);
-
-// Read against an existing view using new query features should not fail.
-pipelinesWithNewFeatures.forEach(
- (pipe, i) => assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
- `Failed to query view with pipeline ${tojson(pipe)}`));
-
-// Set the feature compatibility version back to the latest version.
-assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
-pipelinesWithNewFeatures.forEach(function(pipe, i) {
- assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
- `Failed to query view with pipeline ${tojson(pipe)}`);
- // Test that we are able to create a new view with any of the new features.
- assert.commandWorked(
- testDB.createView("secondView" + i, "coll", pipe),
- `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
- ` ${latestFCV}`);
-
- // Test that we are able to update an existing view to use any of the new features.
- assert(testDB["secondView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
- assert.commandWorked(testDB.createView("secondView" + i, "coll", []));
- assert.commandWorked(
- testDB.runCommand({collMod: "secondView" + i, viewOn: "coll", pipeline: pipe}),
- `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` +
- ` ${latestFCV}`);
-});
-
-// Set the feature compatibility version to the last-stable version and then restart with
-// internalValidateFeaturesAsMaster=false.
-assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-MongoRunner.stopMongod(conn);
-conn = MongoRunner.runMongod({
- dbpath: dbpath,
- binVersion: "latest",
- noCleanData: true,
- setParameter: "internalValidateFeaturesAsMaster=false"
-});
-assert.neq(null, conn, "mongod was unable to start up");
-testDB = conn.getDB(testName);
-
-pipelinesWithNewFeatures.forEach(function(pipe, i) {
- // Even though the feature compatibility version is the last-stable version, we should still
- // be able to create a view using new query features, because
- // internalValidateFeaturesAsMaster is false.
- assert.commandWorked(
- testDB.createView("thirdView" + i, "coll", pipe),
- `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
- ` ${lastStableFCV} with internalValidateFeaturesAsMaster=false`);
-
- // We should also be able to modify a view to use new query features.
- assert(testDB["thirdView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
- assert.commandWorked(testDB.createView("thirdView" + i, "coll", []));
- assert.commandWorked(
- testDB.runCommand({collMod: "thirdView" + i, viewOn: "coll", pipeline: pipe}),
- `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` +
- ` ${lastStableFCV} with internalValidateFeaturesAsMaster=false`);
-});
-
-MongoRunner.stopMongod(conn);
-
-// Starting up the last-stable version of mongod with new query features should succeed.
-conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "last-stable", noCleanData: true});
-assert.neq(null,
- conn,
- `version ${MongoRunner.getBinVersionFor("last-stable")} of mongod was` +
- " unable to start up");
-testDB = conn.getDB(testName);
-
-// Existing views with new query features can be dropped.
-pipelinesWithNewFeatures.forEach((pipe, i) =>
- assert(testDB["firstView" + i].drop(),
- `Drop of view with pipeline ${tojson(pipe)} failed`));
-assert(testDB.system.views.drop(), "Drop of system.views collection failed");
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/multiVersion/hashed_index_bad_keys_cleanup.js b/jstests/multiVersion/hashed_index_bad_keys_cleanup.js
deleted file mode 100644
index 87785a023b0..00000000000
--- a/jstests/multiVersion/hashed_index_bad_keys_cleanup.js
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Prior to SERVER-44050, for hashed indexes, if there was an array along the index field path, we
- * did not fail the insertion. We incorrectly stored an empty index key value in those cases. This
- * led to corruption of index keys.
- *
- * In this test we verify that we are able to successfully update and delete documents that were
- * involved in creating corrupt indexes.
- *
- * When we branch for 4.4, this test should be deleted in the master branch. In the v4.4 branch it
- * should test a 4.2 => 4.4 upgrade scenario.
- */
-(function() {
-"use strict";
-
-load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet.
-load("jstests/multiVersion/libs/verify_versions.js"); // For binVersion.
-
-const preBackportVersion = "4.2.1";
-const preBackportNodeOptions = {
- binVersion: preBackportVersion
-};
-const nodeOptionsOfLatestVersion = {
- binVersion: "latest"
-};
-
-// Set up a new replSet consisting of 2 nodes, initially running on bad binaries.
-const rst = new ReplSetTest({nodes: 2, nodeOptions: preBackportNodeOptions});
-rst.startSet();
-rst.initiate();
-
-let testDB = rst.getPrimary().getDB(jsTestName());
-let coll = testDB.coll;
-coll.drop();
-
-// Verify that the replica set is on the binary version specified in 'preBackportVersion'.
-assert.binVersion(testDB.getMongo(), preBackportVersion);
-
-// Insert bad documents using older version.
-assert.commandWorked(coll.createIndex({"p.q.r": "hashed"}));
-assert.commandWorked(coll.insert({_id: 1, p: []}));
-assert.commandWorked(coll.insert({_id: 2, p: {q: [1]}}));
-assert.commandWorked(coll.insert({_id: 3, p: [{q: 1}]}));
-assert.commandWorked(coll.insert({_id: 4, a: 1, p: [{q: 1}]}));
-assert.commandWorked(coll.insert({_id: 5, a: 1, p: [{q: 1}]}));
-
-// Assert that the collection has the expected number of documents and index keys.
-function assertCollectionHasExpectedDocs(expectedNumDocs) {
- const collState = {
- documents: coll.find().toArray(),
- indexKeys: coll.find().hint({"p.q.r": "hashed"}).returnKey().toArray()
- };
- assert.eq(collState.documents.length, expectedNumDocs, collState);
- assert.eq(collState.indexKeys.length, expectedNumDocs, collState);
-}
-
-// Verify that the documents inserted have the corresponding index keys.
-assertCollectionHasExpectedDocs(5);
-
-// Helper function which runs validate() on primary and secondary nodes, then verifies that the
-// command returned the expected result.
-function assertValidateCmdReturned(expectedResult) {
- const resFromPrimary = assert.commandWorked(coll.validate({full: true}));
- assert.eq(resFromPrimary.valid, expectedResult, resFromPrimary);
-
- rst.awaitReplication();
- const testDBOnSecondary = rst.getSecondary().getDB(jsTestName());
- const resFromSecondary = assert.commandWorked(testDBOnSecondary.coll.validate({full: true}));
- assert.eq(resFromSecondary.valid, expectedResult, resFromSecondary);
-}
-
-// Confirm that validate() does not perceive a problem with the malformed documents.
-assertValidateCmdReturned(true);
-
-// Upgrade the set to the new binary version.
-rst.upgradeSet(nodeOptionsOfLatestVersion);
-testDB = rst.getPrimary().getDB(jsTestName());
-coll = testDB.coll;
-
-// Verify that the five documents inserted earlier have their index keys after upgrade.
-assertCollectionHasExpectedDocs(5);
-
-// Verify that after upgrade, inserting bad documents is not allowed.
-const arrayAlongPathFailCode = 16766;
-assert.commandFailedWithCode(coll.insert({p: []}), arrayAlongPathFailCode);
-assert.commandFailedWithCode(coll.insert({p: [{q: 1}]}), arrayAlongPathFailCode);
-assert.commandFailedWithCode(coll.insert({p: {q: {r: [3]}}}), arrayAlongPathFailCode);
-
-// After upgrade, validate() should now fail since there are existing bad documents.
-assertValidateCmdReturned(false);
-
-// Deleting bad documents succeeds.
-assert.commandWorked(coll.deleteOne({_id: 1}));
-assert.commandWorked(coll.deleteMany({a: 1}));
-
-// Updating documents to contain array along field path should fail.
-assert.commandFailedWithCode(coll.update({_id: 2}, {p: {q: [{r: 1}]}}), arrayAlongPathFailCode);
-assert.commandFailedWithCode(
- testDB.runCommand({findAndModify: coll.getName(), query: {_id: 2}, update: {p: {q: [{r: 1}]}}}),
- arrayAlongPathFailCode);
-
-assert.commandFailedWithCode(coll.update({_id: 2}, {p: {q: {r: [3]}}}), arrayAlongPathFailCode);
-assert.commandFailedWithCode(testDB.runCommand({
- update: coll.getName(),
- updates: [
- {q: {_id: 3}, u: {$set: {p: {q: [{r: 1}]}}}},
- {q: {_id: 2}, u: {$set: {p: {q: [{r: 1}]}}}}
- ],
- ordered: false
-}),
- arrayAlongPathFailCode);
-
-// Verify that updating to a valid index field works.
-assert.commandWorked(coll.update({_id: 2}, {p: {q: {r: 4}}}));
-
-// Verify that the index key is updated correctly by querying with the hashed index.
-let res = coll.find({"p.q.r": 4}).hint({"p.q.r": "hashed"}).toArray();
-assert.eq(res, [{_id: 2, p: {q: {r: 4}}}]);
-
-// Validate should still fail since a bad document {_id: 3} exists.
-assertValidateCmdReturned(false);
-
-// Delete the last remaining bad document.
-assert.commandWorked(coll.deleteOne({_id: 3}));
-
-// Now that all the bad documents are deleted or updated, verify that validate succeeds.
-assertValidateCmdReturned(true);
-
-// Verify that there is only one index key left (for {_id: 2}).
-res = coll.find().hint({"p.q.r": "hashed"}).returnKey().itcount();
-assert.eq(res, 1);
-
-rst.awaitReplication();
-rst.stopSet();
-}());
\ No newline at end of file
diff --git a/jstests/multiVersion/index_initial_sync_latest_primary.js b/jstests/multiVersion/index_initial_sync_latest_primary.js
deleted file mode 100644
index b7785996cff..00000000000
--- a/jstests/multiVersion/index_initial_sync_latest_primary.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * If a 4.2 secondary attempts to initial sync from a primary while there is an index build in
- * progress, the index should be visible on the secondary.
- * @tags: [requires_replication]
- */
-(function() {
-"use strict";
-
-load('jstests/noPassthrough/libs/index_initial_sync.js');
-
-const nodes = [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- binVersion: 'last-stable',
- },
-];
-new IndexInitialSyncTest({nodes: nodes}).run();
-})();
diff --git a/jstests/multiVersion/index_initial_sync_latest_secondary.js b/jstests/multiVersion/index_initial_sync_latest_secondary.js
deleted file mode 100644
index b8f5116815a..00000000000
--- a/jstests/multiVersion/index_initial_sync_latest_secondary.js
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * If a secondary attempts to initial sync from a 4.2 primary while there is an index build in
- * progress, the index should be visible on the secondary.
- * @tags: [requires_replication]
- */
-(function() {
-"use strict";
-
-load('jstests/noPassthrough/libs/index_initial_sync.js');
-
-const nodes = [
- {
- binVersion: 'last-stable',
- },
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- },
-];
-new IndexInitialSyncTest({nodes: nodes}).run();
-})();
diff --git a/jstests/multiVersion/initial_sync_drop_against_last_stable.js b/jstests/multiVersion/initial_sync_drop_against_last_stable.js
deleted file mode 100644
index 13ff903cc51..00000000000
--- a/jstests/multiVersion/initial_sync_drop_against_last_stable.js
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Test that CollectionCloner completes without error when a collection is dropped during cloning,
- * specifically when the sync source is running 4.2.
- */
-load("jstests/libs/logv2_helpers.js");
-
-(function() {
-"use strict";
-
-load("jstests/libs/fail_point_util.js");
-load('jstests/replsets/libs/two_phase_drops.js');
-load("jstests/libs/uuid_util.js");
-
-// Set up replica set with two nodes. We will add a third and force it to sync from the secondary.
-const testName = "initial_sync_drop_against_last_stable";
-const dbName = testName;
-const replTest = new ReplSetTest({
- name: testName,
- nodes: [
- {}, /* primary */
- {rsConfig: {priority: 0, votes: 0}, binVersion: "last-stable"}, /* sync source */
- {rsConfig: {priority: 0, votes: 0}} /* initial syncing node */
- ]
-});
-replTest.startSet();
-replTest.initiate();
-
-const collName = "testcoll";
-const primary = replTest.getPrimary();
-const primaryDB = primary.getDB(dbName);
-const primaryColl = primaryDB[collName];
-const nss = primaryColl.getFullName();
-
-// The sync source.
-const syncSource = replTest.getSecondary();
-
-// The initial syncing node. Places in the test that refer to 'the secondary' refer to this node.
-let secondary = replTest.getSecondaries()[1];
-assert.neq(syncSource, secondary, "initial syncing node should be the third in the set");
-let secondaryDB = secondary.getDB(dbName);
-let secondaryColl = secondaryDB[collName];
-
-// This function adds data to the collection, restarts the secondary node with the given
-// parameters and sets the given failpoint, waits for the failpoint to be hit,
-// drops the collection, then disables the failpoint. It then optionally waits for the
-// expectedLog message and waits for the secondary to complete initial sync, then ensures
-// the collection on the secondary is empty.
-function setupTest({failPoint, extraFailPointData, secondaryStartupParams}) {
- jsTestLog("Writing data to collection.");
- assert.commandWorked(primaryColl.insert([{_id: 1}, {_id: 2}]));
- const data = Object.merge(extraFailPointData || {}, {nss: nss});
-
- jsTestLog("Restarting secondary with failPoint " + failPoint + " set for " + nss);
- secondaryStartupParams = secondaryStartupParams || {};
- secondaryStartupParams['failpoint.' + failPoint] = tojson({mode: 'alwaysOn', data: data});
- // Force the initial syncing node to sync against the 4.2 secondary.
- secondaryStartupParams['failpoint.forceSyncSourceCandidate'] =
- tojson({mode: 'alwaysOn', data: {hostAndPort: syncSource.host}});
- // Skip clearing initial sync progress after a successful initial sync attempt so that we
- // can check initialSyncStatus fields after initial sync is complete.
- secondaryStartupParams['failpoint.skipClearInitialSyncState'] = tojson({mode: 'alwaysOn'});
- secondaryStartupParams['numInitialSyncAttempts'] = 1;
- secondary =
- replTest.restart(secondary, {startClean: true, setParameter: secondaryStartupParams});
- secondaryDB = secondary.getDB(dbName);
- secondaryColl = secondaryDB[collName];
-
- jsTestLog("Waiting for secondary to reach failPoint " + failPoint);
- assert.commandWorked(secondary.adminCommand({
- waitForFailPoint: failPoint,
- timesEntered: 1,
- maxTimeMS: kDefaultWaitForFailPointTimeout
- }));
-
- // Restarting the secondary may have resulted in an election. Wait until the system
- // stabilizes and reaches RS_STARTUP2 state.
- replTest.getPrimary();
- replTest.waitForState(secondary, ReplSetTest.State.STARTUP_2);
-}
-
-function finishTest({failPoint, expectedLog, expectedLogId, waitForDrop, createNew}) {
- // Get the uuid for use in checking the log line.
- let uuid = getUUIDFromListCollections(primaryDB, collName);
-
- jsTestLog("Dropping collection on primary: " + primaryColl.getFullName());
- assert(primaryColl.drop());
- replTest.awaitReplication(null, null, [syncSource]);
-
- if (waitForDrop) {
- jsTestLog("Waiting for drop to commit on primary");
- TwoPhaseDropCollectionTest.waitForDropToComplete(primaryDB, collName);
- }
-
- if (createNew) {
- jsTestLog("Creating a new collection with the same name: " + primaryColl.getFullName());
- assert.commandWorked(primaryColl.insert({_id: "not the same collection"}));
- }
-
- jsTestLog("Allowing secondary to continue.");
- assert.commandWorked(secondary.adminCommand({configureFailPoint: failPoint, mode: 'off'}));
-
- if (isJsonLog(primaryColl.getMongo())) {
- if (expectedLogId) {
- let attrValues = {
- "namespace": nss,
- "uuid": function(attr) {
- return UUID(attr.uuid.$uuid).toString() === uuid.toString();
- }
- };
-
- checkLog.containsJson(secondary, expectedLogId, attrValues);
- }
- } else {
- if (expectedLog) {
- expectedLog = eval(expectedLog);
- jsTestLog(expectedLog);
- checkLog.contains(secondary, expectedLog);
- }
- }
-
- jsTestLog("Waiting for initial sync to complete.");
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
-
- let res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert.eq(0, res.initialSyncStatus.failedInitialSyncAttempts);
-
- if (createNew) {
- assert.eq([{_id: "not the same collection"}], secondaryColl.find().toArray());
- assert(primaryColl.drop());
- } else {
- assert.eq(0, secondaryColl.find().itcount());
- }
-
- replTest.checkReplicatedDataHashes();
-}
-
-function runDropTest(params) {
- setupTest(params);
- finishTest(params);
-}
-
-jsTestLog("[1] Testing dropping between listIndexes and find.");
-runDropTest({
- failPoint: "hangBeforeClonerStage",
- extraFailPointData: {cloner: "CollectionCloner", stage: "query"}
-});
-
-jsTestLog(
- "[2] Testing dropping between listIndexes and find, with new same-name collection created.");
-runDropTest({
- failPoint: "hangBeforeClonerStage",
- extraFailPointData: {cloner: "CollectionCloner", stage: "query"},
- createNew: true
-});
-
-jsTestLog("[3] Testing committed drop between getMore calls.");
-runDropTest({
- failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
- secondaryStartupParams: {collectionClonerBatchSize: 1},
- waitForDrop: true,
- expectedLogId: 21132,
- expectedLog:
- "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped on source.`"
-});
-
-jsTestLog(
- "[4] Testing committed drop with new same-name collection created, between getMore calls.");
-runDropTest({
- failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
- secondaryStartupParams: {collectionClonerBatchSize: 1},
- waitForDrop: true,
- expectedLogId: 21132,
- expectedLog:
- "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped on source.`",
- createNew: true
-});
-
-replTest.stopSet();
-})();
\ No newline at end of file
diff --git a/jstests/multiVersion/keystring_index.js b/jstests/multiVersion/keystring_index.js
deleted file mode 100644
index 24d3c694a79..00000000000
--- a/jstests/multiVersion/keystring_index.js
+++ /dev/null
@@ -1,290 +0,0 @@
-/**
- * Regression test that runs validate to test KeyString changes across 4.2 and the current
- * version as specified in SERVER-41908.
- *
- * - First, start mongod in 4.2.
- * - For each index type, create a new collection in testDb, insert documents, and finally create
- *   the index.
- * - After all indexes and collections are added, shut down mongod.
- * - Restart the database as the current version.
- * - Run Validate.
- * - Remove all collections.
- * - Recreate all the indexes.
- * - Shut down mongod.
- * - Restart mongod in 4.2.
- * - Run Validate.
- *
- *
- * The following index types are tested:
- * - btree
- * - 2d
- * - geoHaystack
- * - 2dsphere
- * - text
- * - *hashed
- * - *wildcard
- * * these indexes are only created as v2 non-unique because they are not available as unique or in v1
- *
- * For each index type, a v1 unique, v2 unique, v1 non-unique and v2 non-unique index
- * is considered except for hashed and wildcard, which only consider the v2 non-unique case.
- */
-(function() {
-'use strict';
-load('jstests/hooks/validate_collections.js');
-
-// ----- Config
-// The number of documents created for each collection
-const numDocs = 100;
-
-const indexTypes = [
- {
- // an indicator of what the index is
- indexName: "BTreeIndex",
- // This function is called to create documents, which are then inserted into the
- // collection.
- createDoc: i => ({
- a: i,
- b: {x: i, y: i + 1},
- c: [i, i + 1],
- }),
- // the options given to the .createIndex method
- // i.e. collection.createIndex(creationOptions)
- creationOptions: {a: 1, b: 1, c: -1},
- // This optional parameter specifies extra options to give to createIndex.
-        // In the code, collection.createIndex(creationOptions, createIndexOptions)
- // is called.
- createIndexOptions: {}
- },
- {indexName: "2d", createDoc: i => ({loc: [i, i]}), creationOptions: {loc: "2d"}},
- {
- indexName: "hayStack",
- createDoc: i => ({
- loc: {lng: (i / 2.0) * (i / 2.0), lat: (i / 2.0)},
- a: {x: i, y: i + 1, z: [i, i + 1]},
- }),
- creationOptions: {loc: "geoHaystack", a: 1},
- createIndexOptions: {bucketSize: 1}
- },
- {
- indexName: "2dSphere",
- createDoc: i => {
- if (i == 0)
- return {
- "loc": {
- "type": "Polygon",
- "coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
- },
- b: {x: i, y: i + 1},
- c: [i, i + 1],
- };
- else
- return ({
- loc: {type: "Point", coordinates: [(i / 10.0) * (i / 10.0), (i / 10.0)]},
- b: {x: i, y: i + 1},
- c: [i, i + 1],
- });
- },
- creationOptions: {loc: "2dsphere", b: 1, c: -1}
- },
- {
- indexName: "text",
- createDoc: i => ({
- a: "a".repeat(i + 1),
- b: {x: i, y: i + 1, z: [i, i + 1]},
- }),
- creationOptions: {a: "text", b: 1}
- },
- {
- indexName: "hashed",
- createDoc: i => ({
- a: {x: i, y: i + 1, z: [i, i + 1]},
- }),
- creationOptions: {a: "hashed"}
- },
- {
- indexName: "wildCard",
- createDoc: i => {
- if (i == 0)
- return {};
- else if (i == 1)
- return {a: null};
- else if (i == 2)
- return {a: {}};
- else
- return {
- a: i,
- b: {x: i, y: i + 1},
- c: [i, i + 1],
- };
- },
- creationOptions: {"$**": 1}
- }
-];
-// -----
-
-const dbpath = MongoRunner.dataPath + 'keystring_index';
-resetDbpath(dbpath);
-
-const defaultOptions = {
- dbpath: dbpath,
- noCleanData: true
-};
-
-const testCollection = 'testColl';
-
-let mongodOptions42 = Object.extend({binVersion: '4.2'}, defaultOptions);
-let mongodOptionsCurrent = Object.extend({binVersion: 'latest'}, defaultOptions);
-
-// We will first start up an old binary version database, populate the database,
-// then upgrade and validate.
-
-// Start up an old binary version mongod.
-jsTestLog("Starting version: 4.2");
-let conn = MongoRunner.runMongod(mongodOptions42);
-
-assert.neq(null, conn, 'mongod was unable to start with version ' + tojson(mongodOptions42));
-
-let testDb = conn.getDB('test');
-assert.neq(null, testDb, 'testDb not found. conn.getDB(\'test\') returned null');
-
-populateDb(testDb);
-MongoRunner.stopMongod(conn);
-
-jsTestLog("Starting version: latest");
-
-// Restart the mongod with the latest binary version on the old version's data files.
-conn = MongoRunner.runMongod(mongodOptionsCurrent);
-assert.neq(null, conn, 'mongod was unable to start with the latest version');
-testDb = conn.getDB('test');
-assert.neq(null, testDb, 'testDb not found');
-
-jsTestLog("Validating: 4.2 indexes with latest");
-
-// Validate all the indexes.
-assert.commandWorked(validateCollections(testDb, {full: true}));
-
-// Next, we will repopulate the database with the latest version then downgrade and run
-// validate.
-dropAllUserCollections(testDb);
-populateDb(testDb);
-MongoRunner.stopMongod(conn);
-
-conn = MongoRunner.runMongod(mongodOptions42);
-assert.neq(null, conn, 'mongod was unable to start with version ' + tojson(mongodOptions42));
-
-testDb = conn.getDB('test');
-assert.neq(null, testDb, 'testDb not found. conn.getDB(\'test\') returned null');
-
-jsTestLog("Validating: latest indexes with 4.2");
-
-assert.commandWorked(validateCollections(testDb, {full: true}));
-MongoRunner.stopMongod(conn);
-
-// ----------------- Utilities
-
-// Populate the database using the config specified by the indexTypes array.
-function populateDb(testDb) {
- // Create a new collection and index for each indexType in the array.
- for (let i = 0; i < indexTypes.length; i++) {
- const indexOptions = indexTypes[i];
-        // Try unique and non-unique. Note: 'for...of' is required here; 'for...in' would
-        // iterate the array indices "0" and "1" as strings, which are both truthy.
-        for (const unique of [true, false]) {
- // Try index-version 1 and 2.
- for (let indexVersion = 1; indexVersion <= 2; indexVersion++) {
- let indexName = indexOptions.indexName;
-
- // We only run V2 non-unique for hashed and wildCard because they don't exist in
- // v1.
- if ((indexName == "hashed" || indexName == "wildCard") &&
- (unique == true || indexVersion == 1))
- continue;
-
- indexName += unique == true ? "Unique" : "NotUnique";
- indexName += `Version${indexVersion}`;
- let collectionName = testCollection + '_' + indexName;
- print(`${indexName}: Creating Collection`);
- assert.commandWorked(testDb.createCollection(collectionName));
-
- print(`${indexName}: Inserting Documents`);
- if (unique)
- insertDocumentsUnique(testDb[collectionName], numDocs, indexOptions.createDoc);
- else
- insertDocumentsNotUnique(
- testDb[collectionName], numDocs, indexOptions.createDoc);
-
- let extraCreateIndexOptions = {
- name: indexName,
- v: indexVersion,
- unique: unique == true
- };
-
- if ("createIndexOptions" in indexOptions)
- extraCreateIndexOptions =
- Object.extend(extraCreateIndexOptions, indexOptions.createIndexOptions);
- print(JSON.stringify(extraCreateIndexOptions));
- print(`${indexName}: Creating Index`);
- assert.commandWorked(testDb[collectionName].createIndex(
- indexOptions.creationOptions, extraCreateIndexOptions));
-
- // Assert that the correct index type was created.
- let indexSpec = getIndexSpecByName(testDb[collectionName], indexName);
- assert.eq(indexVersion, indexSpec.v, tojson(indexSpec));
- }
- }
- }
-}
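-
-// Illustrative example (derived from the loops above, not part of the original test):
-// for an indexTypes entry with indexName "hashed", populateDb creates a single
-// collection named 'testColl_hashedNotUniqueVersion2' with a V2, non-unique index
-// of that name.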
-
-// Drop all user created collections in a database.
-function dropAllUserCollections(testDb) {
- testDb.getCollectionNames().forEach((collName) => {
- if (!collName.startsWith("system.")) {
- testDb[collName].drop();
- }
- });
-}
-
-function getIndexSpecByName(coll, indexName) {
- const indexes = coll.getIndexes();
- const indexesFilteredByName = indexes.filter(spec => spec.name === indexName);
- assert.eq(
- 1, indexesFilteredByName.length, "index '" + indexName + "' not found: " + tojson(indexes));
- return indexesFilteredByName[0];
-}
-
-function fibonacci(num, memo) {
- memo = memo || {};
-
- if (memo[num])
- return memo[num];
- if (num <= 1)
- return 1;
-
- memo[num] = fibonacci(num - 1, memo) + fibonacci(num - 2, memo);
- return memo[num];
-}
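-
-// Quick sanity check of the memoized sequence (illustrative; note that both
-// fibonacci(0) and fibonacci(1) return 1 by the base case above):
-assert.eq([1, 1, 2, 3, 5, 8], [0, 1, 2, 3, 4, 5].map(n => fibonacci(n)));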
-
-// Insert numDocs documents into the collection by calling getDoc.
-// NOTE: documents returned from getDoc may be inserted more than once.
-function insertDocumentsNotUnique(collection, numDocs, getDoc) {
- let fibNum = 0;
-    // Fibonacci numbers are used because the Fibonacci sequence grows
-    // exponentially, which lets us create documents that are duplicated X times,
-    // for many small values of X and a few large values of X.
- for (let i = 0; i < numDocs; i += fibonacci(fibNum++)) {
- let doc = getDoc(i);
- for (let j = 0; j < fibonacci(fibNum); j++) {
- assert.commandWorked(collection.insert(doc));
- }
- }
-}
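-
-// Tracing the loops above with the implementation as written: keys 0, 1, 2, 4,
-// 7, 12, ... are inserted 1, 1, 2, 3, 5, 8, ... times respectively, yielding many
-// lightly duplicated keys and a few heavily duplicated ones.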
-
-// Insert numDocs documents into the collection by calling getDoc.
-// NOTE: getDoc is called exactly numDocs times.
-function insertDocumentsUnique(collection, numDocs, getDoc) {
- for (let i = 0; i < numDocs; i++) {
- let doc = getDoc(i);
- assert.commandWorked(collection.insert(doc));
- }
-}
-})();
diff --git a/jstests/multiVersion/long_collection_names.js b/jstests/multiVersion/long_collection_names.js
deleted file mode 100644
index 81ddf587218..00000000000
--- a/jstests/multiVersion/long_collection_names.js
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Tests the FCV for long collection names.
- *
 * On 4.2 and below, a fully qualified collection name (which includes the database name) is
 * limited to 120 characters.
- *
 * In this multi-version test, we ensure that we can create long collection names on the 4.4 binary
- * while the FCV document is set to 4.4. Restarting with long collection names present on a 4.2
- * binary should not crash the server. Users would need to manually remove or rename the long
- * collection names prior to downgrading. Additionally, we should be prevented from creating long
- * collection names when using FCV 4.2 on a 4.4 binary.
- */
-(function() {
-'use strict';
-
-const dbName = 'test';
-const renameDbName = 'rename_test';
-const shortCollName = 'short_collection';
-const longCollName = 'long_collection' +
- 'a'.repeat(200);
-const longCollNameRename = 'long_collection' +
- 'b'.repeat(200);
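-
-// Sanity check (illustrative, not part of the original test): both long names are
-// 215 characters, well beyond the 120-character fully qualified limit that 4.2
-// binaries enforce.
-assert.eq(215, longCollName.length);
-assert.eq(215, longCollNameRename.length);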
-
-const dbpath = MongoRunner.dataPath + 'long_collection_names';
-resetDbpath(dbpath);
-
-const mongodOptions42 =
- Object.extend({binVersion: 'last-stable'}, {dbpath: dbpath, cleanData: false});
-const mongodOptions44 = Object.extend({binVersion: 'latest'}, {dbpath: dbpath, cleanData: false});
-
-/**
- * Start up with the latest binary and ensure that long collection names can be created while
- * using FCV 4.4.
- */
-let conn = MongoRunner.runMongod(mongodOptions44);
-assert.neq(null, conn, 'mongod was unable to start with version ' + tojson(mongodOptions44));
-
-let testDb = conn.getDB(dbName);
-
-assert.commandWorked(testDb.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
-// Create two collections, one with a short name and the other with a long name.
-assert.commandWorked(testDb.createCollection(shortCollName));
-assert.commandWorked(testDb.createCollection(longCollName));
-
-// Rename a short collection name to a long collection name within the same database.
-assert.commandWorked(testDb.adminCommand(
- {renameCollection: dbName + '.' + shortCollName, to: dbName + '.' + longCollNameRename}));
-
-assert.eq(true, testDb.getCollection(longCollNameRename).drop());
-assert.commandWorked(testDb.createCollection(shortCollName));
-
-// Rename a short collection name to a long collection name in a different database.
-assert.commandWorked(testDb.adminCommand(
- {renameCollection: dbName + '.' + shortCollName, to: renameDbName + '.' + longCollNameRename}));
-
-assert.eq(true, testDb.getSiblingDB(renameDbName).getCollection(longCollNameRename).drop());
-assert.commandWorked(testDb.createCollection(shortCollName));
-
-MongoRunner.stopMongod(conn);
-
-/**
 * Restarting on a 4.2 binary while the FCV is 4.4 should fail to start up, but should not crash.
- */
-let restartOpts42 = Object.extend(mongodOptions42, {restart: true});
-conn = MongoRunner.runMongod(restartOpts42);
-assert.eq(null, conn, 'mongod was able to start with version ' + tojson(restartOpts42));
-
-/**
- * Restart with the 4.4 binary to set the FCV to 4.2.
- */
-let restartOpts44 = Object.extend(mongodOptions44, {restart: true});
-conn = MongoRunner.runMongod(restartOpts44);
-assert.neq(null, conn, 'mongod was unable to start with version ' + tojson(restartOpts44));
-
-testDb = conn.getDB(dbName);
-assert.commandWorked(testDb.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-MongoRunner.stopMongod(conn);
-
-/**
- * Restart with the 4.2 binary while in FCV 4.2 with long collection names present. This shouldn't
- * crash the server.
- */
-conn = MongoRunner.runMongod(restartOpts42);
-assert.neq(null, conn, 'mongod was unable to start with version ' + tojson(restartOpts42));
-
-testDb = conn.getDB(dbName);
-
-// Ensure we have the proper collections.
-let collNames = testDb.getCollectionNames();
-
-assert.eq(true, collNames.includes(shortCollName));
-assert.eq(true, collNames.includes(longCollName));
-
-MongoRunner.stopMongod(conn);
-
-/**
- * Restart with the 4.4 binary while in FCV 4.2. We shouldn't be able to create any collections with
- * long names.
- */
-conn = MongoRunner.runMongod(restartOpts44);
-assert.neq(null, conn, 'mongod was unable to start with version ' + tojson(restartOpts44));
-
-testDb = conn.getDB(dbName);
-
-// Creating a long collection name on a 4.4 binary with FCV 4.2 should fail.
-assert.commandFailedWithCode(testDb.createCollection('c'.repeat(8192)),
- ErrorCodes.IncompatibleServerVersion);
-
-// Running rename within the same database or across two databases should fail for long collection
-// names.
-assert.commandFailedWithCode(
- testDb.adminCommand(
- {renameCollection: dbName + '.' + shortCollName, to: dbName + '.' + longCollNameRename}),
- ErrorCodes.IllegalOperation);
-assert.commandFailedWithCode(testDb.adminCommand({
- renameCollection: dbName + '.' + shortCollName,
- to: renameDbName + '.' + longCollNameRename
-}),
- ErrorCodes.IllegalOperation);
-
-MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/multiVersion/migrations_with_mixed_fcv.js b/jstests/multiVersion/migrations_with_mixed_fcv.js
index 5285053b711..236efff8ecd 100644
--- a/jstests/multiVersion/migrations_with_mixed_fcv.js
+++ b/jstests/multiVersion/migrations_with_mixed_fcv.js
@@ -1,5 +1,6 @@
/*
* Tests that migrations work correctly across shards with mixed FCV state.
+ * @tags: [fix_for_fcv_46]
*/
(function() {
diff --git a/jstests/multiVersion/mixed_replica_set_with_latest_primary.js b/jstests/multiVersion/mixed_replica_set_with_latest_primary.js
index 5e35b9afc6d..10802ae523a 100644
--- a/jstests/multiVersion/mixed_replica_set_with_latest_primary.js
+++ b/jstests/multiVersion/mixed_replica_set_with_latest_primary.js
@@ -1,6 +1,7 @@
/**
* Tests initializing a mixed version replica set through the shell.
*
+ * @tags: [fix_for_fcv_46]
*/
(function() {
diff --git a/jstests/multiVersion/mr_multiversion_check_uuid.js b/jstests/multiVersion/mr_multiversion_check_uuid.js
deleted file mode 100644
index 38a82b0a0d0..00000000000
--- a/jstests/multiVersion/mr_multiversion_check_uuid.js
+++ /dev/null
@@ -1,197 +0,0 @@
-// Test that the UUID of the target collection of a mapReduce remains consistent between the config
-// server and the shards. This is in the multiversion suite since SERVER-44527 is relevant for the
-// pre-4.4 version of the mongod implementation of mapReduce, which still runs when the FCV is 4.2.
-
-// Checking UUID consistency uses cached connections, which are not valid across restarts or
-// stepdowns.
-TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-
-(function() {
-"use strict";
-load("jstests/libs/uuid_util.js");
-
-function assertCollectionNotOnShard(db, coll) {
- const listCollsRes = db.runCommand({listCollections: 1, filter: {name: coll}});
- assert.commandWorked(listCollsRes);
- assert.neq(undefined, listCollsRes.cursor);
- assert.neq(undefined, listCollsRes.cursor.firstBatch);
- assert.eq(0, listCollsRes.cursor.firstBatch.length);
-}
-
-// Set up a sharded cluster with the last-stable mongos and the latest binVersion shards. This is
-// meant to test the legacy code path in a multiversion cluster.
-const st = new ShardingTest({
- shards: 2,
- rs: {nodes: 2, binVersion: "latest"},
- other: {mongosOptions: {binVersion: "last-stable"}, chunkSize: 1}
-});
-
-const testDB = st.s0.getDB(jsTestName());
-const inputColl = testDB.srcSharded;
-
-st.adminCommand({enableSharding: testDB.getName()});
-st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
-st.adminCommand({shardCollection: inputColl.getFullName(), key: {_id: 1}});
-
-const nDistinctKeys = 512;
-const nValuesPerKey = 100;
-const nTotalDocs = nDistinctKeys * nValuesPerKey;
-
-const bulk = inputColl.initializeUnorderedBulkOp();
-for (let key = 0; key < nDistinctKeys; key++) {
- for (let value = 0; value < nValuesPerKey; value++) {
- bulk.insert({key: key, value: value});
- }
-}
-assert.commandWorked(bulk.execute());
-
-const outputColl = testDB[inputColl.getName() + "_out"];
-
-function verifyOutput(mrOutput, expectedNOutputDocs) {
- assert.commandWorked(mrOutput);
- assert.eq(expectedNOutputDocs, outputColl.find().itcount());
-}
-
-function mapFn() {
- emit(this.key, 1);
-}
-function reduceFn(key, values) {
- return Array.sum(values);
-}
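-
-// Illustrative only: since mapFn emits a single 1 per document and reduceFn sums
-// the values, each output document should have the shape
-// {_id: <key>, value: nValuesPerKey}.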
-
-(function testShardedOutput() {
- // Check that merge to an existing empty sharded collection works and preserves the UUID after
- // M/R.
- st.adminCommand({shardCollection: outputColl.getFullName(), key: {_id: 1}});
- let origUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- let out = testDB.srcSharded.mapReduce(
- mapFn, reduceFn, {out: {merge: outputColl.getName(), sharded: true}});
- verifyOutput(out, nDistinctKeys);
- let newUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- assert.eq(origUUID, newUUID);
-
- // Shard1 is the primary shard and only one chunk should have been written, so the chunk with
- // the new UUID should have been written to it.
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), outputColl.getName()));
-
- // Shard0 should not have any chunks from the output collection because all shards should have
- // returned an empty split point list in the first phase of the mapReduce, since the reduced
- // data size is far less than the chunk size setting of 1MB.
- assertCollectionNotOnShard(st.shard0.getDB(testDB.getName()), outputColl.getName());
-
-    // Split the already-sharded output collection, moving the chunk containing {_id: 2000} to
-    // shard0. All data from the result of the mapReduce will still be directed to shard1.
- st.adminCommand({split: outputColl.getFullName(), middle: {"_id": 2000}});
- st.adminCommand(
- {moveChunk: outputColl.getFullName(), find: {"_id": 2000}, to: st.shard0.shardName});
- origUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
-
- // Check that merge to an existing sharded collection that has data only on the primary shard
- // works and that the collection uses the same UUID after M/R.
- assert.commandWorked(outputColl.insert({_id: 1000}));
- out = testDB.srcSharded.mapReduce(
- mapFn, reduceFn, {out: {merge: outputColl.getName(), sharded: true}});
- verifyOutput(out, nDistinctKeys + 1);
-
- newUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- assert.eq(origUUID, newUUID);
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard0.getDB(testDB.getName()), outputColl.getName()));
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), outputColl.getName()));
-
- // Check that merge to an existing sharded collection that has data only on the non-primary
- // shard works and that the collection uses the same UUID after M/R.
- assert.commandWorked(outputColl.remove({}));
- assert.commandWorked(outputColl.insert({_id: 2001}));
-
- out = testDB.srcSharded.mapReduce(
- mapFn, reduceFn, {out: {merge: outputColl.getName(), sharded: true}});
- verifyOutput(out, nDistinctKeys + 1);
-
- newUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- assert.eq(origUUID, newUUID);
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard0.getDB(testDB.getName()), outputColl.getName()));
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), outputColl.getName()));
-
- // Check that merge to an existing sharded collection that has data on all shards works and that
- // the collection uses the same UUID after M/R.
- assert.commandWorked(outputColl.remove({}));
- assert.commandWorked(outputColl.insert([{_id: 1000}, {_id: 2001}]));
-
- origUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- out = testDB.srcSharded.mapReduce(
- mapFn, reduceFn, {out: {merge: outputColl.getName(), sharded: true}});
- verifyOutput(out, nDistinctKeys + 2);
-
- newUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- assert.eq(origUUID, newUUID);
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard0.getDB(testDB.getName()), outputColl.getName()));
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), outputColl.getName()));
-
-    // Similarly, check that reduce to an existing sharded collection that has data only on the
- // primary shard works and that the collection uses the same UUID after M/R.
- assert.commandWorked(outputColl.remove({}));
- assert.commandWorked(outputColl.insert({_id: 1000}));
- out = testDB.srcSharded.mapReduce(
- mapFn, reduceFn, {out: {reduce: outputColl.getName(), sharded: true}});
- verifyOutput(out, nDistinctKeys + 1);
-
- newUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- assert.eq(origUUID, newUUID);
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard0.getDB(testDB.getName()), outputColl.getName()));
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), outputColl.getName()));
-
-    // Check that replace to an existing sharded collection that has data on all shards works and
-    // that the collection gets a new UUID after M/R.
- assert.commandWorked(outputColl.insert({_id: 2001}));
- origUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- out = testDB.srcSharded.mapReduce(
- mapFn, reduceFn, {out: {replace: outputColl.getName(), sharded: true}});
- verifyOutput(out, nDistinctKeys);
-
- newUUID = getUUIDFromConfigCollections(st.s, outputColl.getFullName());
- assert.neq(origUUID, newUUID);
-
- // Shard1 is the primary shard and only one chunk should have been written, so the chunk with
- // the new UUID should have been written to it.
- assert.eq(newUUID,
- getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), outputColl.getName()));
-
- // Shard0 should not have any chunks from the output collection because all shards should have
- // returned an empty split point list in the first phase of the mapReduce, since the reduced
- // data size is far less than the chunk size setting of 1MB.
- assertCollectionNotOnShard(st.shard0.getDB(testDB.getName()), outputColl.getName());
-}());
-
-(function testUnshardedOutputColl() {
- // Check that replace with {sharded: true} to an existing unsharded collection works and creates
- // a sharded collection with a new UUID.
- const replaceOutput = testDB.replaceUnsharded;
- assert.commandWorked(testDB.runCommand({create: replaceOutput.getName()}));
- let origUUID =
- getUUIDFromListCollections(st.s.getDB(testDB.getName()), replaceOutput.getName());
-
- assert.commandWorked(testDB.srcSharded.mapReduce(
- mapFn, reduceFn, {out: {replace: replaceOutput.getName(), sharded: true}}));
- assert.eq(nDistinctKeys, replaceOutput.find().itcount());
-
- let newUUID = getUUIDFromConfigCollections(st.s, replaceOutput.getFullName());
- assert.neq(origUUID, newUUID);
- assert.eq(
- newUUID,
- getUUIDFromListCollections(st.shard1.getDB(testDB.getName()), replaceOutput.getName()));
-
- // Shard0 (non-primary) should not be aware of the unsharded collection.
- assertCollectionNotOnShard(st.shard0.getDB(testDB.getName()), replaceOutput.getName());
-}());
-
-st.stop();
-})();
diff --git a/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js b/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js
deleted file mode 100644
index 9cabb890862..00000000000
--- a/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Verify that a current mongos, when connected to an old mongod (one that
 * implements a different wire-protocol version), reports the resulting failures
- * properly.
- *
- * Note that the precise errors and failure modes caught here are not documented,
- * and are not depended upon by deployed systems. If improved error handling
- * results in this test failing, this test may be updated to reflect the actual
- * error reported.
- */
-
-// Checking UUID consistency involves talking to a shard node, which in this test is shutdown
-TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-
-(function() {
-
-'use strict';
-
-/* Start a ShardingTest with a 'last-stable' mongos so that a 'last-stable'
- * shard can be added. (A 'last-stable' shard cannot be added from a
- * current mongos because the wire protocol must be presumed different.)
- */
-var st = new ShardingTest({
- shards: 1,
- other: {mongosOptions: {binVersion: 'last-stable'}, shardOptions: {binVersion: 'last-stable'}}
-});
-
-assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
-assert.commandWorked(st.s.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
-
-// Start a current-version mongos.
-var newMongos = MongoRunner.runMongos({configdb: st._configDB});
-
-// Write commands report failure by returning writeError:
-
-// TODO: SERVER-43835 ReplicaSetMonitor converts all failed host errors to
-// FailedToSatisfyReadPreference. ReplicaSetMonitor just keeps retrying after
-// IncompatibleServerVersion errors and eventually fails with FailedToSatisfyReadPreference.
-assert.writeErrorWithCode(newMongos.getDB('test').foo.insert({x: 1}),
- ErrorCodes.FailedToSatisfyReadPreference);
-
-assert.writeErrorWithCode(newMongos.getDB('test').foo.update({x: 1}, {x: 1, y: 2}),
- ErrorCodes.FailedToSatisfyReadPreference);
-
-assert.writeErrorWithCode(newMongos.getDB('test').foo.remove({x: 1}),
- ErrorCodes.FailedToSatisfyReadPreference);
-
-// Query commands, on failure, throw instead:
-
-let res;
-// TODO: SERVER-43835 ReplicaSetMonitor converts all failed host errors to
-// FailedToSatisfyReadPreference.
-res = newMongos.getDB('test').runCommand({find: 'foo'});
-assert.eq(res.code, ErrorCodes.FailedToSatisfyReadPreference);
-
-res = newMongos.getDB('test').runCommand({find: 'foo', filter: {x: 1}});
-assert.eq(res.code, ErrorCodes.FailedToSatisfyReadPreference);
-
-res = newMongos.getDB('test').runCommand({aggregate: 'foo', pipeline: [], cursor: {}});
-assert.eq(res.code, ErrorCodes.FailedToSatisfyReadPreference);
-
-MongoRunner.stopMongos(newMongos);
-st.stop();
-})();
diff --git a/jstests/multiVersion/pending_chunk.js b/jstests/multiVersion/pending_chunk.js
index 5d8b938e535..f581c4f6a53 100644
--- a/jstests/multiVersion/pending_chunk.js
+++ b/jstests/multiVersion/pending_chunk.js
@@ -1,5 +1,6 @@
//
// Tests pending chunk metadata.
+// @tags: [fix_for_fcv_46]
//
(function() {
diff --git a/jstests/multiVersion/remove_ns_field_in_index_spec.js b/jstests/multiVersion/remove_ns_field_in_index_spec.js
deleted file mode 100644
index 2fcbe7d909a..00000000000
--- a/jstests/multiVersion/remove_ns_field_in_index_spec.js
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Tests that the 'ns' field in index specs is removed during metadata changes when in FCV 4.4.
 * Also tests that collection renames on a 4.4 binary with FCV 4.2 correctly modify the 'ns' field.
- *
- * Starting in 4.4, the 'ns' field for index specs is no longer generated. We want to ensure that
- * index specs created in prior server versions have their 'ns' field removed when running in FCV
- * 4.4.
- */
-(function() {
-'use strict';
-
-let dbName = 'test';
-let collName = 'coll';
-
-const dbpath = MongoRunner.dataPath + 'remove_ns_field_in_index_spec';
-resetDbpath(dbpath);
-
-const mongodOptions42 =
- Object.extend({binVersion: 'last-stable'}, {dbpath: dbpath, cleanData: false});
-const mongodOptions44 = Object.extend({binVersion: 'latest'}, {dbpath: dbpath, cleanData: false});
-
-/**
- * Start up with the 4.2 binary and create a collection. The default '_id' index should have the
- * 'ns' field present in its index spec.
- */
-let conn = MongoRunner.runMongod(mongodOptions42);
-assert.neq(null, conn, 'mongod was unable to start with version ' + tojson(mongodOptions42));
-
-let testDb = conn.getDB(dbName);
-assert.commandWorked(testDb.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-assert.commandWorked(testDb.createCollection(collName));
-
-let coll = testDb.getCollection(collName);
-
-// Create an index that will stick around across restarts.
-assert.commandWorked(coll.createIndex({a: 1}));
-
-let indexes = coll.getIndexes();
-
-assert.eq(2, indexes.length);
-assert.eq(dbName + '.' + collName, indexes[0].ns, indexes[0]);
-assert.eq(dbName + '.' + collName, indexes[1].ns, indexes[1]);
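-
-// Illustrative: on a 4.2 binary each stored spec looks roughly like
-// {v: 2, key: {a: 1}, name: 'a_1', ns: 'test.coll'}.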
-
-MongoRunner.stopMongod(conn);
-
-/**
- * Restart with the 4.4 binary while in FCV 4.2. The index should not lose its 'ns' field when doing
 * a disk-modifying metadata change.
- */
-let restartOpts44 = Object.extend(mongodOptions44, {restart: true});
-conn = MongoRunner.runMongod(restartOpts44);
-assert.neq(null, conn, 'mongod was unable to start with version ' + tojson(restartOpts44));
-
-testDb = conn.getDB(dbName);
-coll = testDb.getCollection(collName);
-
-// Run a metadata changing operation.
-assert.commandWorked(coll.createIndex({x: 1}));
-assert.commandWorked(coll.dropIndex({x: 1}));
-
-indexes = coll.getIndexes();
-assert.eq(2, indexes.length);
-assert.eq(dbName + '.' + collName, indexes[0].ns, indexes[0]);
-assert.eq(dbName + '.' + collName, indexes[1].ns, indexes[1]);
-
-// Test that renaming a collection within the same database in FCV 4.2 correctly modifies the 'ns'
-// fields for its index specs.
-const newCollName = "newColl";
-assert.commandWorked(testDb.adminCommand({
- renameCollection: coll.getFullName(),
- to: dbName + "." + newCollName,
-}));
-
-collName = newCollName;
-testDb = conn.getDB(dbName);
-coll = testDb.getCollection(collName);
-
-indexes = coll.getIndexes();
-assert.eq(2, indexes.length);
-assert.eq(dbName + '.' + collName, indexes[0].ns, indexes[0]);
-assert.eq(dbName + '.' + collName, indexes[1].ns, indexes[1]);
-
-// Test that renaming a collection to a different database in FCV 4.2 omits the 'ns' field
-// entirely. This is not intentionally different than renaming within the same db, just a
-// consequence of the implementation detail that we create new collections. 4.2 binaries can
-// handle a missing 'ns' field.
-const newDBName = "newDB";
-assert.commandWorked(testDb.adminCommand({
- renameCollection: coll.getFullName(),
- to: newDBName + "." + collName,
-}));
-
-dbName = newDBName;
-testDb = conn.getDB(dbName);
-coll = testDb.getCollection(collName);
-
-indexes = coll.getIndexes();
-assert.eq(2, indexes.length);
-assert.eq(undefined, indexes[0].ns, indexes[0]);
-assert.eq(undefined, indexes[1].ns, indexes[1]);
-
-/**
 * Set the FCV to 4.4. The index should lose its 'ns' field when doing a disk-modifying metadata
- * change.
- */
-assert.commandWorked(testDb.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
-// Run a metadata changing operation.
-assert.commandWorked(coll.createIndex({x: 1}));
-assert.commandWorked(coll.dropIndex({x: 1}));
-
-indexes = coll.getIndexes();
-
-assert.eq(2, indexes.length);
-assert.eq(false, indexes[0].hasOwnProperty('ns'));
-assert.eq(false, indexes[1].hasOwnProperty('ns'));
-
-MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/multiVersion/sortkey_meta.js b/jstests/multiVersion/sortkey_meta.js
deleted file mode 100644
index b85bc8ecdd6..00000000000
--- a/jstests/multiVersion/sortkey_meta.js
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
 * Test sortKey $meta projection behavior across feature compatibility versions.
 * - In a find projection it should work on all mongod versions and all feature compatibility
 *   versions.
 * - In aggregate it should only work on mongod 4.4, regardless of the feature compatibility
 *   version.
- *
- * We restart mongod during the test and expect it to have the same data after restarting.
- * @tags: [requires_persistence]
- */
-
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
-
-const testName = jsTest.name();
-const dbpath = MongoRunner.dataPath + testName;
-
-let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest"});
-assert.neq(null, conn, "mongod was unable to start up");
-let testDB = conn.getDB(testName);
-let coll = testDB.coll;
-coll.drop();
-
-// Explicitly set feature compatibility version to the latest version.
-assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
-// Test that we can read sortKey $meta both in find and aggregate.
-assert.doesNotThrow(() => coll.find({}, {x: {$meta: "sortKey"}}).sort({a: 1}));
-assert.doesNotThrow(() => coll.aggregate([{$sort: {a: 1}}, {$project: {x: {$meta: "sortKey"}}}]));
-
-// Set the feature compatibility version to the last-stable version.
-assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
-// Test that we can read sortKey $meta both in find and aggregate.
-assert.doesNotThrow(() => coll.find({}, {x: {$meta: "sortKey"}}).sort({a: 1}));
-assert.doesNotThrow(() => coll.aggregate([{$sort: {a: 1}}, {$project: {x: {$meta: "sortKey"}}}]));
-
-MongoRunner.stopMongod(conn);
-
-// Starting up the last-stable version of mongod.
-conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "last-stable", noCleanData: true});
-assert.neq(null,
- conn,
- `version ${MongoRunner.getBinVersionFor("last-stable")} of mongod was` +
- " unable to start up");
-testDB = conn.getDB(testName);
-coll = testDB.coll;
-
-// Test that we still can read sortKey $meta in find.
-assert.doesNotThrow(() => coll.find({}, {x: {$meta: "sortKey"}}).sort({a: 1}));
-
-// In 4.2 sortKey $meta is not supported in aggregate.
-assertErrorCode(coll, [{$sort: {a: 1}}, {$project: {x: {$meta: "sortKey"}}}], 17308);
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/multiVersion/unionWith_fcv.js b/jstests/multiVersion/unionWith_fcv.js
deleted file mode 100644
index 35b9a3c15f2..00000000000
--- a/jstests/multiVersion/unionWith_fcv.js
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Test the behavior of the $unionWith aggregation stage against a standalone/sharded cluster during
- * upgrade.
- *
- * Checking UUID consistency uses cached connections, which are not valid across restarts or
- * stepdowns.
- */
-TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-
-(function() {
-"use strict";
-
-load("jstests/multiVersion/libs/multi_cluster.js"); // For upgradeCluster.
-
-let conn = MongoRunner.runMongod({binVersion: "latest"});
-assert.neq(null, conn, "mongod was unable to start up");
-let testDB = conn.getDB(jsTestName());
-
-// Set the feature compatibility version to the last-stable version.
-assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
-// Seed the two involved collections.
-assert.commandWorked(testDB.collA.insert({fromA: 1}));
-assert.commandWorked(testDB.collB.insert({fromB: 1}));
-
-// Verify that we can still use $unionWith since the binary version is 4.4.
-const pipeline = [{$unionWith: "collB"}, {$project: {_id: 0}}];
-assert.sameMembers([{fromA: 1}, {fromB: 1}], testDB.collA.aggregate(pipeline).toArray());
-
-// Set the feature compatibility version to the latest version.
-assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
-// Verify that we can still use $unionWith.
-assert.sameMembers([{fromA: 1}, {fromB: 1}], testDB.collA.aggregate(pipeline).toArray());
-
-MongoRunner.stopMongod(conn);
-
-// Start a sharded cluster in which all mongod and mongos processes are "last-stable" binVersion.
-let st = new ShardingTest({
- shards: 2,
- rs: {nodes: 2, binVersion: "last-stable"},
- other: {mongosOptions: {binVersion: "last-stable"}}
-});
-
-testDB = st.s.getDB(jsTestName());
-assert.commandWorked(testDB.runCommand({create: "collA"}));
-st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
-
-// Seed the two involved collections.
-assert.commandWorked(testDB.collA.insert({fromA: 1}));
-assert.commandWorked(testDB.collB.insert({fromB: 1}));
-
-// Aggregations with $unionWith should fail against older binary versions.
-assert.commandFailedWithCode(
- testDB.runCommand({aggregate: "collA", pipeline: pipeline, cursor: {}}), 40324);
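-// (Error code 40324 is the "Unrecognized pipeline stage name" parse error, which
-// is what a pre-4.4 binary reports for $unionWith.)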
-
-// Upgrade the config servers and the shards to the "latest" binVersion.
-st.upgradeCluster("latest", {upgradeShards: true, upgradeConfigs: true, upgradeMongos: false});
-
-// Since mongos is still on 4.2, $unionWith should fail to parse.
-assert.commandFailedWithCode(
- testDB.runCommand({aggregate: "collA", pipeline: pipeline, cursor: {}}), 40324);
-
-// Upgrade mongos to the "latest" binVersion but keep the old FCV.
-st.upgradeCluster("latest", {upgradeShards: false, upgradeConfigs: false, upgradeMongos: true});
-testDB = st.s.getDB(jsTestName());
-
-// Now an aggregation containing $unionWith should pass because all nodes are on binary version 4.4.
-assert.sameMembers([{fromA: 1}, {fromB: 1}], testDB.collA.aggregate(pipeline).toArray());
-
-// For completeness, set the FCV to the latest.
-assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
-// Verify that $unionWith is allowed in a fully upgraded cluster.
-assert.sameMembers([{fromA: 1}, {fromB: 1}], testDB.collA.aggregate(pipeline).toArray());
-
-st.stop();
-}());