author     Cheahuychou Mao <cheahuychou.mao@mongodb.com>      2020-03-23 16:08:10 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2020-03-24 19:10:40 +0000
commit     52aa42a9f55b4d852d16cd1a53a1fdafda754129 (patch)
tree       2ffc2bc70f8a6f42a26a4cad40511b3285ccb4a4 /jstests
parent     2e9edb0ffaeb66ef09969814180bb748e8d5082d (diff)
SERVER-45017 Remove index commands upgrade/downgrade code when 4.4 becomes last stable
Diffstat (limited to 'jstests')
 -rw-r--r--  jstests/libs/override_methods/check_indexes_consistent_across_cluster.js  | 20
 -rw-r--r--  jstests/sharding/create_idx_empty_primary.js                               |  7
 -rw-r--r--  jstests/sharding/index_and_collection_option_propagation.js                | 59
 -rw-r--r--  jstests/sharding/index_commands_during_initial_split.js                    |  1
 -rw-r--r--  jstests/sharding/index_commands_shard_targeting.js                         |  1
 -rw-r--r--  jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js  | 36
 -rw-r--r--  jstests/sharding/libs/sharded_index_util.js                                |  5
 -rw-r--r--  jstests/sharding/sharding_statistics_server_status.js                      | 82
 8 files changed, 50 insertions(+), 161 deletions(-)
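
All eight files lose the same compatibility scaffolding: branches that were only needed while a 4.2 ("last-stable") mongos or FCV could still appear in the cluster. As a quick orientation before the per-file hunks, the recurring pattern being deleted looks roughly like this (a recap assembled from the removed lines below, not new test code):

    // TODO (SERVER-45017): Remove this check when v4.4 becomes last-stable.
    const isLastStableMongos = (jsTestOptions().mongosBinVersion === "last-stable");
    if (isLastStableMongos) {
        // 4.2-era expectations: the command was broadcast to every shard.
    } else {
        // 4.4 expectations: only shards that own chunks are targeted.
    }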
diff --git a/jstests/libs/override_methods/check_indexes_consistent_across_cluster.js b/jstests/libs/override_methods/check_indexes_consistent_across_cluster.js
index 7959f13b21d..61f7603f4c5 100644
--- a/jstests/libs/override_methods/check_indexes_consistent_across_cluster.js
+++ b/jstests/libs/override_methods/check_indexes_consistent_across_cluster.js
@@ -23,9 +23,6 @@ ShardingTest.prototype.checkIndexesConsistentAcrossCluster = function() {
const keyFile = this.keyFile;
- // TODO (SERVER-45017): Remove this check when v4.4 becomes last-stable.
- const isMixedVersion = this.isMixedVersionCluster();
-
/**
* Returns an array of config.collections docs for undropped collections.
*/
@@ -42,20 +39,6 @@ ShardingTest.prototype.checkIndexesConsistentAcrossCluster = function() {
*/
function makeGetIndexDocsFunc(ns) {
return () => {
- if (isMixedVersion) {
- return mongos.getCollection(ns)
- .aggregate(
- [
- {$indexStats: {}},
- {
- $group:
- {_id: "$host", indexes: {$push: {key: "$key", name: "$name"}}}
- },
- {$project: {_id: 0, host: "$_id", indexes: 1}}
- ],
- {readConcern: {level: "local"}})
- .toArray();
- }
return ShardedIndexUtil.getPerShardIndexes(mongos.getCollection(ns));
};
}
@@ -76,8 +59,7 @@ ShardingTest.prototype.checkIndexesConsistentAcrossCluster = function() {
continue;
}
- const inconsistentIndexes =
- ShardedIndexUtil.findInconsistentIndexesAcrossShards(indexDocs, isMixedVersion);
+ const inconsistentIndexes = ShardedIndexUtil.findInconsistentIndexesAcrossShards(indexDocs);
for (const shard in inconsistentIndexes) {
const shardInconsistentIndexes = inconsistentIndexes[shard];
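
The removed branch and the surviving call return differently keyed documents, which is what the ShardedIndexUtil change further down reacts to. An illustrative comparison (assumed shapes, pieced together from the removed pipeline and the example documented in jstests/sharding/libs/sharded_index_util.js):

    // Removed mixed-version path: $indexStats grouped by host, producing e.g.
    //     {host: "shard0-node:27018", indexes: [{key: {_id: 1}, name: "_id_"}]}
    // Surviving path: ShardedIndexUtil.getPerShardIndexes() keys docs by shard, e.g.
    //     {shard: "rs0", indexes: [{spec: {v: 2, key: {_id: 1}, name: "_id_"}}]}
    // This is why findInconsistentIndexesAcrossShards no longer needs the
    // isMixedVersion flag to pick between indexDoc.host and indexDoc.shard.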
diff --git a/jstests/sharding/create_idx_empty_primary.js b/jstests/sharding/create_idx_empty_primary.js
index 9b11df9f70e..ff89347f8e3 100644
--- a/jstests/sharding/create_idx_empty_primary.js
+++ b/jstests/sharding/create_idx_empty_primary.js
@@ -2,8 +2,6 @@
* Test to make sure that the createIndex command gets sent to all shards if the mongos
* version is last-stable, and to shards that own chunks only if the mongos version is
* latest.
- *
- * @tags: [need_fixing_for_46]
*/
(function() {
'use strict';
@@ -24,9 +22,6 @@ assert.commandWorked(testDB.user.insert({_id: 0}));
var res = testDB.user.ensureIndex({i: 1});
assert.commandWorked(res);
-// TODO (SERVER-45017): Remove this check when v4.4 becomes last-stable.
-const isLastStableMongos = (jsTestOptions().mongosBinVersion === "last-stable");
-
var indexes = testDB.user.getIndexes();
assert.eq(2, indexes.length);
@@ -34,7 +29,7 @@ indexes = st.rs0.getPrimary().getDB('test').user.getIndexes();
assert.eq(2, indexes.length);
indexes = st.rs1.getPrimary().getDB('test').user.getIndexes();
-assert.eq(isLastStableMongos ? 2 : 1, indexes.length);
+assert.eq(1, indexes.length);
st.stop();
})();
diff --git a/jstests/sharding/index_and_collection_option_propagation.js b/jstests/sharding/index_and_collection_option_propagation.js
index 58d06465b73..e24d8c9f064 100644
--- a/jstests/sharding/index_and_collection_option_propagation.js
+++ b/jstests/sharding/index_and_collection_option_propagation.js
@@ -8,8 +8,6 @@
* are reported in the 'raw' shard responses) as long as at least one shard returns success.
*
* This test verifies this behavior.
- *
- * @tags: [need_fixing_for_46]
*/
// This test shuts down a shard.
@@ -82,9 +80,6 @@ const dbName = "test";
const collName = "foo";
const ns = dbName + "." + collName;
-// TODO (SERVER-45017): Remove this check when v4.4 becomes last-stable.
-const isLastStableMongos = (jsTestOptions().mongosBinVersion === "last-stable");
-
var st = new ShardingTest(
{shards: {rs0: {nodes: 1}, rs1: {nodes: 1}, rs2: {nodes: 1}}, other: {config: 3}});
@@ -130,11 +125,7 @@ var res;
// createIndex
res = st.s.getDB(dbName).getCollection(collName).createIndex({"idx2": 1});
assert.commandWorked(res);
-if (isLastStableMongos) {
- assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
-} else {
- assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
-}
+assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
assert.eq(undefined, res.raw[st.shard2.host], tojson(res));
checkShardIndexes("idx2", [st.shard1], [st.shard2]);
@@ -142,11 +133,7 @@ checkShardIndexes("idx2", [st.shard1], [st.shard2]);
// dropIndex
res = st.s.getDB(dbName).getCollection(collName).dropIndex("idx1_1");
assert.commandWorked(res);
-if (isLastStableMongos) {
- assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
-} else {
- assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
-}
+assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
assert.eq(undefined, res.raw[st.shard2.host], tojson(res));
checkShardIndexes("idx1", [], [st.shard1, st.shard2]);
@@ -162,11 +149,7 @@ res = st.s.getDB(dbName).runCommand({
validationAction: "warn"
});
assert.commandWorked(res);
-if (isLastStableMongos) {
- assert.eq(res.raw[st.shard0.host].ok, 1, tojson(res));
-} else {
- assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
-}
+assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res));
assert.eq(undefined, res.raw[st.shard2.host], tojson(res));
checkShardCollOption("validator", validationOption2, [st.shard1], [st.shard2]);
@@ -184,17 +167,8 @@ assert.neq(null, res.errmsg, tojson(res));
// If all shards report the same error, the overall command error should be set to that error.
res = st.s.getDB(dbName).getCollection(collName).createIndex({});
-if (isLastStableMongos) {
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
- assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
- assert.eq(res.raw[st.shard2.host].ok, 0, tojson(res));
- assert.eq(res.code, res.raw[st.shard2.host].code, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard2.host].codeName, tojson(res));
-} else {
- assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
- assert.eq(undefined, res.raw[st.shard2.host], tojson(res));
-}
+assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
+assert.eq(undefined, res.raw[st.shard2.host], tojson(res));
assert.eq(res.raw[st.shard1.host].ok, 0, tojson(res));
assert.eq(res.code, res.raw[st.shard1.host].code, tojson(res));
assert.eq(res.codeName, res.raw[st.shard1.host].codeName, tojson(res));
@@ -205,13 +179,7 @@ assert.neq(null, res.errmsg, tojson(res));
// If all the non-ignorable errors reported by shards are the same, the overall command error
// should be set to that error.
res = st.s.getDB(dbName).getCollection(collName).createIndex({z: 1}, {unique: true});
-if (isLastStableMongos) {
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res));
- assert.eq(ErrorCodes.CannotCreateIndex, res.raw[st.shard0.host].code, tojson(res));
- assert.eq("CannotCreateIndex", res.raw[st.shard0.host].codeName, tojson(res));
-} else {
- assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
-}
+assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
assert.eq(res.raw[st.shard1.host].ok, 0, tojson(res));
assert.eq(null, res.raw[st.shard2.host], tojson(res));
assert.eq(ErrorCodes.CannotCreateIndex, res.raw[st.shard1.host].code, tojson(res));
@@ -238,19 +206,8 @@ assert(res.codeName === "HostUnreachable" || res.codeName === "FailedToSatisfyRe
// If some shard returns a non-ignorable error, it should be reported as the command error, even
// if other shards returned ignorable errors.
res = st.s.getDB(dbName).getCollection(collName).createIndex({"validIdx": 1});
-if (isLastStableMongos) {
- assert.eq(res.raw[st.shard0.host].ok, 0, tojson(res)); // shard was down
- assert.eq(res.code, res.raw[st.shard0.host].code, tojson(res));
- assert.eq(res.codeName, res.raw[st.shard0.host].codeName, tojson(res));
- // We can expect to see 'FailedToSatisfyReadPreference' this time, because after the previous
- // createIndexes attempt, mongos's ReplicaSetMonitor should have been updated.
- assert.eq(res.code, ErrorCodes.FailedToSatisfyReadPreference, tojson(res));
- assert.eq("FailedToSatisfyReadPreference", res.codeName, tojson(res));
- assert.neq(null, res.errmsg, tojson(res));
-} else {
- assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
- assert.eq(res.ok, 1, tojson(res));
-}
+assert.eq(undefined, res.raw[st.shard0.host], tojson(res));
+assert.eq(res.ok, 1, tojson(res));
assert.eq(res.raw[st.shard1.host].ok, 1, tojson(res)); // gets created on shard that owns chunks
assert.eq(undefined, res.raw[st.shard2.host], tojson(res)); // shard does not own chunks
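
The assertions in this file all inspect res.raw, the per-shard breakdown mongos attaches to responses for commands it broadcasts to shards. A rough sketch of the shape being asserted against (illustrative values; the host strings are placeholders):

    // Shards that mongos did not target are simply absent from "raw", which is
    // why the latest-mongos expectations check for `undefined`:
    // {
    //     ok: 1,
    //     raw: {
    //         "shard1-host:27018": {ok: 1},   // owns chunks, so it was targeted
    //         // st.shard0 and st.shard2 do not appear: they own no chunks
    //     }
    // }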
diff --git a/jstests/sharding/index_commands_during_initial_split.js b/jstests/sharding/index_commands_during_initial_split.js
index 27ba9c5fe39..39c70773ee9 100644
--- a/jstests/sharding/index_commands_during_initial_split.js
+++ b/jstests/sharding/index_commands_during_initial_split.js
@@ -1,7 +1,6 @@
/*
* Test that the index commands are correctly propagated if they are executed
* either before, during, or after the initial split critical section.
- * @tags: [requires_fcv_44]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/index_commands_shard_targeting.js b/jstests/sharding/index_commands_shard_targeting.js
index e3b3f8d6a40..2472f5a9d56 100644
--- a/jstests/sharding/index_commands_shard_targeting.js
+++ b/jstests/sharding/index_commands_shard_targeting.js
@@ -2,7 +2,6 @@
* Test that the index commands send and check shard versions, and only target the shards
* that have chunks for the collection. Also test that the commands fail if they are run
* when the critical section is in progress, and block until the critical section is over.
- * @tags: [requires_fcv_44]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js b/jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js
index e7508fc58c1..13004094d08 100644
--- a/jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js
+++ b/jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js
@@ -1,6 +1,5 @@
/*
* Test that the index commands abort concurrent outgoing migrations.
- * @tags: [requires_fcv_44]
*/
(function() {
"use strict";
@@ -147,40 +146,5 @@ stepNames.forEach((stepName) => {
}
});
-assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-
-stepNames.forEach((stepName) => {
- jsTest.log(
- `Testing that createIndexes in FCV 4.2 aborts concurrent outgoing migrations that are in step ${
- stepName}...`);
- const collName = "testCreateIndexesFCV42MoveChunkStep" + stepName;
- const ns = dbName + "." + collName;
-
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: shardKey}));
-
- assertCommandAbortsConcurrentOutgoingMigration(st, stepName, ns, () => {
- const coll = st.s.getCollection(ns);
-
- // Insert document into collection to avoid optimization for index creation on an empty
- // collection. This allows us to pause index builds on the collection using a fail point.
- assert.commandWorked(coll.insert({a: 1}));
-
- assert.commandWorked(coll.createIndexes([index]));
- });
-
- // Verify that the index command succeeds.
- ShardedIndexUtil.assertIndexExistsOnShard(st.shard0, dbName, collName, index);
-
- // If createIndexes is run after the migration has reached the steady state, shard1
- // will not have the index created by the command because the index just does not
- // exist when shard1 clones the collection options and indexes from shard0. However,
- // if createIndexes is run after the cloning step starts but before the steady state
- // is reached, shard0 may have the index when shard1 does the cloning so shard1 may
- // or may not have the index.
- if (stepName == moveChunkStepNames.reachedSteadyState) {
- ShardedIndexUtil.assertIndexDoesNotExistOnShard(st.shard1, dbName, collName, index);
- }
-});
-
st.stop();
})();
diff --git a/jstests/sharding/libs/sharded_index_util.js b/jstests/sharding/libs/sharded_index_util.js
index 02289eb54f2..a371e768ca2 100644
--- a/jstests/sharding/libs/sharded_index_util.js
+++ b/jstests/sharding/libs/sharded_index_util.js
@@ -71,7 +71,7 @@ var ShardedIndexUtil = (function() {
* {"shard" : "rs1",
* "indexes" : [{"spec" : {"v" : 2, "key" : {"_id" :1}, "name" : "_id_"}}]}];
*/
- let findInconsistentIndexesAcrossShards = function(indexDocs, isMixedVersion) {
+ let findInconsistentIndexesAcrossShards = function(indexDocs) {
// Find indexes that exist on all shards. For the example above:
// [{"spec" : {"v" : 2, "key" : {"_id" : 1}, "name" : "_id_"}}];
let consistentIndexes = indexDocs[0].indexes;
@@ -86,8 +86,7 @@ var ShardedIndexUtil = (function() {
for (const indexDoc of indexDocs) {
const inconsistentIndexes =
indexDoc.indexes.filter(index => !this.containsBSON(consistentIndexes, index));
- inconsistentIndexesOnShard[isMixedVersion ? indexDoc.host : indexDoc.shard] =
- inconsistentIndexes;
+ inconsistentIndexesOnShard[indexDoc.shard] = inconsistentIndexes;
}
return inconsistentIndexesOnShard;
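
With the isMixedVersion parameter gone, results are always keyed by shard name. A hypothetical usage sketch (the {x: 1} index and the "rs0"/"rs1" names are made up for illustration, following the example documents in this file):

    const indexDocs = [
        {shard: "rs0", indexes: [{spec: {v: 2, key: {_id: 1}, name: "_id_"}},
                                 {spec: {v: 2, key: {x: 1}, name: "x_1"}}]},
        {shard: "rs1", indexes: [{spec: {v: 2, key: {_id: 1}, name: "_id_"}}]},
    ];
    const result = ShardedIndexUtil.findInconsistentIndexesAcrossShards(indexDocs);
    // Only the _id index exists on both shards, so rs0 reports {x: 1} as
    // inconsistent and rs1 reports nothing.
    assert.eq(1, result["rs0"].length, tojson(result));
    assert.eq(0, result["rs1"].length, tojson(result));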
diff --git a/jstests/sharding/sharding_statistics_server_status.js b/jstests/sharding/sharding_statistics_server_status.js
index 1419732b201..2c21aa2aa17 100644
--- a/jstests/sharding/sharding_statistics_server_status.js
+++ b/jstests/sharding/sharding_statistics_server_status.js
@@ -34,10 +34,7 @@ function incrementStatsAndCheckServerShardStats(donor, recipient, numDocs) {
assert(statsFromServerStatus[i].totalCriticalSectionTimeMillis);
assert(statsFromServerStatus[i].totalDonorChunkCloneTimeMillis);
assert(statsFromServerStatus[i].countDonorMoveChunkLockTimeout);
- // TODO (SERVER-45017): Remove this mongos bin version check when v4.4 becomes last-stable.
- if (jsTestOptions().mongosBinVersion != "last-stable") {
- assert(statsFromServerStatus[i].countDonorMoveChunkAbortConflictingIndexOperation);
- }
+ assert(statsFromServerStatus[i].countDonorMoveChunkAbortConflictingIndexOperation);
assert.eq(stats[i].countDonorMoveChunkStarted,
statsFromServerStatus[i].countDonorMoveChunkStarted);
assert.eq(stats[i].countDocsClonedOnRecipient,
@@ -195,46 +192,43 @@ assert.commandWorked(donorConn.adminCommand(
//
// Tests for the count of migrations aborted due to concurrent index operations.
//
-// TODO (SERVER-45017): Remove this mongos bin version check when v4.4 becomes last-stable.
-if (jsTestOptions().mongosBinVersion != "last-stable") {
- // Counter starts at 0.
- checkServerStatusAbortedMigrationCount(donorConn, 0);
-
- // Pause a migration after cloning starts.
- pauseMoveChunkAtStep(donorConn, moveChunkStepNames.startedMoveChunk);
- moveChunkThread =
- new Thread(runConcurrentMoveChunk, st.s.host, dbName + "." + collName, st.shard1.shardName);
- moveChunkThread.start();
- waitForMoveChunkStep(donorConn, moveChunkStepNames.startedMoveChunk);
-
- // Run an index command.
- assert.commandWorked(coll.createIndexes([index]));
-
- // Unpause the migration and verify that it gets aborted.
- unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.startedMoveChunk);
- moveChunkThread.join();
- assert.commandFailedWithCode(moveChunkThread.returnData(), ErrorCodes.Interrupted);
-
- checkServerStatusAbortedMigrationCount(donorConn, 1);
-
- // Pause a migration before entering the critical section.
- pauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
- moveChunkThread =
- new Thread(runConcurrentMoveChunk, st.s.host, dbName + "." + collName, st.shard1.shardName);
- moveChunkThread.start();
- waitForMoveChunkStep(donorConn, moveChunkStepNames.reachedSteadyState);
-
- // Run an index command.
- assert.commandWorked(
- st.s.getDB(dbName).runCommand({collMod: collName, validator: {x: {$type: "string"}}}));
-
- // Unpause the migration and verify that it gets aborted.
- unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
- moveChunkThread.join();
- assert.commandFailedWithCode(moveChunkThread.returnData(), ErrorCodes.Interrupted);
-
- checkServerStatusAbortedMigrationCount(donorConn, 2);
-}
+// Counter starts at 0.
+checkServerStatusAbortedMigrationCount(donorConn, 0);
+
+// Pause a migration after cloning starts.
+pauseMoveChunkAtStep(donorConn, moveChunkStepNames.startedMoveChunk);
+moveChunkThread =
+ new Thread(runConcurrentMoveChunk, st.s.host, dbName + "." + collName, st.shard1.shardName);
+moveChunkThread.start();
+waitForMoveChunkStep(donorConn, moveChunkStepNames.startedMoveChunk);
+
+// Run an index command.
+assert.commandWorked(coll.createIndexes([index]));
+
+// Unpause the migration and verify that it gets aborted.
+unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.startedMoveChunk);
+moveChunkThread.join();
+assert.commandFailedWithCode(moveChunkThread.returnData(), ErrorCodes.Interrupted);
+
+checkServerStatusAbortedMigrationCount(donorConn, 1);
+
+// Pause a migration before entering the critical section.
+pauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
+moveChunkThread =
+ new Thread(runConcurrentMoveChunk, st.s.host, dbName + "." + collName, st.shard1.shardName);
+moveChunkThread.start();
+waitForMoveChunkStep(donorConn, moveChunkStepNames.reachedSteadyState);
+
+// Run an index command.
+assert.commandWorked(
+ st.s.getDB(dbName).runCommand({collMod: collName, validator: {x: {$type: "string"}}}));
+
+// Unpause the migration and verify that it gets aborted.
+unpauseMoveChunkAtStep(donorConn, moveChunkStepNames.reachedSteadyState);
+moveChunkThread.join();
+assert.commandFailedWithCode(moveChunkThread.returnData(), ErrorCodes.Interrupted);
+
+checkServerStatusAbortedMigrationCount(donorConn, 2);
st.stop();
})();
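
The aborted-migration assertions above lean on checkServerStatusAbortedMigrationCount, which is defined earlier in this test and not shown in the diff. A minimal sketch of what such a check might look like, assuming the counter lives in serverStatus().shardingStatistics as the field names above suggest:

    // Hypothetical helper sketch; the real helper in the test may differ.
    function checkServerStatusAbortedMigrationCount(conn, expectedCount) {
        const stats =
            assert.commandWorked(conn.adminCommand({serverStatus: 1})).shardingStatistics;
        assert.eq(expectedCount,
                  stats.countDonorMoveChunkAbortConflictingIndexOperation,
                  tojson(stats));
    }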