summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPierlauro Sciarelli <pierlauro.sciarelli@mongodb.com>2021-01-21 11:51:21 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-01-21 14:23:20 +0000
commit9b962886132727fa5ab9863f96cbe825cb7a3ad0 (patch)
tree7cdc05d9905998f7a060af6be039f8bc7b00f518
parentefa6daec5a21e0ff396723a2f66ca5c1820d1fb5 (diff)
downloadmongo-9b962886132727fa5ab9863f96cbe825cb7a3ad0.tar.gz
SERVER-53825 Range deletions collection must not be dropped on downgrade to v4.4
-rw-r--r--jstests/multiVersion/delete_pending_range_deletions_on_downgrade.js53
-rw-r--r--src/mongo/db/commands/set_feature_compatibility_version_command.cpp13
2 files changed, 0 insertions, 66 deletions
diff --git a/jstests/multiVersion/delete_pending_range_deletions_on_downgrade.js b/jstests/multiVersion/delete_pending_range_deletions_on_downgrade.js
deleted file mode 100644
index c6c926db3e6..00000000000
--- a/jstests/multiVersion/delete_pending_range_deletions_on_downgrade.js
+++ /dev/null
@@ -1,53 +0,0 @@
-(function() {
-"use strict";
-
-load("jstests/libs/uuid_util.js");
-
-const dbName = "test";
-const collName = "foo";
-const ns = dbName + "." + collName;
-const rangeDeletionNs = "config.rangeDeletions";
-
-// Create 2 shards with 3 replicas each.
-let st = new ShardingTest({shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
-
-// Create a sharded collection with two chunks: [-inf, 50), [50, inf)
-assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
-assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
-assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 50}}));
-
-// Pause range deletion.
-let originalShard0Primary = st.rs0.getPrimary();
-originalShard0Primary.adminCommand({configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'});
-
-// Write range to deletion collection
-let deletionTask = {
- _id: UUID(),
- nss: ns,
- collectionUuid: UUID(),
- donorShardId: "unused",
- range: {min: {x: 50}, max: {x: MaxKey}},
- whenToClean: "now",
- // Mark the range as pending, otherwise the task will be processed immediately on being
- // inserted (and deleted after it's processed) rather than being deleted on setFCV downgrade.
- pending: true
-};
-
-let deletionsColl = st.shard0.getCollection(rangeDeletionNs);
-
-// Write range to deletion collection
-assert.commandWorked(deletionsColl.insert(deletionTask));
-
-// Verify deletion count.
-assert.eq(deletionsColl.find().itcount(), 1);
-
-print("setting fcv: " + lastLTSFCV);
-assert.commandWorked(st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
-checkFCV(st.shard0.getDB("admin"), lastLTSFCV);
-
-// Verify deletion count.
-assert.eq(deletionsColl.find().itcount(), 0);
-
-st.stop();
-})();
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 9ed2a2f7cea..070723236cd 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -325,10 +325,6 @@ public:
});
}
- auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- const bool isReplSet =
- replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
-
checkInitialSyncFinished(opCtx);
FeatureCompatibilityVersion::updateFeatureCompatibilityVersionDocument(
@@ -349,20 +345,11 @@ public:
return false;
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
- LOGV2(20502, "Downgrade: dropping config.rangeDeletions collection");
- migrationutil::dropRangeDeletionsCollection(opCtx);
-
if (requestedVersion < FeatureCompatibilityParams::Version::kVersion49) {
// SERVER-52632: Remove once 5.0 becomes the LastLTS
shardmetadatautil::downgradeShardConfigDatabasesEntriesToPre49(opCtx);
shardmetadatautil::downgradeShardConfigCollectionEntriesToPre49(opCtx);
}
-
-
- } else if (isReplSet || serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- // The default rwc document should only be deleted on plain replica sets and the
- // config server replica set, not on shards or standalones.
- deletePersistedDefaultRWConcernDocument(opCtx);
}
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {