author     Jack Mulrow <jack.mulrow@mongodb.com>             2020-03-23 17:05:48 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-03-27 15:16:30 +0000
commit     272a73ec66783ffbf7f954c12fed41825caa96e4 (patch)
tree       bfd85467cd384ec2422e9ef7c016d781b49c4364
parent     6694a0434f37db0d6671d05e37a54e78eb1f156b (diff)
SERVER-44034 Remove refineCollectionShardKey and config.chunks/tags upgrade/downgrade code
-rw-r--r--  jstests/multiVersion/change_streams_multi_version_cluster.js             |    2
-rw-r--r--  jstests/multiVersion/config_chunks_tags_set_fcv.js                        |  182
-rw-r--r--  jstests/multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js      |  135
-rw-r--r--  jstests/multiVersion/libs/config_chunks_tags_shared.js                    |  252
-rw-r--r--  jstests/multiVersion/map_reduce_multiversion_cluster.js                   |    2
-rw-r--r--  jstests/multiVersion/migration_between_mixed_version_mongods.js           |    1
-rw-r--r--  jstests/multiVersion/minor_version_tags_old_new_old.js                    |    1
-rw-r--r--  jstests/multiVersion/refine_collection_shard_key_fcv.js                   |   36
-rw-r--r--  jstests/sharding/mongos_get_shard_version.js                              |   19
-rw-r--r--  src/mongo/db/commands/set_feature_compatibility_version_command.cpp       |   29
-rw-r--r--  src/mongo/db/s/active_shard_collection_registry.cpp                       |   17
-rw-r--r--  src/mongo/db/s/active_shard_collection_registry.h                         |    8
-rw-r--r--  src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp   |    7
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp                        |  210
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.h                          |   16
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp       |  132
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp        |    6
-rw-r--r--  src/mongo/db/s/shardsvr_shard_collection.cpp                              |   13
-rw-r--r--  src/mongo/s/catalog/type_chunk.cpp                                        |   51
-rw-r--r--  src/mongo/s/catalog/type_chunk.h                                          |   24
-rw-r--r--  src/mongo/s/catalog/type_chunk_test.cpp                                   |   64
-rw-r--r--  src/mongo/s/catalog/type_tags.cpp                                         |    7
-rw-r--r--  src/mongo/s/catalog/type_tags.h                                           |    8
-rw-r--r--  src/mongo/s/catalog/type_tags_test.cpp                                    |   14
24 files changed, 29 insertions, 1207 deletions
diff --git a/jstests/multiVersion/change_streams_multi_version_cluster.js b/jstests/multiVersion/change_streams_multi_version_cluster.js
index 0237bf22dff..18452ec6c1a 100644
--- a/jstests/multiVersion/change_streams_multi_version_cluster.js
+++ b/jstests/multiVersion/change_streams_multi_version_cluster.js
@@ -1,7 +1,7 @@
// Verify that we can successfully resume a change stream during several different stages of a
// cluster upgrade.
//
-// @tags: [uses_change_streams, requires_replication]
+// @tags: [uses_change_streams, requires_replication, fix_for_fcv_46]
// Checking UUID consistency uses cached connections, which are not valid across restarts or
// stepdowns.
diff --git a/jstests/multiVersion/config_chunks_tags_set_fcv.js b/jstests/multiVersion/config_chunks_tags_set_fcv.js
deleted file mode 100644
index 14d14f663f4..00000000000
--- a/jstests/multiVersion/config_chunks_tags_set_fcv.js
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Tests that config.chunks and config.tags documents are correctly modified on FCV
- * upgrade/downgrade.
- */
-(function() {
-"use strict";
-
-load("jstests/libs/parallelTester.js"); // for Thread.
-load("jstests/multiVersion/libs/config_chunks_tags_shared.js");
-load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
-// Assumes ns has the following chunk layout: [-inf, -50), [-50, 0) on shard0 and [0, inf) on
-// shard 1.
-function verifyChunkOperationsFailDuringSetFCV(st, ns) {
- assert.commandFailedWithCode(st.s.adminCommand({split: ns, middle: {_id: 50}}),
- ErrorCodes.ConflictingOperationInProgress);
- verifyChunkDistribution(st, ns, [2, 1]);
-
- // Shards running with the old FCV won't automatically add a writeConcern when running
- // moveChunk or _recvChunkStart on shards, which shards running the latest FCV find
- // objectionable. So we pass an explicit writeConcern to the mongos moveChunk command (which
- // also requires secondaryThrottle: true), which causes it to be passed through to the shards.
- assert.commandFailedWithCode(st.s.adminCommand({
- moveChunk: ns,
- find: {_id: 0},
- to: st.shard0.shardName,
- secondaryThrottle: true,
- writeConcern: {w: 1}
- }),
- ErrorCodes.ConflictingOperationInProgress);
- verifyChunkDistribution(st, ns, [2, 1]);
-
- assert.commandFailedWithCode(
- st.s.adminCommand({mergeChunks: ns, bounds: [{_id: MinKey}, {_id: 0}]}),
- ErrorCodes.ConflictingOperationInProgress);
- verifyChunkDistribution(st, ns, [2, 1]);
-}
-
-// Assumes shard0 is in zone0 which contains [-inf, 0) and is not in zone1.
-function verifyZoneOperationsSucceedDuringSetFCV(st, ns) {
- assert.commandWorked(
- st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: null}));
- assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "zone0"}));
-
- assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: "zone1"}));
- assert.commandWorked(
- st.s.adminCommand({removeShardFromZone: st.shard0.shardName, zone: "zone1"}));
-}
-
-const dbName = "test";
-const chunkNs = dbName + ".chunk_coll";
-const zoneNs = dbName + ".zone_coll";
-
-const st = new ShardingTest({shards: 2});
-const configPrimary = st.configRS.getPrimary();
-
-assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, st.shard0.shardName);
-
-setUpCollectionForChunksTesting(st, chunkNs);
-setUpCollectionForZoneTesting(st, zoneNs);
-
-//
-// Verify chunk and tag documents are updated by setFeatureCompatibilityVersion.
-//
-
-checkFCV(configPrimary.getDB("admin"), latestFCV);
-
-verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: true});
-
-jsTestLog("Downgrading FCV to last stable");
-assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-checkFCV(configPrimary.getDB("admin"), lastStableFCV);
-
-verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
-
-jsTestLog("Upgrading FCV to latest");
-assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-checkFCV(configPrimary.getDB("admin"), latestFCV);
-
-verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: true});
-
-//
-// Verify operations during setFeatureCompatibilityVersion use the correct format and that setFCV
-// blocks behind in-progress shard collections on shard servers.
-//
-
-function runInProgressSetFCVTest(st, {initialFCV, desiredFCV}) {
- const pauseInSetFCVFailPointName = desiredFCV === lastStableFCV
- ? "pauseBeforeDowngradingConfigMetadata"
- : "pauseBeforeUpgradingConfigMetadata";
-
- clearRawMongoProgramOutput();
- checkFCV(configPrimary.getDB("admin"), initialFCV);
-
- // Pause setFCV to test the in-progress states.
- assert.commandWorked(configPrimary.adminCommand(
- {configureFailPoint: pauseInSetFCVFailPointName, mode: "alwaysOn"}));
-
- // Start and pause a shard collection, and verify that the setFCV blocks behind it.
- const shardCollDuringSetFCV = new Thread((host, ns) => {
- const mongosConn = new Mongo(host);
- return mongosConn.adminCommand({shardCollection: ns, key: {_id: 1}});
- }, st.s.host, dbName + ".setFCVTo" + desiredFCV);
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "pauseShardCollectionBeforeReturning", mode: "alwaysOn"}));
- shardCollDuringSetFCV.start();
- waitForFailpoint("Hit pauseShardCollectionBeforeReturning", 1 /* numTimes */);
-
- // Assert setFCV can't hit the failpoint until the shard collection completes.
- const changeFCV = new Thread((host, fcv) => {
- const mongosConn = new Mongo(host);
- return mongosConn.adminCommand({setFeatureCompatibilityVersion: fcv});
- }, st.s.host, desiredFCV);
- changeFCV.start();
- assert.throws(() => {
- waitForFailpoint("Hit " + pauseInSetFCVFailPointName, 1 /* numTimes */, 3000 /* timeout */);
- });
-
- // Unpause the shard collection and wait for setFCV to reach the failpoint.
- assert.commandWorked(st.rs0.getPrimary().adminCommand(
- {configureFailPoint: "pauseShardCollectionBeforeReturning", mode: "off"}));
- shardCollDuringSetFCV.join();
- waitForFailpoint("Hit " + pauseInSetFCVFailPointName, 1 /* numTimes */);
-
- // Verify behavior while setFCV is in progress.
- verifyChunkOperationsFailDuringSetFCV(st, chunkNs);
- verifyZoneOperationsSucceedDuringSetFCV(st, zoneNs);
- testCRUDOperations(st, chunkNs);
- testCRUDOperations(st, zoneNs);
-
- // A collection can still be sharded during setFCV and should write chunks correctly.
- verifyInitialChunks(
- st, dbName + ".newCollDuringFCV" + desiredFCV, {expectNewFormat: desiredFCV === latestFCV});
-
- // Unset the setFCV failpoint and allow setFCV to finish.
- assert.commandWorked(
- configPrimary.adminCommand({configureFailPoint: pauseInSetFCVFailPointName, mode: "off"}));
- changeFCV.join();
- assert.commandWorked(changeFCV.returnData());
- checkFCV(configPrimary.getDB("admin"), desiredFCV);
-
- verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: desiredFCV === latestFCV});
-}
-
-runInProgressSetFCVTest(st, {initialFCV: latestFCV, desiredFCV: lastStableFCV});
-runInProgressSetFCVTest(st, {initialFCV: lastStableFCV, desiredFCV: latestFCV});
-
-//
-// Test setFCV with many chunks and tags.
-//
-
-// Set up one collection whose number of chunks and zones equals the batch limit for the
-// transactions used to modify chunk and zone documents, and one that exceeds the limit, to
-// verify the batching logic in both cases.
-const txnBatchSize = 100;
-setUpCollectionWithManyChunksAndZones(
- st, dbName + ".many_at_batch_size", txnBatchSize /* numChunks */, txnBatchSize /* numZones */);
-setUpCollectionWithManyChunksAndZones(st,
- dbName + ".many_over_batch_size",
- txnBatchSize + 5 /* numChunks */,
- txnBatchSize + 5 /* numZones */);
-
-checkFCV(configPrimary.getDB("admin"), latestFCV);
-
-verifyChunks(st, {expectNewFormat: true});
-
-jsTestLog("Downgrading FCV to last stable with many chunks and zones");
-assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-checkFCV(configPrimary.getDB("admin"), lastStableFCV);
-
-verifyChunks(st, {expectNewFormat: false});
-
-jsTestLog("Upgrading FCV to latest with many chunks and zones");
-assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-checkFCV(configPrimary.getDB("admin"), latestFCV);
-
-verifyChunks(st, {expectNewFormat: true});
-
-st.stop();
-}());
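The deleted test above is built on a reusable jstest pattern: freeze a server-side command with a failpoint, assert on the intermediate state from a parallel thread, then release it. A minimal sketch of that pattern follows, assuming a running ShardingTest named st; the failpoint name is a placeholder, since the failpoints this test actually used (pauseBeforeUpgradingConfigMetadata, pauseBeforeDowngradingConfigMetadata, pauseShardCollectionBeforeReturning) are removed by this commit.

load("jstests/libs/parallelTester.js");  // for Thread.

const node = st.configRS.getPrimary();

// Enable a (hypothetical) failpoint that the command under test will block on.
assert.commandWorked(
    node.adminCommand({configureFailPoint: "someFailPointName", mode: "alwaysOn"}));

// Run the command from a parallel thread so the main thread can assert on state.
const worker = new Thread((host, fcv) => {
    return new Mongo(host).adminCommand({setFeatureCompatibilityVersion: fcv});
}, st.s.host, lastStableFCV);
worker.start();

// ... assert on behavior while the command is paused at the failpoint ...

// Release the failpoint and verify the command finished successfully.
assert.commandWorked(
    node.adminCommand({configureFailPoint: "someFailPointName", mode: "off"}));
worker.join();
assert.commandWorked(worker.returnData());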
diff --git a/jstests/multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js b/jstests/multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js
deleted file mode 100644
index 0e81aca3c6d..00000000000
--- a/jstests/multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Tests upgrading a cluster from last stable to the latest version and downgrading it back to last
- * stable, verifying the behavior of chunk and zone operations throughout the process.
- */
-
-// Checking UUID consistency uses cached connections, which are not valid across restarts or
-// stepdowns.
-TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-
-(function() {
-"use strict";
-
-load("jstests/multiVersion/libs/config_chunks_tags_shared.js");
-load("jstests/multiVersion/libs/multi_cluster.js");
-load("jstests/multiVersion/libs/multi_rs.js");
-
-// Runs commands on the config server that will use its RSM to target both shard primaries until
-// they succeed.
-function waitForConfigServerShardRSMRetarget(st) {
- assert.soonNoExcept(() => {
- assert.commandWorked(st.s.getDB("unrelated_db").unrelated_coll.insert({x: 1}));
- st.ensurePrimaryShard("unrelated_db", st.shard0.shardName);
- st.ensurePrimaryShard("unrelated_db", st.shard1.shardName);
- st.ensurePrimaryShard("unrelated_db", st.shard0.shardName);
- assert.commandWorked(st.s.getDB("unrelated_db").dropDatabase());
- return true;
- });
-}
-
-const dbName = "test";
-const chunkNs = dbName + ".chunk_coll";
-const zoneNs = dbName + ".zone_coll";
-
-// Start a cluster with two shards at the last stable version and a sharding enabled db.
-const st = new ShardingTest({
- shards: 2,
- other: {
- mongosOptions: {binVersion: "last-stable"},
- configOptions: {binVersion: "last-stable"},
- rsOptions: {binVersion: "last-stable"},
- },
- rs: {nodes: 3} // Use 3 node replica sets to allow binary changes with no downtime.
-});
-checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
-assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, st.shard0.shardName);
-
-// Set up sharded collections for targeted chunk and zone operation testing.
-setUpCollectionForChunksTesting(st, chunkNs);
-setUpCollectionForZoneTesting(st, zoneNs);
-
-// Set up another sharded collection on a different database to verify chunks and zones are updated
-// for every sharded collection.
-setUpExtraShardedCollections(st, "extra_db" /* dbName */);
-
-// Set up one collection whose number of chunks and zones equals the batch limit for the
-// transactions used to modify chunk and zone documents, and one that exceeds the limit, to
-// verify the batching logic in both cases.
-const txnBatchSize = 100;
-setUpCollectionWithManyChunksAndZones(
- st, dbName + ".many_at_batch_size", txnBatchSize /* numChunks */, txnBatchSize /* numZones */);
-setUpCollectionWithManyChunksAndZones(st,
- dbName + ".many_over_batch_size",
- txnBatchSize + 5 /* numChunks */,
- txnBatchSize + 5 /* numZones */);
-
-//
-// Upgrade back to the latest version.
-//
-
-verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
-
-jsTestLog("Upgrading config servers.");
-st.upgradeCluster("latest", {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
-
-verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
-
-jsTestLog("Upgrading shard servers.");
-st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
-
-// Manually moving a chunk will use the config server's replica set monitor to target the primary of
-// the source shard. After upgrading the shard servers above, this RSM may be stale, so run
-// operations through the config server that will use the same RSM so it picks up the new primary.
-waitForConfigServerShardRSMRetarget(st);
-
-verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
-
-jsTestLog("Upgrading mongos servers.");
-st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
-checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
-
-verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
-
-jsTestLog("Upgrade feature compatibility version to latest");
-assert.commandWorked(st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: latestFCV}));
-checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
-
-verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: true});
-
-//
-// Downgrade back to the last stable version.
-//
-
-jsTestLog("Downgrade feature compatibility version to last stable");
-assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
-
-verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
-
-jsTestLog("Downgrading mongos servers.");
-st.upgradeCluster("last-stable",
- {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
-
-verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
-
-jsTestLog("Downgrading shard servers.");
-st.upgradeCluster("last-stable",
- {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
-
-// Manually moving a chunk will use the config server's replica set monitor to target the primary of
-// the source shard. After upgrading the shard servers above, this RSM may be stale, so run
-// operations through the config server that will use the same RSM so it picks up the new primary.
-waitForConfigServerShardRSMRetarget(st);
-
-verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
-
-jsTestLog("Downgrading config servers.");
-st.upgradeCluster("last-stable",
- {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
-checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
-
-verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
-
-st.stop();
-})();
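For reference, the binary-then-FCV ordering the deleted test walked through is the standard rolling-upgrade order: config servers first, then shards, then mongos, with the FCV bump only once every binary is upgraded. A condensed sketch using the same ShardingTest helpers the test used, assuming st was started on "last-stable" binaries:

// Upgrade binaries in order: config servers, then shards, then mongos.
st.upgradeCluster("latest", {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});

// Only after all binaries are upgraded, raise the feature compatibility version.
assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));

Downgrade reverses this: lower the FCV first, then downgrade mongos, shards, and the config servers last, as the second half of the test shows.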
diff --git a/jstests/multiVersion/libs/config_chunks_tags_shared.js b/jstests/multiVersion/libs/config_chunks_tags_shared.js
deleted file mode 100644
index 01355815401..00000000000
--- a/jstests/multiVersion/libs/config_chunks_tags_shared.js
+++ /dev/null
@@ -1,252 +0,0 @@
-/**
- * Functions and variables shared between multiVersion/config_chunks_tags_set_fcv.js and
- * multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js.
- */
-
-// Sets up a collection with chunks in the format expected by the testChunkOperations() helper.
-function setUpCollectionForChunksTesting(st, ns) {
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: -50}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
-}
-
-// Sets up zones and chunks for a collection to work with the testZoneOperations() helper.
-function setUpCollectionForZoneTesting(st, ns) {
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: "zone0"}));
- assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: "zone1"}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "zone0"}));
- assert.commandWorked(
- st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: 0}, max: {_id: 50}, zone: "zone1"}));
-}
-
-// Sets up a sharded collection with the given number of chunks and zones.
-function setUpCollectionWithManyChunksAndZones(st, ns, numChunks, numZones) {
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
-
- for (let i = 0; i < numChunks - 1; i++) {
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: i}}));
- }
-
- for (let i = 0; i < numZones; i++) {
- assert.commandWorked(
- st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: "many_zones-" + i}));
- assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: ns, min: {_id: i}, max: {_id: i + 1}, zone: "many_zones-" + i}));
- }
-}
-
-function setUpExtraShardedCollections(st, dbName) {
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard1.shardName);
-
- // Set up one zone with half the key range and two chunks split at {_id: 0}.
- const ns = dbName + ".extra_coll";
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(
- st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: "extra_zone0"}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "extra_zone0"}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
-
- // Set up a sharded collection with a hashed shard key.
- const hashedNs = dbName + ".extra_coll_hashed";
- assert.commandWorked(st.s.adminCommand({shardCollection: hashedNs, key: {_id: "hashed"}}));
-}
-
-function verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat}) {
- verifyChunks(st, {expectNewFormat});
- verifyTags(st, {expectNewFormat});
-
- testChunkOperations(st, chunkNs);
- testZoneOperations(st, zoneNs);
- verifyInitialChunks(st, dbName, {expectNewFormat});
-
- verifyChunks(st, {expectNewFormat});
- verifyTags(st, {expectNewFormat});
-}
-
-function getChunks(st, ns) {
- if (ns) {
- return st.s.getDB("config").chunks.find({ns}).sort({min: 1}).toArray();
- }
- return st.s.getDB("config").chunks.find().sort({min: 1}).toArray();
-}
-
-// Asserts all chunk documents have the expected format.
-function verifyChunks(st, {ns, expectNewFormat}) {
- const chunks = getChunks(st, ns);
- assert.lte(1, chunks.length, tojson(chunks));
- chunks.forEach((chunk) => {
- if (expectNewFormat) {
- assert(chunk._id.isObjectId, tojson(chunk));
- assert.neq("string", typeof chunk._id, tojson(chunk));
- } else {
- assert(!chunk._id.isObjectId, tojson(chunk));
- assert.eq("string", typeof chunk._id, tojson(chunk));
- }
-
- let expectedChunkFields =
- ["_id", "ns", "min", "max", "shard", "lastmod", "lastmodEpoch", "history"];
-
- // Jumbo is an optional field.
- if (chunk.hasOwnProperty("jumbo")) {
- expectedChunkFields = expectedChunkFields.concat("jumbo");
- }
-
- assert.eq(Object.keys(chunk).length, expectedChunkFields.length, tojson(chunk));
- assert.hasFields(chunk, expectedChunkFields);
- });
-}
-
-function getTags(st) {
- return st.s.getDB("config").tags.find().sort({min: 1}).toArray();
-}
-
-// Asserts all tag documents have the expected format.
-function verifyTags(st, {expectNewFormat}) {
- const tags = getTags(st);
- assert.lt(1, tags.length, tojson(tags));
- tags.forEach((tag) => {
- if (expectNewFormat) {
- assert(tag._id.isObjectId, tojson(tag));
- // ObjectId returns "object" from typeof...
- // assert.neq("object", typeof tag._id, tojson(tag));
- } else {
- assert(!tag._id.isObjectId, tojson(tag));
- assert.eq("object", typeof tag._id, tojson(tag));
- }
-
- const expectedTagFields = ["_id", "ns", "tag", "min", "max"];
- assert.eq(Object.keys(tag).length, expectedTagFields.length, tojson(tag));
- assert.hasFields(tag, expectedTagFields);
- });
-}
-
-// Runs basic crud operations against the given namespace.
-function testCRUDOperations(st, ns) {
- const coll = st.s.getCollection(ns);
- assert.eq(0, coll.find().itcount());
-
- assert.commandWorked(coll.insert({_id: -5}));
- assert.commandWorked(coll.insert({_id: 5}));
-
- assert.commandWorked(coll.update({_id: -5}, {$set: {updated: true}}));
- assert.commandWorked(coll.update({_id: 5}, {$set: {updated: true}}));
-
- assert.docEq({_id: -5, updated: true}, coll.findOne({_id: -5}));
- assert.docEq({_id: 5, updated: true}, coll.findOne({_id: 5}));
-
- assert.commandWorked(coll.remove({_id: -5}, true /* justOne */));
- assert.commandWorked(coll.remove({_id: 5}, true /* justOne */));
- assert.eq(0, coll.find().itcount());
-}
-
-// Helper to verify chunks are owned by the expected shards.
-function verifyChunkDistribution(st, ns, expectedChunkDistribution) {
- for (let i = 0; i < expectedChunkDistribution.length; i++) {
- assert.eq(expectedChunkDistribution[i],
- st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
- "unexpected number of chunks on shard " + i);
- }
-}
-
-// Assumes ns has the following chunk layout: [-inf, -50), [-50, 0) on shard0 and [0, inf) on
-// shard 1.
-function testChunkOperations(st, ns) {
- verifyChunkDistribution(st, ns, [2, 1]);
-
- // Split chunk should work.
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 50}}));
- verifyChunkDistribution(st, ns, [2, 2]);
-
- testCRUDOperations(st, ns);
-
- // Move chunk should work with a control chunk.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
- verifyChunkDistribution(st, ns, [3, 1]);
-
- testCRUDOperations(st, ns);
-
- // Move chunk should work without a control chunk.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 50}, to: st.shard0.shardName}));
- verifyChunkDistribution(st, ns, [4, 0]);
-
- testCRUDOperations(st, ns);
-
- // Merge chunk should work.
- assert.commandWorked(st.s.adminCommand({mergeChunks: ns, bounds: [{_id: -50}, {_id: 50}]}));
- verifyChunkDistribution(st, ns, [3, 0]);
-
- testCRUDOperations(st, ns);
-
- // Reset the chunks to their original state.
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- verifyChunkDistribution(st, ns, [4, 0]);
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 50}, to: st.shard1.shardName}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
- verifyChunkDistribution(st, ns, [2, 2]);
- assert.commandWorked(st.s.adminCommand({mergeChunks: ns, bounds: [{_id: 0}, {_id: MaxKey}]}));
- verifyChunkDistribution(st, ns, [2, 1]);
-
- testCRUDOperations(st, ns);
-}
-
-// Assumes ns has two chunks: [-inf, 0), [0, inf), on shards 0 and 1, respectively and that shard0
-// is in zone0 which contains [-inf, 0) and shard1 is in zone1 which contains [0, 50).
-function testZoneOperations(st, ns) {
- // Verify conflicting zones can't be created.
- assert.commandFailedWithCode(
- st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: -10}, max: {_id: 0}, zone: "zone1"}),
- ErrorCodes.RangeOverlapConflict);
-
- // Verify zone boundaries are still enforced.
- assert.commandFailedWithCode(
- st.s.adminCommand({moveChunk: ns, find: {_id: -1}, to: st.shard1.shardName}),
- ErrorCodes.IllegalOperation);
-
- //
- // Verify zone ranges can be updated.
- //
-
- assert.commandWorked(
- st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: null}));
- assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "zone1"}));
-
- // Now the chunk can be moved to shard1, which is in zone1.
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
-
- // Reset the chunk and zones.
- assert.commandWorked(
- st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: null}));
- assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "zone0"}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
-}
-
-let uniqueCollCounter = 0;
-
-// Assumes ns is a non-existent namespace on a database that is sharding enabled.
-function verifyInitialChunks(st, dbName, {expectNewFormat}) {
- const ns = dbName + ".unique_coll" + uniqueCollCounter++;
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
-
- // Assert the chunks created for the new namespace are in the correct format.
- verifyChunks(st, {ns, expectNewFormat});
-
- // Clean up the new collection.
- assert.commandWorked(st.s.adminCommand({drop: ns}));
-}
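The two _id formats verifyChunks() and verifyTags() distinguish are worth making concrete: in 4.4 each config.chunks document gets an auto-generated ObjectId _id, while 4.2 derived a string _id from the namespace and the chunk's min key. A small sketch with a hypothetical namespace and shard key:

// 4.2 ("legacy") format: _id is a string built from the ns and min key.
const legacyChunk = {_id: "test.chunk_coll-_id_MinKey", ns: "test.chunk_coll", min: {_id: MinKey}};
// 4.4 format: _id is an auto-generated ObjectId.
const newChunk = {_id: ObjectId(), ns: "test.chunk_coll", min: {_id: MinKey}};

assert(newChunk._id instanceof ObjectId);
assert.eq("string", typeof legacyChunk._id);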
diff --git a/jstests/multiVersion/map_reduce_multiversion_cluster.js b/jstests/multiVersion/map_reduce_multiversion_cluster.js
index 5b7379c92ae..493dfd536c3 100644
--- a/jstests/multiVersion/map_reduce_multiversion_cluster.js
+++ b/jstests/multiVersion/map_reduce_multiversion_cluster.js
@@ -1,5 +1,7 @@
// Verify that we can run various forms of the mapReduce command during different stages of the
// cluster upgrade process.
+//
+// @tags: [fix_for_fcv_46]
// Checking UUID consistency uses cached connections, which are not valid across restarts or
// stepdowns.
diff --git a/jstests/multiVersion/migration_between_mixed_version_mongods.js b/jstests/multiVersion/migration_between_mixed_version_mongods.js
index ef9b7623f6b..bedd0e628ef 100644
--- a/jstests/multiVersion/migration_between_mixed_version_mongods.js
+++ b/jstests/multiVersion/migration_between_mixed_version_mongods.js
@@ -3,6 +3,7 @@
// donor is the latest version and the recipient the last-stable, and vice versa.
// Migrations should be successful.
//
+// @tags: [fix_for_fcv_46]
// Checking UUID consistency involves talking to a shard node, which in this test is shutdown
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
diff --git a/jstests/multiVersion/minor_version_tags_old_new_old.js b/jstests/multiVersion/minor_version_tags_old_new_old.js
index 22ce71964f9..a5f587ece78 100644
--- a/jstests/multiVersion/minor_version_tags_old_new_old.js
+++ b/jstests/multiVersion/minor_version_tags_old_new_old.js
@@ -1,3 +1,4 @@
+// @tags: [fix_for_fcv_46]
(function() {
'use strict';
diff --git a/jstests/multiVersion/refine_collection_shard_key_fcv.js b/jstests/multiVersion/refine_collection_shard_key_fcv.js
deleted file mode 100644
index 1bdf5ca6461..00000000000
--- a/jstests/multiVersion/refine_collection_shard_key_fcv.js
+++ /dev/null
@@ -1,36 +0,0 @@
-// Verifies refineCollectionShardKey can only be run when a cluster's FCV is 4.4.
-
-(function() {
-"use strict";
-
-const dbName = "test";
-const collName = "foo";
-const ns = dbName + '.' + collName;
-
-const st = new ShardingTest({shards: 1});
-const configAdminDB = st.configRS.getPrimary().getDB("admin");
-
-assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
-
-// Create an index that can be used for the following shard key refines.
-assert.commandWorked(st.s.getCollection(ns).createIndex({_id: 1, x: 1, y: 1}));
-
-// Refining a shard key succeeds in FCV 4.4.
-checkFCV(configAdminDB, latestFCV);
-assert.commandWorked(st.s.adminCommand({refineCollectionShardKey: ns, key: {_id: 1, x: 1}}));
-
-// Refining a shard key fails in FCV 4.2.
-assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
-checkFCV(configAdminDB, lastStableFCV);
-assert.commandFailedWithCode(
- st.s.adminCommand({refineCollectionShardKey: ns, key: {_id: 1, x: 1, y: 1}}),
- ErrorCodes.CommandNotSupported);
-
-// Refining a shard key succeeds after upgrading back to FCV 4.4.
-assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-checkFCV(configAdminDB, latestFCV);
-assert.commandWorked(st.s.adminCommand({refineCollectionShardKey: ns, key: {_id: 1, x: 1, y: 1}}));
-
-st.stop();
-}());
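With the FCV gate deleted, refining a shard key reduces to the two steps the removed test exercised: create an index whose prefix is the current shard key and which covers the refined key, then run refineCollectionShardKey with the extended key. A minimal sketch against a collection sharded on {_id: 1}; the "region" field is a placeholder:

const ns = "test.foo";
// The supporting index must exist before the key can be refined.
assert.commandWorked(st.s.getCollection(ns).createIndex({_id: 1, region: 1}));
assert.commandWorked(st.s.adminCommand({refineCollectionShardKey: ns, key: {_id: 1, region: 1}}));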
diff --git a/jstests/sharding/mongos_get_shard_version.js b/jstests/sharding/mongos_get_shard_version.js
index 693dc7cf5af..9a7345e4b5f 100644
--- a/jstests/sharding/mongos_get_shard_version.js
+++ b/jstests/sharding/mongos_get_shard_version.js
@@ -38,22 +38,9 @@ res = st.s.adminCommand({getShardVersion: ns, fullMetadata: true});
assert.commandWorked(res);
assert.eq(res.version.t, 1);
assert.eq(res.version.i, 0);
-if (jsTestOptions().mongosBinVersion == "last-stable") {
- assert.eq(undefined, res.chunks);
-
- // The _id format for config.chunks documents was changed in 4.4, so in the mixed version suite
- // the below size arithmetic does not hold and splitting chunks will fail with BSONObjectTooBig.
- // A mongos with the last-stable binary does not support returning chunks in getShardVersion, so
- // we can just return early.
- //
- // TODO SERVER-44034: Remove this branch when 4.4 becomes last stable.
- st.stop();
- return;
-} else {
- assert.eq(1, res.chunks.length);
- assert.eq(min, res.chunks[0][0]);
- assert.eq(max, res.chunks[0][1]);
-}
+assert.eq(1, res.chunks.length);
+assert.eq(min, res.chunks[0][0]);
+assert.eq(max, res.chunks[0][1]);
// Split the existing chunks to create a large number of chunks (> 16MB).
// This needs to be done twice since the BSONObj size limit also applies
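The branch removed above existed because a chunk document's size depends on the _id format: the 4.2 string _id grows with the namespace and shard key values, while the 4.4 ObjectId is a fixed 12 bytes, so size arithmetic tuned for one format breaks under the other. A quick shell comparison, using a hypothetical legacy _id value:

print(Object.bsonsize({_id: "test.coll-x_MinKey"}));  // grows with ns and key
print(Object.bsonsize({_id: ObjectId()}));            // fixed-size ObjectId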
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 35e77d35aa6..d9afca0aace 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -50,7 +50,6 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/active_migrations_registry.h"
-#include "mongo/db/s/active_shard_collection_registry.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/migration_util.h"
#include "mongo/db/s/sharding_state.h"
@@ -70,8 +69,6 @@ namespace {
MONGO_FAIL_POINT_DEFINE(featureCompatibilityDowngrade);
MONGO_FAIL_POINT_DEFINE(featureCompatibilityUpgrade);
-MONGO_FAIL_POINT_DEFINE(pauseBeforeDowngradingConfigMetadata); // TODO SERVER-44034: Remove.
-MONGO_FAIL_POINT_DEFINE(pauseBeforeUpgradingConfigMetadata); // TODO SERVER-44034: Remove.
MONGO_FAIL_POINT_DEFINE(failUpgrading);
MONGO_FAIL_POINT_DEFINE(failDowngrading);
@@ -225,12 +222,6 @@ public:
LOGV2(20500, "Upgrade: submitting orphaned ranges for cleanup");
migrationutil::submitOrphanRangesForCleanup(opCtx);
}
-
- // The primary shard sharding a collection will write the initial chunks for a
- // collection directly to the config server, so wait for all shard collections to
- // complete to guarantee no chunks are missed by the update on the config server.
- ActiveShardCollectionRegistry::get(opCtx).waitForActiveShardCollectionsToComplete(
- opCtx);
}
// Upgrade shards before config finishes its upgrade.
@@ -243,13 +234,6 @@ public:
cmdObj,
BSON(FeatureCompatibilityVersionCommandParser::kCommandName
<< requestedVersion)))));
-
- if (MONGO_unlikely(pauseBeforeUpgradingConfigMetadata.shouldFail())) {
- LOGV2(20501, "Hit pauseBeforeUpgradingConfigMetadata");
- pauseBeforeUpgradingConfigMetadata.pauseWhileSet(opCtx);
- }
- ShardingCatalogManager::get(opCtx)->upgradeOrDowngradeChunksAndTags(
- opCtx, ShardingCatalogManager::ConfigUpgradeType::kUpgrade);
}
FeatureCompatibilityVersion::unsetTargetUpgradeOrDowngrade(opCtx, requestedVersion);
@@ -326,12 +310,6 @@ public:
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
LOGV2(20502, "Downgrade: dropping config.rangeDeletions collection");
migrationutil::dropRangeDeletionsCollection(opCtx);
-
- // The primary shard sharding a collection will write the initial chunks for a
- // collection directly to the config server, so wait for all shard collections to
- // complete to guarantee no chunks are missed by the update on the config server.
- ActiveShardCollectionRegistry::get(opCtx).waitForActiveShardCollectionsToComplete(
- opCtx);
} else if (isReplSet || serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
// The default rwc document should only be deleted on plain replica sets and the
// config server replica set, not on shards or standalones.
@@ -348,13 +326,6 @@ public:
cmdObj,
BSON(FeatureCompatibilityVersionCommandParser::kCommandName
<< requestedVersion)))));
-
- if (MONGO_unlikely(pauseBeforeDowngradingConfigMetadata.shouldFail())) {
- LOGV2(20503, "Hit pauseBeforeDowngradingConfigMetadata");
- pauseBeforeDowngradingConfigMetadata.pauseWhileSet(opCtx);
- }
- ShardingCatalogManager::get(opCtx)->upgradeOrDowngradeChunksAndTags(
- opCtx, ShardingCatalogManager::ConfigUpgradeType::kDowngrade);
}
FeatureCompatibilityVersion::unsetTargetUpgradeOrDowngrade(opCtx, requestedVersion);
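Both removed blocks keyed off the transitional FCV state, which is observable in the admin.system.version document: while setFeatureCompatibilityVersion is in flight, a targetVersion field is present alongside version. A sketch of inspecting that state from the shell:

const fcvDoc = st.configRS.getPrimary().getDB("admin").system.version.findOne(
    {_id: "featureCompatibilityVersion"});
printjson(fcvDoc);
// Upgrading to 4.4:   {_id: ..., version: "4.2", targetVersion: "4.4"}
// Downgrading to 4.2: {_id: ..., version: "4.2", targetVersion: "4.2"}
// Steady state:       {_id: ..., version: "4.4"} (no targetVersion field)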
diff --git a/src/mongo/db/s/active_shard_collection_registry.cpp b/src/mongo/db/s/active_shard_collection_registry.cpp
index 9b667370808..d2bda7ece20 100644
--- a/src/mongo/db/s/active_shard_collection_registry.cpp
+++ b/src/mongo/db/s/active_shard_collection_registry.cpp
@@ -139,23 +139,6 @@ Status ActiveShardCollectionRegistry::ActiveShardCollectionState::constructError
<< "collection with arguments: " << activeRequest.toBSON()};
}
-void ActiveShardCollectionRegistry::waitForActiveShardCollectionsToComplete(
- OperationContext* opCtx) {
- // Take a snapshot of the currently active shard collections.
- std::vector<SharedSemiFuture<boost::optional<UUID>>> shardCollectionFutures;
- {
- stdx::lock_guard<Latch> lk(_mutex);
- for (const auto& it : _activeShardCollectionMap) {
- shardCollectionFutures.emplace_back(it.second->_uuidPromise.getFuture());
- }
- }
-
- // Synchronously wait for all futures to resolve.
- for (const auto& fut : shardCollectionFutures) {
- fut.wait(opCtx);
- }
-}
-
ScopedShardCollection::ScopedShardCollection(std::string nss,
ActiveShardCollectionRegistry* registry,
bool shouldExecute,
diff --git a/src/mongo/db/s/active_shard_collection_registry.h b/src/mongo/db/s/active_shard_collection_registry.h
index ba932d455e2..91423d65d7c 100644
--- a/src/mongo/db/s/active_shard_collection_registry.h
+++ b/src/mongo/db/s/active_shard_collection_registry.h
@@ -74,14 +74,6 @@ public:
StatusWith<ScopedShardCollection> registerShardCollection(
const ShardsvrShardCollection& request);
- /**
- * Takes a snapshot of all currently active shard collections and synchronously waits for each
- * to complete.
- *
- * TODO SERVER-44034: Remove this method.
- */
- void waitForActiveShardCollectionsToComplete(OperationContext* opCtx);
-
private:
friend class ScopedShardCollection;
diff --git a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
index e6bbec377eb..dbbcef17264 100644
--- a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
+++ b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
@@ -38,7 +38,6 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/shard_key_util.h"
-#include "mongo/db/server_options.h"
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/grid.h"
@@ -66,12 +65,6 @@ public:
"refineCollectionShardKey must be called with majority writeConcern",
opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
- uassert(ErrorCodes::CommandNotSupported,
- "'refineCollectionShardKey' is only supported in feature compatibility version "
- "4.4",
- serverGlobalParams.featureCompatibility.getVersion() ==
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44);
-
// Set the operation context read concern level to local for reads into the config
// database.
repl::ReadConcernArgs::get(opCtx) =
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 6e8ae6c1700..33eb7c4c3b2 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -33,28 +33,19 @@
#include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/db/auth/authorization_session_impl.h"
-#include "mongo/db/commands/txn_cmds_gen.h"
-#include "mongo/db/logical_session_cache.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/balancer/type_migration.h"
-#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/config_server_version.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_config_version.h"
-#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/database_version_gen.h"
#include "mongo/s/grid.h"
-#include "mongo/s/write_ops/batched_command_request.h"
-#include "mongo/s/write_ops/batched_command_response.h"
-#include "mongo/transport/service_entry_point.h"
namespace mongo {
namespace {
@@ -65,75 +56,6 @@ const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::
const auto getShardingCatalogManager =
ServiceContext::declareDecoration<boost::optional<ShardingCatalogManager>>();
-OpMsg runCommandInLocalTxn(OperationContext* opCtx,
- StringData db,
- bool startTransaction,
- TxnNumber txnNumber,
- BSONObj cmdObj) {
- BSONObjBuilder bob(std::move(cmdObj));
- if (startTransaction) {
- bob.append("startTransaction", true);
- }
- bob.append("autocommit", false);
- bob.append(OperationSessionInfo::kTxnNumberFieldName, txnNumber);
-
- BSONObjBuilder lsidBuilder(bob.subobjStart("lsid"));
- opCtx->getLogicalSessionId()->serialize(&lsidBuilder);
- lsidBuilder.doneFast();
-
- return OpMsg::parseOwned(
- opCtx->getServiceContext()
- ->getServiceEntryPoint()
- ->handleRequest(opCtx,
- OpMsgRequest::fromDBAndBody(db.toString(), bob.obj()).serialize())
- .response);
-}
-
-void insertDocumentsInLocalTxn(OperationContext* opCtx,
- const NamespaceString& nss,
- std::vector<BSONObj> docs,
- bool startTransaction,
- TxnNumber txnNumber) {
- BatchedCommandRequest request([&] {
- write_ops::Insert insertOp(nss);
- insertOp.setDocuments(std::move(docs));
- return insertOp;
- }());
-
- uassertStatusOK(getStatusFromWriteCommandReply(
- runCommandInLocalTxn(opCtx, nss.db(), startTransaction, txnNumber, request.toBSON()).body));
-}
-
-void removeDocumentsInLocalTxn(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& query,
- bool startTransaction,
- TxnNumber txnNumber) {
- BatchedCommandRequest request([&] {
- write_ops::Delete deleteOp(nss);
- deleteOp.setDeletes({[&] {
- write_ops::DeleteOpEntry entry;
- entry.setQ(query);
- entry.setMulti(true);
- return entry;
- }()});
- return deleteOp;
- }());
-
- uassertStatusOK(getStatusFromWriteCommandReply(
- runCommandInLocalTxn(opCtx, nss.db(), startTransaction, txnNumber, request.toBSON()).body));
-}
-
-void commitLocalTxn(OperationContext* opCtx, TxnNumber txnNumber) {
- uassertStatusOK(
- getStatusFromCommandResult(runCommandInLocalTxn(opCtx,
- NamespaceString::kAdminDb,
- false /* startTransaction */,
- txnNumber,
- BSON(CommitTransaction::kCommandName << 1))
- .body));
-}
-
} // namespace
void ShardingCatalogManager::create(ServiceContext* serviceContext,
@@ -416,138 +338,6 @@ Lock::ExclusiveLock ShardingCatalogManager::lockZoneMutex(OperationContext* opCt
return lk;
}
-// TODO SERVER-44034: Remove this function.
-void deleteAndInsertChunk(OperationContext* opCtx,
- const BSONObj& chunkDoc,
- bool startTransaction,
- TxnNumber txnNumber,
- ShardingCatalogManager::ConfigUpgradeType upgradeType) {
- auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(chunkDoc));
-
- removeDocumentsInLocalTxn(
- opCtx,
- ChunkType::ConfigNS,
- BSON(ChunkType::ns(chunk.getNS().ns()) << ChunkType::min(chunk.getMin())),
- startTransaction,
- txnNumber);
-
- insertDocumentsInLocalTxn(
- opCtx,
- ChunkType::ConfigNS,
- {upgradeType == ShardingCatalogManager::ConfigUpgradeType::kUpgrade
- // Note that ChunkType::toConfigBSON() will not include an _id if one hasn't been set,
- // which will be the case for chunks written in the 4.2 format because parsing ignores
- // _ids in the 4.2 format, so the insert path will generate one for us.
- ? chunk.toConfigBSON()
- : chunk.toConfigBSONLegacyID()},
- false /* startTransaction */,
- txnNumber);
-}
-
-// TODO SERVER-44034: Remove this function.
-void deleteAndInsertTag(OperationContext* opCtx,
- const BSONObj& tagDoc,
- bool startTransaction,
- TxnNumber txnNumber,
- ShardingCatalogManager::ConfigUpgradeType upgradeType) {
- auto tag = uassertStatusOK(TagsType::fromBSON(tagDoc));
-
- removeDocumentsInLocalTxn(
- opCtx,
- TagsType::ConfigNS,
- BSON(TagsType::ns(tag.getNS().ns()) << TagsType::min(tag.getMinKey())),
- startTransaction,
- txnNumber);
-
- insertDocumentsInLocalTxn(opCtx,
- TagsType::ConfigNS,
- {upgradeType == ShardingCatalogManager::ConfigUpgradeType::kUpgrade
- // Note that TagsType::toBSON() will not include an _id, so the
- // insert path will generate one for us.
- ? tag.toBSON()
- : tag.toBSONLegacyID()},
- false /* startTransaction */,
- txnNumber);
-}
-
-// TODO SERVER-44034: Remove this function and type.
-using ConfigDocModFunction = std::function<void(
- OperationContext*, BSONObj, bool, TxnNumber, ShardingCatalogManager::ConfigUpgradeType)>;
-void forEachConfigDocInBatchedTransactions(OperationContext* opCtx,
- const NamespaceString& configNss,
- const NamespaceString& shardedCollNss,
- ConfigDocModFunction configDocModFn,
- ShardingCatalogManager::ConfigUpgradeType upgradeType) {
- const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- auto findResponse = uassertStatusOK(
- configShard->exhaustiveFindOnConfig(opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- configNss,
- BSON("ns" << shardedCollNss.ns()),
- {},
- boost::none /* limit */));
-
- AlternativeSessionRegion asr(opCtx);
- AuthorizationSession::get(asr.opCtx()->getClient())
- ->grantInternalAuthorization(asr.opCtx()->getClient());
- TxnNumber txnNumber = 0;
-
- const auto batchSizeLimit = 100;
- auto currentBatchSize = 0;
- for (const auto& doc : findResponse.docs) {
- auto startTransaction = currentBatchSize == 0;
-
- configDocModFn(asr.opCtx(), doc, startTransaction, txnNumber, upgradeType);
-
- currentBatchSize += 1;
- if (currentBatchSize == batchSizeLimit) {
- commitLocalTxn(asr.opCtx(), txnNumber);
- txnNumber += 1;
- currentBatchSize = 0;
- }
- }
-
- if (currentBatchSize != 0) {
- commitLocalTxn(asr.opCtx(), txnNumber);
- }
-}
-
-void ShardingCatalogManager::upgradeOrDowngradeChunksAndTags(OperationContext* opCtx,
- ConfigUpgradeType upgradeType) {
- const auto grid = Grid::get(opCtx);
- auto allDbs = uassertStatusOK(grid->catalogClient()->getAllDBs(
- opCtx, repl::ReadConcernLevel::kLocalReadConcern))
- .value;
-
- // The 'config' database contains the sharded 'config.system.sessions' collection but does not
- // have an entry in config.databases.
- allDbs.emplace_back("config", ShardId("config"), true, DatabaseVersion());
-
- for (const auto& db : allDbs) {
- auto collections = uassertStatusOK(grid->catalogClient()->getCollections(
- opCtx, &db.getName(), nullptr, repl::ReadConcernLevel::kLocalReadConcern));
-
- for (const auto& coll : collections) {
- if (coll.getDropped()) {
- continue;
- }
-
- {
- Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
- forEachConfigDocInBatchedTransactions(
- opCtx, ChunkType::ConfigNS, coll.getNs(), deleteAndInsertChunk, upgradeType);
- }
-
- {
- Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
- forEachConfigDocInBatchedTransactions(
- opCtx, TagsType::ConfigNS, coll.getNs(), deleteAndInsertTag, upgradeType);
- }
- }
- }
-}
-
StatusWith<bool> ShardingCatalogManager::_isShardRequiredByZoneStillInUse(
OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
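The removed forEachConfigDocInBatchedTransactions() capped each internal transaction at 100 document rewrites and committed any trailing partial batch separately. The shape of that batching, sketched in shell JavaScript, where configDB is a handle to the config database and rewriteBatchInOneTxn() is a hypothetical stand-in for the delete-and-reinsert transaction:

const batchSizeLimit = 100;
let batch = [];
configDB.chunks.find({ns: "test.coll"}).forEach((doc) => {
    batch.push(doc);
    if (batch.length === batchSizeLimit) {
        rewriteBatchInOneTxn(batch);  // one multi-statement transaction per full batch
        batch = [];
    }
});
if (batch.length > 0) {
    rewriteBatchInOneTxn(batch);  // commit the final partial batch
}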
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index ad32e5cfe82..8c327def29c 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -94,14 +94,6 @@ public:
~ShardingCatalogManager();
/**
- * Indicates the desired modification to the config.chunks and config.tags collections during
- * setFeatureCompatibilityVersion.
- *
- * TODO SERVER-44034: Remove this enum.
- */
- enum class ConfigUpgradeType { kUpgrade, kDowngrade };
-
- /**
* Instantiates an instance of the sharding catalog manager and installs it on the specified
* service context. This method is not thread-safe and must be called only once when the service
* is starting.
@@ -400,14 +392,6 @@ public:
*/
Status setFeatureCompatibilityVersionOnShards(OperationContext* opCtx, const BSONObj& cmdObj);
- /**
- * Changes the _id format of all documents in config.chunks and config.tags to use either the
- * format introduced in 4.4 or the format expected by a 4.2 binary.
- *
- * TODO SERVER-44034: Remove this method.
- */
- void upgradeOrDowngradeChunksAndTags(OperationContext* opCtx, ConfigUpgradeType upgradeType);
-
//
// For Diagnostics
//
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 10f8ff1ef36..1c2fb915e72 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -64,13 +64,6 @@ MONGO_FAIL_POINT_DEFINE(skipExpiringOldChunkHistory);
const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::UNSET, Seconds(0));
-bool isUpgradingOrDowngradingFCV() {
- return (serverGlobalParams.featureCompatibility.getVersion() ==
- ServerGlobalParams::FeatureCompatibility::Version::kUpgradingTo44) ||
- (serverGlobalParams.featureCompatibility.getVersion() ==
- ServerGlobalParams::FeatureCompatibility::Version::kDowngradingTo42);
-}
-
/**
* Append min, max and version information from chunk to the buffer for logChange purposes.
*/
@@ -87,8 +80,6 @@ void appendShortVersion(BufBuilder* out, const ChunkType& chunk) {
BSONArray buildMergeChunksTransactionUpdates(const std::vector<ChunkType>& chunksToMerge,
const ChunkVersion& mergeVersion,
const boost::optional<Timestamp>& validAfter) {
- invariant(!isUpgradingOrDowngradingFCV());
-
BSONArrayBuilder updates;
// Build an update operation to expand the first chunk into the newly merged chunk
@@ -109,18 +100,10 @@ BSONArray buildMergeChunksTransactionUpdates(const std::vector<ChunkType>& chunk
mergedChunk.setHistory({ChunkHistory(validAfter.get(), mergedChunk.getShard())});
// add the new chunk information as the update object
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- op.append("o", mergedChunk.toConfigBSON());
+ op.append("o", mergedChunk.toConfigBSON());
- // query object
- op.append("o2", BSON(ChunkType::name(mergedChunk.getName())));
- } else {
- op.append("o", mergedChunk.toConfigBSONLegacyID());
-
- // query object
- op.append("o2", BSON(ChunkType::legacyName(mergedChunk.getLegacyName())));
- }
+ // query object
+ op.append("o2", BSON(ChunkType::name(mergedChunk.getName())));
updates.append(op.obj());
}
@@ -132,12 +115,7 @@ BSONArray buildMergeChunksTransactionUpdates(const std::vector<ChunkType>& chunk
op.append("op", "d");
op.append("ns", ChunkType::ConfigNS.ns());
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- op.append("o", BSON(ChunkType::name(chunksToMerge[i].getName())));
- } else {
- op.append("o", BSON(ChunkType::legacyName(chunksToMerge[i].getLegacyName())));
- }
+ op.append("o", BSON(ChunkType::name(chunksToMerge[i].getName())));
updates.append(op.obj());
}
@@ -198,8 +176,8 @@ Status checkChunkMatchesRequest(OperationContext* opCtx,
const auto currentChunk =
uassertStatusOK(ChunkType::fromConfigBSON(findResponseWith.getValue().docs.front()));
- // In the FCV 4.4 protocol, additionally check that the chunk's version matches what's in the
- // request.
+ // In the FCV 4.4 protocol, additionally check that the chunk's version matches what's in
+ // the request.
if (serverGlobalParams.featureCompatibility.getVersion() ==
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
uassert(ErrorCodes::ConflictingOperationInProgress,
@@ -227,8 +205,6 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
const boost::optional<ChunkType>& controlChunk,
StringData fromShard,
StringData toShard) {
- invariant(!isUpgradingOrDowngradingFCV());
-
// Update migratedChunk's version and shard.
BSONArrayBuilder updates;
{
@@ -238,12 +214,7 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
op.append("ns", ChunkType::ConfigNS.ns());
BSONObjBuilder n(op.subobjStart("o"));
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- n.append(ChunkType::name(), migratedChunk.getName());
- } else {
- n.append(ChunkType::legacyName(), ChunkType::genLegacyID(nss, migratedChunk.getMin()));
- }
+ n.append(ChunkType::name(), migratedChunk.getName());
migratedChunk.getVersion().appendLegacyWithField(&n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), migratedChunk.getMin());
@@ -253,12 +224,7 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
n.done();
BSONObjBuilder q(op.subobjStart("o2"));
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- q.append(ChunkType::name(), migratedChunk.getName());
- } else {
- q.append(ChunkType::legacyName(), ChunkType::genLegacyID(nss, migratedChunk.getMin()));
- }
+ q.append(ChunkType::name(), migratedChunk.getName());
q.done();
updates.append(op.obj());
@@ -272,12 +238,7 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
op.append("ns", ChunkType::ConfigNS.ns());
BSONObjBuilder n(op.subobjStart("o"));
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- n.append(ChunkType::name(), controlChunk->getName());
- } else {
- n.append(ChunkType::legacyName(), ChunkType::genLegacyID(nss, controlChunk->getMin()));
- }
+ n.append(ChunkType::name(), controlChunk->getName());
controlChunk->getVersion().appendLegacyWithField(&n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), controlChunk->getMin());
@@ -288,20 +249,15 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
n.done();
BSONObjBuilder q(op.subobjStart("o2"));
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- q.append(ChunkType::name(), controlChunk->getName());
- } else {
- q.append(ChunkType::legacyName(), controlChunk->getLegacyName());
- }
+ q.append(ChunkType::name(), controlChunk->getName());
q.done();
updates.append(op.obj());
}
- // Do not give applyOps a write concern. If applyOps tries to wait for replication, it will fail
- // because of the GlobalWrite lock CommitChunkMigration already holds. Replication will not be
- // able to take the lock it requires.
+ // Do not give applyOps a write concern. If applyOps tries to wait for replication, it will
+ // fail because of the GlobalWrite lock CommitChunkMigration already holds. Replication will
+ // not be able to take the lock it requires.
return BSON("applyOps" << updates.arr());
}
@@ -367,14 +323,6 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// move chunks on different collections to proceed in parallel
Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
- // The _id format for chunk documents changed in 4.4, so during an upgrade or downgrade it is
- // not known which format the chunks are currently in. Splitting a chunk requires knowing the
- // _id of the chunk being split, so to avoid confusing failures, splitting is disabled.
- uassert(ErrorCodes::ConflictingOperationInProgress,
- "Chunks cannot be split while a feature compatibility version upgrade or downgrade is "
- "in progress",
- !isUpgradingOrDowngradingFCV());
-
// Get the max chunk version for this namespace.
auto swCollVersion = getMaxChunkVersionFromQueryResponse(
nss,
@@ -444,7 +392,6 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
auto shouldTakeOriginalChunkID = true;
OID chunkID;
- std::string legacyChunkID;
BSONArrayBuilder updates;
@@ -484,13 +431,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// First chunk takes ID of the original chunk and all other chunks get new IDs. This occurs
// because we perform an update operation below (with upsert true). Keeping the original ID
// ensures we overwrite the old chunk (before the split) without having to perform a delete.
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- chunkID = shouldTakeOriginalChunkID ? origChunk.getValue().getName() : OID::gen();
- } else {
- legacyChunkID = shouldTakeOriginalChunkID ? origChunk.getValue().getLegacyName()
- : ChunkType::genLegacyID(nss, startKey);
- }
+ chunkID = shouldTakeOriginalChunkID ? origChunk.getValue().getName() : OID::gen();
shouldTakeOriginalChunkID = false;
// build an update operation against the chunks collection of the config database
@@ -502,12 +443,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// add the modified (new) chunk information as the update object
BSONObjBuilder n(op.subobjStart("o"));
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- n.append(ChunkType::name(), chunkID);
- } else {
- n.append(ChunkType::legacyName(), legacyChunkID);
- }
+ n.append(ChunkType::name(), chunkID);
currentMaxVersion.appendLegacyWithField(&n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), startKey);
@@ -520,12 +456,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// add the chunk's _id as the query part of the update statement
BSONObjBuilder q(op.subobjStart("o2"));
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- q.append(ChunkType::name(), chunkID);
- } else {
- q.append(ChunkType::legacyName(), legacyChunkID);
- }
+ q.append(ChunkType::name(), chunkID);
q.done();
updates.append(op.obj());
@@ -620,14 +551,6 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
// move chunks on different collections to proceed in parallel
Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
- // The _id format for chunk documents changed in 4.4, so during an upgrade or downgrade it is
- // not known which format the chunks are currently in. Merging a chunk requires knowing the
- // _id of the chunks being merged, so to avoid confusing failures, merging is disabled.
- uassert(ErrorCodes::ConflictingOperationInProgress,
- "Chunks cannot be merged while a feature compatibility version upgrade or downgrade is "
- "in progress",
- !isUpgradingOrDowngradingFCV());
-
if (!validAfter) {
return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"};
}
@@ -682,10 +605,7 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
return itOrigChunk.getStatus();
}
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- itChunk.setName(itOrigChunk.getValue().getName());
- }
+ itChunk.setName(itOrigChunk.getValue().getName());
// Ensure the chunk boundaries are strictly increasing
if (chunkBoundaries[i].woCompare(itChunk.getMin()) <= 0) {
@@ -779,14 +699,6 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
// (Note: This is not needed while we have a global lock, taken here only for consistency.)
Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
- // The _id format for chunk documents changed in 4.4, so during an upgrade or downgrade it is
- // not known which format the chunks are currently in. Moving a chunk requires knowing the
- // _id of the chunks being moved, so to avoid confusing failures, migrations are disabled.
- uassert(ErrorCodes::ConflictingOperationInProgress,
- "Chunks cannot be migrated while a feature compatibility version upgrade or downgrade "
- "is in progress",
- !isUpgradingOrDowngradingFCV());
-
if (!validAfter) {
return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"};
}
@@ -857,10 +769,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
// Generate the new versions of migratedChunk and controlChunk. Migrating chunk's minor version
// will be 0.
ChunkType newMigratedChunk = migratedChunk;
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- newMigratedChunk.setName(origChunk.getValue().getName());
- }
+ newMigratedChunk.setName(origChunk.getValue().getName());
newMigratedChunk.setShard(toShard);
newMigratedChunk.setVersion(ChunkVersion(
currentCollectionVersion.majorVersion() + 1, 0, currentCollectionVersion.epoch()));
@@ -904,10 +813,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
}
newControlChunk = origControlChunk.getValue();
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- newControlChunk->setName(origControlChunk.getValue().getName());
- }
+ newControlChunk->setName(origControlChunk.getValue().getName());
newControlChunk->setVersion(ChunkVersion(
currentCollectionVersion.majorVersion() + 1, 1, currentCollectionVersion.epoch()));
}
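[Editor's note] The version bump rule above is easy to misread in diff form: both chunks land on the same new major version and are distinguished only by minor version. A self-contained toy model (the real ChunkVersion also carries an epoch, omitted here):

    #include <cstdint>
    #include <iostream>

    struct ChunkVersion {
        uint32_t major;
        uint32_t minor;
    };

    int main() {
        const ChunkVersion currentCollectionVersion{5, 7};

        // Migrated chunk: new major version, minor 0.
        const ChunkVersion migrated{currentCollectionVersion.major + 1, 0};
        // Control chunk (remains on the donor): same new major, minor 1.
        const ChunkVersion control{currentCollectionVersion.major + 1, 1};

        std::cout << "migrated " << migrated.major << "|" << migrated.minor
                  << ", control " << control.major << "|" << control.minor << "\n";
        // prints: migrated 6|0, control 6|1
    }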
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index 2c199a69337..712662776d7 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -37,7 +37,6 @@
#include "mongo/client/read_preference.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/balancer/balancer_policy.h"
-#include "mongo/db/server_options.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_collection.h"
@@ -332,11 +331,6 @@ Status ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
BSONObj updateQuery(BSON(TagsType::ns(nss.ns()) << TagsType::min(fullShardKeyRange.getMin())));
BSONObjBuilder updateBuilder;
- if (serverGlobalParams.featureCompatibility.getVersion() <=
- ServerGlobalParams::FeatureCompatibility::Version::kDowngradingTo42) {
- updateBuilder.append(
- "_id", BSON(TagsType::ns(nss.ns()) << TagsType::min(fullShardKeyRange.getMin())));
- }
updateBuilder.append(TagsType::ns(), nss.ns());
updateBuilder.append(TagsType::min(), fullShardKeyRange.getMin());
updateBuilder.append(TagsType::max(), fullShardKeyRange.getMax());
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index 2bbf98f14b5..005c82005bb 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -76,7 +76,6 @@ MONGO_FAIL_POINT_DEFINE(pauseShardCollectionBeforeCriticalSection);
MONGO_FAIL_POINT_DEFINE(pauseShardCollectionReadOnlyCriticalSection);
MONGO_FAIL_POINT_DEFINE(pauseShardCollectionCommitPhase);
MONGO_FAIL_POINT_DEFINE(pauseShardCollectionAfterCriticalSection);
-MONGO_FAIL_POINT_DEFINE(pauseShardCollectionBeforeReturning);
struct ShardCollectionTargetState {
UUID uuid;
@@ -434,12 +433,7 @@ void writeFirstChunksToConfig(OperationContext* opCtx,
std::vector<BSONObj> chunkObjs;
chunkObjs.reserve(initialChunks.chunks.size());
for (const auto& chunk : initialChunks.chunks) {
- if (serverGlobalParams.featureCompatibility.getVersion() >=
- ServerGlobalParams::FeatureCompatibility::Version::kUpgradingTo44) {
- chunkObjs.push_back(chunk.toConfigBSON());
- } else {
- chunkObjs.push_back(chunk.toConfigBSONLegacyID());
- }
+ chunkObjs.push_back(chunk.toConfigBSON());
}
Grid::get(opCtx)->catalogClient()->insertConfigDocumentsAsRetryableWrite(
@@ -691,11 +685,6 @@ public:
str::stream() << "Collection " << nss << " is sharded without UUID",
uuid);
- if (MONGO_unlikely(pauseShardCollectionBeforeReturning.shouldFail())) {
- LOGV2(22102, "Hit pauseShardCollectionBeforeReturning");
- pauseShardCollectionBeforeReturning.pauseWhileSet(opCtx);
- }
-
scopedShardCollection.emplaceUUID(uuid);
}
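[Editor's note] With the FCV branch gone, initial chunks are always written to config.chunks in the 4.4 format, i.e. with an ObjectId _id rather than the 4.2 "ns-minKey" string (see the genLegacyID removal further down). Illustrative shape only; the values below are hypothetical:

    #include <iostream>

    int main() {
        // Assumed 4.4-format config.chunks document produced by toConfigBSON().
        std::cout <<
            "{ _id: ObjectId(\"...\"), ns: \"db.coll\", min: { a: MinKey },\n"
            "  max: { a: MaxKey }, shard: \"shard0000\",\n"
            "  lastmod: Timestamp(1, 0), lastmodEpoch: ObjectId(\"...\") }\n";
    }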
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index b9736e478b4..9e78e767649 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -47,7 +47,6 @@ const NamespaceString ChunkType::ConfigNS("config.chunks");
const std::string ChunkType::ShardNSPrefix = "config.cache.chunks.";
const BSONField<OID> ChunkType::name("_id");
-const BSONField<std::string> ChunkType::legacyName("_id");
const BSONField<BSONObj> ChunkType::minShardID("_id");
const BSONField<std::string> ChunkType::ns("ns");
const BSONField<BSONObj> ChunkType::min("min");
@@ -216,14 +215,9 @@ StatusWith<ChunkType> ChunkType::parseFromConfigBSONCommand(const BSONObj& sourc
Status status = bsonExtractOIDField(source, name.name(), &chunkID);
if (status.isOK()) {
chunk._id = chunkID;
- } else if (status == ErrorCodes::NoSuchKey || status == ErrorCodes::TypeMismatch) {
+ } else if (status == ErrorCodes::NoSuchKey) {
// Ignore NoSuchKey because when chunks are sent in commands they are not required to
// include it.
- //
- // Ignore TypeMismatch for compatibility with binaries 4.2 and earlier, since the _id
- // type was changed from string to OID.
- //
- // TODO SERVER-44034: Stop ignoring TypeMismatch.
} else {
return status;
}
@@ -308,11 +302,6 @@ StatusWith<ChunkType> ChunkType::fromConfigBSON(const BSONObj& source) {
Status status = bsonExtractOIDField(source, name.name(), &chunkID);
if (status.isOK()) {
chunk._id = chunkID;
- } else if (status == ErrorCodes::TypeMismatch) {
- // The format of _id changed between 4.2 and 4.4 so for compatibility with chunks
- // created in earlier versions we ignore TypeMismatch.
- //
- // TODO SERVER-44034: Stop ignoring TypeMismatch.
} else {
return status;
}
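[Editor's note] After these two hunks, a 4.2-style string _id is a hard TypeMismatch error in both parsers; only a missing _id remains tolerated, and only in the command parser. A toy model of the new behavior (the error codes and the OID stand-in are illustrative, not the real mongo types):

    #include <iostream>
    #include <optional>
    #include <string>
    #include <variant>

    enum class ErrorCode { OK, NoSuchKey, TypeMismatch };

    // _id is absent, an OID (modeled as long), or a legacy 4.2 string.
    using IdField = std::variant<std::monostate, long, std::string>;

    // Models parseFromConfigBSONCommand after this change: a missing _id is
    // still fine (chunks sent in commands need not carry one), but a string
    // _id now fails instead of being silently ignored.
    ErrorCode parseId(const IdField& id, std::optional<long>& out) {
        if (std::holds_alternative<std::monostate>(id))
            return ErrorCode::OK;  // NoSuchKey still ignored
        if (const long* oid = std::get_if<long>(&id)) {
            out = *oid;
            return ErrorCode::OK;
        }
        return ErrorCode::TypeMismatch;  // no longer ignored (SERVER-44034)
    }

    int main() {
        std::optional<long> out;
        auto rc = parseId(IdField{std::string("test.mycol-a_10")}, out);
        std::cout << (rc == ErrorCode::TypeMismatch) << "\n";  // prints 1
    }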
@@ -342,26 +331,6 @@ BSONObj ChunkType::toConfigBSON() const {
return builder.obj();
}
-BSONObj ChunkType::toConfigBSONLegacyID() const {
- BSONObjBuilder builder;
- if (_nss && _min)
- builder.append(name.name(), genLegacyID(*_nss, *_min));
- if (_nss)
- builder.append(ns.name(), getNS().ns());
- if (_min)
- builder.append(min.name(), getMin());
- if (_max)
- builder.append(max.name(), getMax());
- if (_shard)
- builder.append(shard.name(), getShard().toString());
- if (_version)
- _version->appendLegacyWithField(&builder, ChunkType::lastmod());
- if (_jumbo)
- builder.append(jumbo.name(), getJumbo());
- addHistoryToBSON(builder);
- return builder.obj();
-}
-
StatusWith<ChunkType> ChunkType::fromShardBSON(const BSONObj& source, const OID& epoch) {
ChunkType chunk;
@@ -547,22 +516,4 @@ std::string ChunkType::toString() const {
return toConfigBSON().toString();
}
-std::string ChunkType::genLegacyID(const NamespaceString& nss, const BSONObj& o) {
- StringBuilder buf;
- buf << nss.ns() << "-";
-
- BSONObjIterator i(o);
- while (i.more()) {
- BSONElement e = i.next();
- buf << e.fieldName() << "_" << e.toString(false, true);
- }
-
- return buf.str();
-}
-
-std::string ChunkType::getLegacyName() const {
- invariant(_nss && _min);
- return genLegacyID(*_nss, *_min);
-}
-
} // namespace mongo
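[Editor's note] For reference, the removed genLegacyID() produced the 4.2-era _id by concatenating the namespace with each field/value pair of the chunk's min bound. A standalone re-creation using a plain map in place of BSON (field values rendered as strings):

    #include <iostream>
    #include <map>
    #include <sstream>
    #include <string>

    static std::string genLegacyID(const std::string& ns,
                                   const std::map<std::string, std::string>& minBound) {
        std::ostringstream buf;
        buf << ns << "-";
        for (const auto& [field, value] : minBound)
            buf << field << "_" << value;
        return buf.str();
    }

    int main() {
        // Matches the expectation in the deleted test below: "test.mycol-a_10".
        std::cout << genLegacyID("test.mycol", {{"a", "10"}}) << "\n";
    }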
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index 2e64e86d3dc..3e602cb191c 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -194,7 +194,6 @@ public:
// Field names and types in the chunks collections.
static const BSONField<OID> name;
- static const BSONField<std::string> legacyName; // TODO SERVER-44034: Remove legacyName.
static const BSONField<BSONObj> minShardID;
static const BSONField<std::string> ns;
static const BSONField<BSONObj> min;
@@ -225,14 +224,6 @@ public:
BSONObj toConfigBSON() const;
/**
- * Returns the BSON representation of the entry for the config server's config.chunks
- * collection using the _id format expected by binaries in 4.2 and earlier.
- *
- * TODO SERVER-44034: Remove when 4.4 becomes last-stable.
- */
- BSONObj toConfigBSONLegacyID() const;
-
- /**
* Constructs a new ChunkType object from BSON that has a shard server's config.chunks.<epoch>
* collection format.
*
@@ -241,26 +232,11 @@ public:
static StatusWith<ChunkType> fromShardBSON(const BSONObj& source, const OID& epoch);
/**
- * Generates the chunk id that would be expected in binaries 4.2 and earlier based on the
- * namespace and lower chunk bound.
- *
- * TODO SERVER-44034: Remove when 4.4 becomes last-stable.
- */
- static std::string genLegacyID(const NamespaceString& nss, const BSONObj& o);
-
- /**
* Returns the BSON representation of the entry for a shard server's config.chunks.<epoch>
* collection.
*/
BSONObj toShardBSON() const;
- /**
- * Returns the _id that would be used for this chunk in binaries 4.2 and earlier.
- *
- * TODO SERVER-44034: Remove when 4.4 becomes last-stable.
- */
- std::string getLegacyName() const;
-
const OID& getName() const;
void setName(const OID& id);
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index aeabb3749c5..42f7b43264d 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -267,69 +267,5 @@ TEST(ChunkRange, MinGreaterThanMaxShouldError) {
ASSERT_EQ(ErrorCodes::FailedToParse, parseStatus.getStatus());
}
-// TODO SERVER-44034: Delete this test.
-TEST(ChunkType, FromConfigBSONParsesIgnores42_idFormat) {
- NamespaceString nss("test.mycol");
- auto minBound = BSON("a" << 10);
- ChunkVersion chunkVersion(1, 2, OID::gen());
-
- BSONObj obj = BSON("_id" << ChunkType::genLegacyID(nss, minBound) << ChunkType::ns(nss.ns())
- << ChunkType::min(minBound) << ChunkType::max(BSON("a" << 20))
- << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
- << chunkVersion.epoch() << ChunkType::shard("shard0001"));
-
- // Parsing will succeed despite the string _id.
- auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(obj));
-
- // Attempting to get the 4.4 _id will throw since it hasn't been set.
- ASSERT_THROWS_CODE(chunk.getName(), AssertionException, 51264);
-}
-
-// TODO SERVER-44034: Delete this test.
-TEST(ChunkType, LegacyNameBSONFieldIs_id) {
- auto obj = BSON(ChunkType::legacyName("dummyId"));
- ASSERT_BSONOBJ_EQ(obj,
- BSON("_id"
- << "dummyId"));
-}
-
-// TODO SERVER-44034: Delete this test.
-TEST(ChunkType, GetLegacyNameAndGenLegacyIDReturn42_idFormat) {
- NamespaceString nss("test.mycol");
- auto minBound = BSON("a" << 10);
- ChunkVersion chunkVersion(1, 2, OID::gen());
-
- BSONObj obj =
- BSON(ChunkType::name(OID::gen())
- << ChunkType::ns(nss.ns()) << ChunkType::min(minBound)
- << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
- auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(obj));
-
- ASSERT_EQ("test.mycol-a_10", ChunkType::genLegacyID(nss, minBound));
- ASSERT_EQ(ChunkType::genLegacyID(nss, minBound), chunk.getLegacyName());
-}
-
-// TODO SERVER-44034: Delete this test.
-TEST(ChunkType, ToConfigBSONLegacyIDUses42_idFormat) {
- NamespaceString nss("test.mycol");
- auto minBound = BSON("a" << 10);
- ChunkVersion chunkVersion(1, 2, OID::gen());
-
- BSONObj obj =
- BSON(ChunkType::name(OID::gen())
- << ChunkType::ns(nss.ns()) << ChunkType::min(minBound)
- << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
- auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(obj));
-
- ASSERT_BSONOBJ_EQ(chunk.toConfigBSONLegacyID(),
- BSON("_id" << ChunkType::genLegacyID(nss, minBound)
- << ChunkType::ns("test.mycol") << ChunkType::min(minBound)
- << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001")
- << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
- << chunkVersion.epoch()));
-}
-
} // namespace
} // namespace mongo
diff --git a/src/mongo/s/catalog/type_tags.cpp b/src/mongo/s/catalog/type_tags.cpp
index 9f21a2a5dfa..4f949c5b754 100644
--- a/src/mongo/s/catalog/type_tags.cpp
+++ b/src/mongo/s/catalog/type_tags.cpp
@@ -147,13 +147,6 @@ BSONObj TagsType::toBSON() const {
return builder.obj();
}
-BSONObj TagsType::toBSONLegacyID() const {
- // Note that toBSON() doesn't append an _id.
- BSONObjBuilder bob(toBSON());
- bob.append("_id", BSON(TagsType::ns(_ns->ns()) << TagsType::min(*_minKey)));
- return bob.obj();
-}
-
std::string TagsType::toString() const {
return toBSON().toString();
}
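[Editor's note] The removed toBSONLegacyID() appended the compound _id that pre-4.4 binaries used to key config.tags documents: the document's own { ns, min } pair (the deleted test further below shows the exact expectation). Illustrative shape only:

    #include <iostream>

    int main() {
        // Assumed legacy tag document shape; 4.4 drops the explicit _id.
        std::cout <<
            "{ ns: \"test.mycol\", tag: \"tag\", min: { a: 10 }, max: { a: 20 },\n"
            "  _id: { ns: \"test.mycol\", min: { a: 10 } } }\n";
    }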
diff --git a/src/mongo/s/catalog/type_tags.h b/src/mongo/s/catalog/type_tags.h
index 6e1f3e33dc1..d69d9eeb057 100644
--- a/src/mongo/s/catalog/type_tags.h
+++ b/src/mongo/s/catalog/type_tags.h
@@ -81,14 +81,6 @@ public:
BSONObj toBSON() const;
/**
- * Returns the BSON representation of the tag with an _id in the format expected by binaries 4.2
- * and below.
- *
- * TODO SERVER-44034: Remove this method.
- */
- BSONObj toBSONLegacyID() const;
-
- /**
* Returns a std::string representation of the current internal state.
*/
std::string toString() const;
diff --git a/src/mongo/s/catalog/type_tags_test.cpp b/src/mongo/s/catalog/type_tags_test.cpp
index e78f6eedeea..1cd8ed6d276 100644
--- a/src/mongo/s/catalog/type_tags_test.cpp
+++ b/src/mongo/s/catalog/type_tags_test.cpp
@@ -129,18 +129,4 @@ TEST(TagsType, BadType) {
ASSERT_EQUALS(ErrorCodes::NoSuchKey, status.getStatus());
}
-TEST(TagsType, ToBSONLegacyID) {
- BSONObj obj =
- BSON(TagsType::ns("test.mycol") << TagsType::tag("tag") << TagsType::min(BSON("a" << 10))
- << TagsType::max(BSON("a" << 20)));
-
- auto tag = uassertStatusOK(TagsType::fromBSON(obj));
-
- ASSERT_BSONOBJ_EQ(tag.toBSONLegacyID(),
- BSON(TagsType::ns("test.mycol")
- << TagsType::tag("tag") << TagsType::min(BSON("a" << 10))
- << TagsType::max(BSON("a" << 20)) << "_id"
- << BSON(TagsType::ns("test.mycol") << TagsType::min(BSON("a" << 10)))));
-}
-
} // namespace