-rw-r--r--  jstests/multiVersion/config_chunks_tags_set_fcv.js                  | 142
-rw-r--r--  jstests/multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js | 124
-rw-r--r--  jstests/multiVersion/libs/config_chunks_tags_shared.js              | 236
-rw-r--r--  jstests/sharding/libs/sharded_transactions_helpers.js               |   4
-rw-r--r--  jstests/sharding/mongos_get_shard_version.js                        |   9
-rw-r--r--  src/mongo/db/commands/set_feature_compatibility_version_command.cpp |  31
-rw-r--r--  src/mongo/db/s/active_shard_collection_registry.cpp                 |  17
-rw-r--r--  src/mongo/db/s/active_shard_collection_registry.h                   |   8
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp                      |   1
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp                  | 245
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.h                    |  22
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 126
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp  |   6
-rw-r--r--  src/mongo/db/s/shardsvr_shard_collection.cpp                        |  15
-rw-r--r--  src/mongo/s/catalog/type_chunk.cpp                                  |  72
-rw-r--r--  src/mongo/s/catalog/type_chunk.h                                    |  34
-rw-r--r--  src/mongo/s/catalog/type_chunk_test.cpp                             |  80
-rw-r--r--  src/mongo/s/catalog/type_tags.cpp                                   |   7
-rw-r--r--  src/mongo/s/catalog/type_tags.h                                     |   8
-rw-r--r--  src/mongo/s/catalog/type_tags_test.cpp                              |  14
20 files changed, 1151 insertions, 50 deletions
diff --git a/jstests/multiVersion/config_chunks_tags_set_fcv.js b/jstests/multiVersion/config_chunks_tags_set_fcv.js
new file mode 100644
index 00000000000..cc58a6c8faf
--- /dev/null
+++ b/jstests/multiVersion/config_chunks_tags_set_fcv.js
@@ -0,0 +1,142 @@
+/**
+ * Tests that config.chunks and config.tags documents are correctly modified on FCV
+ * upgrade/downgrade.
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/parallelTester.js"); // for Thread.
+load("jstests/multiVersion/libs/config_chunks_tags_shared.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+// Assumes ns has the following chunk layout: [-inf, -50), [-50, 0) on shard0 and [0, inf) on
+// shard 1.
+function verifyChunkOperationsFailDuringSetFCV(st, ns) {
+ assert.commandFailedWithCode(st.s.adminCommand({split: ns, middle: {_id: 50}}),
+ ErrorCodes.ConflictingOperationInProgress);
+ verifyChunkDistribution(st, ns, [2, 1]);
+
+ assert.commandFailedWithCode(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}),
+ ErrorCodes.ConflictingOperationInProgress);
+ verifyChunkDistribution(st, ns, [2, 1]);
+
+ assert.commandFailedWithCode(
+ st.s.adminCommand({mergeChunks: ns, bounds: [{_id: MinKey}, {_id: 0}]}),
+ ErrorCodes.ConflictingOperationInProgress);
+ verifyChunkDistribution(st, ns, [2, 1]);
+}
+
+// Assumes shard0 is in zone0 which contains [-inf, 0) and is not in zone1.
+function verifyZoneOperationsSucceedDuringSetFCV(st, ns) {
+ assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: null}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "zone0"}));
+
+ assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: "zone1"}));
+ assert.commandWorked(
+ st.s.adminCommand({removeShardFromZone: st.shard0.shardName, zone: "zone1"}));
+}
+
+const dbName = "test";
+const chunkNs = dbName + ".chunk_coll";
+const zoneNs = dbName + ".zone_coll";
+
+const st = new ShardingTest({shards: 2});
+const configPrimary = st.configRS.getPrimary();
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+setUpCollectionForChunksTesting(st, chunkNs);
+setUpCollectionForZoneTesting(st, zoneNs);
+
+//
+// Verify chunk and tag documents are updated by setFeatureCompatibilityVersion.
+//
+
+checkFCV(configPrimary.getDB("admin"), latestFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: true});
+
+jsTestLog("Downgrading FCV to last stable");
+assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(configPrimary.getDB("admin"), lastStableFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Upgrading FCV to latest");
+assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(configPrimary.getDB("admin"), latestFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: true});
+
+//
+// Verify operations during setFeatureCompatibilityVersion use the correct format and that setFCV
+// blocks behind in-progress shard collections on shard servers.
+//
+
+function runInProgressSetFCVTest(st, {initialFCV, desiredFCV}) {
+ const pauseInSetFCVFailPointName = desiredFCV === lastStableFCV
+ ? "pauseBeforeDowngradingConfigMetadata"
+ : "pauseBeforeUpgradingConfigMetadata";
+
+ clearRawMongoProgramOutput();
+ checkFCV(configPrimary.getDB("admin"), initialFCV);
+
+ // Pause setFCV to test the in-progress states.
+ assert.commandWorked(configPrimary.adminCommand(
+ {configureFailPoint: pauseInSetFCVFailPointName, mode: "alwaysOn"}));
+
+ // Start and pause a shard collection, and verify that the setFCV blocks behind it.
+ const shardCollDuringSetFCV = new Thread((host, ns) => {
+ const mongosConn = new Mongo(host);
+ return mongosConn.adminCommand({shardCollection: ns, key: {_id: 1}});
+ }, st.s.host, dbName + ".setFCVTo" + desiredFCV);
+ assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "pauseShardCollectionBeforeReturning", mode: "alwaysOn"}));
+ shardCollDuringSetFCV.start();
+ waitForFailpoint("Hit pauseShardCollectionBeforeReturning", 1 /* numTimes */);
+
+ // Assert setFCV can't hit the failpoint until the shard collection completes.
+ const changeFCV = new Thread((host, fcv) => {
+ const mongosConn = new Mongo(host);
+ return mongosConn.adminCommand({setFeatureCompatibilityVersion: fcv});
+ }, st.s.host, desiredFCV);
+ changeFCV.start();
+ assert.throws(() => {
+ waitForFailpoint("Hit " + pauseInSetFCVFailPointName, 1 /* numTimes */, 3000 /* timeout */);
+ });
+
+ // Unpause the shard collection and wait for setFCV to reach the failpoint.
+ assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "pauseShardCollectionBeforeReturning", mode: "off"}));
+ shardCollDuringSetFCV.join();
+ waitForFailpoint("Hit " + pauseInSetFCVFailPointName, 1 /* numTimes */);
+
+ // Verify behavior while setFCV is in progress.
+ verifyChunkOperationsFailDuringSetFCV(st, chunkNs);
+ verifyZoneOperationsSucceedDuringSetFCV(st, zoneNs);
+ testCRUDOperations(st, chunkNs);
+ testCRUDOperations(st, zoneNs);
+
+ // A collection can still be sharded during setFCV and should write chunks correctly.
+ verifyInitialChunks(
+ st, dbName + ".newCollDuringFCV" + desiredFCV, {expectNewFormat: desiredFCV === latestFCV});
+
+ // Unset the setFCV failpoint and allow setFCV to finish.
+ assert.commandWorked(
+ configPrimary.adminCommand({configureFailPoint: pauseInSetFCVFailPointName, mode: "off"}));
+ changeFCV.join();
+ assert.commandWorked(changeFCV.returnData());
+ checkFCV(configPrimary.getDB("admin"), desiredFCV);
+
+ verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: desiredFCV === latestFCV});
+}
+
+runInProgressSetFCVTest(st, {initialFCV: latestFCV, desiredFCV: lastStableFCV});
+runInProgressSetFCVTest(st, {initialFCV: lastStableFCV, desiredFCV: latestFCV});
+
+st.stop();
+}());
diff --git a/jstests/multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js b/jstests/multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js
new file mode 100644
index 00000000000..75a024b2bf5
--- /dev/null
+++ b/jstests/multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js
@@ -0,0 +1,124 @@
+/**
+ * Tests upgrading a cluster from last stable to the latest version and downgrading it back to last
+ * stable, verifying the behavior of chunk and zone operations throughout the process.
+ */
+
+// Checking UUID consistency uses cached connections, which are not valid across restarts or
+// stepdowns.
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
+(function() {
+"use strict";
+
+load("jstests/multiVersion/libs/config_chunks_tags_shared.js");
+load("jstests/multiVersion/libs/multi_cluster.js");
+load("jstests/multiVersion/libs/multi_rs.js");
+
+// Runs commands on the config server that will use its RSM to target both shard primaries until
+// they succeed.
+function waitForConfigServerShardRSMRetarget(st) {
+ assert.soonNoExcept(() => {
+ assert.commandWorked(st.s.getDB("unrelated_db").unrelated_coll.insert({x: 1}));
+ st.ensurePrimaryShard("unrelated_db", st.shard0.shardName);
+ st.ensurePrimaryShard("unrelated_db", st.shard1.shardName);
+ st.ensurePrimaryShard("unrelated_db", st.shard0.shardName);
+ assert.commandWorked(st.s.getDB("unrelated_db").dropDatabase());
+ return true;
+ });
+}
+
+const dbName = "test";
+const chunkNs = dbName + ".chunk_coll";
+const zoneNs = dbName + ".zone_coll";
+
+// Start a cluster with two shards at the last stable version and a sharding enabled db.
+const st = new ShardingTest({
+ shards: 2,
+ other: {
+ mongosOptions: {binVersion: "last-stable"},
+ configOptions: {binVersion: "last-stable"},
+ rsOptions: {binVersion: "last-stable"},
+ },
+ rs: {nodes: 3} // Use 3 node replica sets to allow binary changes with no downtime.
+});
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+// Set up sharded collections for targeted chunk and zone operation testing.
+setUpCollectionForChunksTesting(st, chunkNs);
+setUpCollectionForZoneTesting(st, zoneNs);
+
+// Set up another sharded collection on a different database to verify chunks and zones are updated
+// for every sharded collection.
+setUpExtraShardedCollections(st, "extra_db" /* dbName */);
+
+//
+// Upgrade the cluster to the latest version.
+//
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Upgrading config servers.");
+st.upgradeCluster("latest", {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Upgrading shard servers.");
+st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
+
+// Manually moving a chunk will use the config server's replica set monitor to target the primary of
+// the source shard. After upgrading the shard servers above, this RSM may be stale, so run
+// operations through the config server that will use the same RSM so it picks up the new primary.
+waitForConfigServerShardRSMRetarget(st);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Upgrading mongos servers.");
+st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Upgrade feature compatibility version to latest");
+assert.commandWorked(st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: true});
+
+//
+// Downgrade back to the last stable version.
+//
+
+jsTestLog("Downgrade feature compatibility version to last stable");
+assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Downgrading mongos servers.");
+st.upgradeCluster("last-stable",
+ {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Downgrading shard servers.");
+st.upgradeCluster("last-stable",
+ {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
+
+// Manually moving a chunk will use the config server's replica set monitor to target the primary of
+// the source shard. After downgrading the shard servers above, this RSM may be stale, so run
+// operations through the config server that will use the same RSM so it picks up the new primary.
+waitForConfigServerShardRSMRetarget(st);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Downgrading config servers.");
+st.upgradeCluster("last-stable",
+ {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+st.stop();
+})();
diff --git a/jstests/multiVersion/libs/config_chunks_tags_shared.js b/jstests/multiVersion/libs/config_chunks_tags_shared.js
new file mode 100644
index 00000000000..a876bb0d293
--- /dev/null
+++ b/jstests/multiVersion/libs/config_chunks_tags_shared.js
@@ -0,0 +1,236 @@
+/**
+ * Functions and variables shared between multiVersion/config_chunks_tags_set_fcv.js and
+ * multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js.
+ */
+
+// Sets up a collection with chunks in the format expected by the testChunkOperations() helper.
+function setUpCollectionForChunksTesting(st, ns) {
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: -50}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
+}
+
+// Sets up zones and chunks for a collection to work with the testZoneOperations() helper.
+function setUpCollectionForZoneTesting(st, ns) {
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: "zone0"}));
+ assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: "zone1"}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "zone0"}));
+ assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: 0}, max: {_id: 50}, zone: "zone1"}));
+}
+
+function setUpExtraShardedCollections(st, dbName) {
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ st.ensurePrimaryShard(dbName, st.shard1.shardName);
+
+ // Set up one zone with half the key range and two chunks split at {_id: 0}.
+ const ns = dbName + ".extra_coll";
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(
+ st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: "extra_zone0"}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "extra_zone0"}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
+
+ // Set up a sharded collection with a hashed shard key.
+ const hashedNs = dbName + ".extra_coll_hashed";
+ assert.commandWorked(st.s.adminCommand({shardCollection: hashedNs, key: {_id: "hashed"}}));
+}
+
+function verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat}) {
+ verifyChunks(st, {expectNewFormat});
+ verifyTags(st, {expectNewFormat});
+
+ testChunkOperations(st, chunkNs);
+ testZoneOperations(st, zoneNs);
+ verifyInitialChunks(st, dbName, {expectNewFormat});
+
+ verifyChunks(st, {expectNewFormat});
+ verifyTags(st, {expectNewFormat});
+}
+
+function getChunks(st, ns) {
+ if (ns) {
+ return st.s.getDB("config").chunks.find({ns}).sort({min: 1}).toArray();
+ }
+ return st.s.getDB("config").chunks.find().sort({min: 1}).toArray();
+}
+
+// Asserts all chunk documents have the expected format.
+function verifyChunks(st, {ns, expectNewFormat}) {
+ const chunks = getChunks(st, ns);
+ assert.lte(1, chunks.length, tojson(chunks));
+ chunks.forEach((chunk) => {
+ if (expectNewFormat) {
+ assert(chunk._id.isObjectId, tojson(chunk));
+ assert.neq("string", typeof chunk._id, tojson(chunk));
+ } else {
+ assert(!chunk._id.isObjectId, tojson(chunk));
+ assert.eq("string", typeof chunk._id, tojson(chunk));
+ }
+
+ let expectedChunkFields =
+ ["_id", "ns", "min", "max", "shard", "lastmod", "lastmodEpoch", "history"];
+
+ // Jumbo is an optional field.
+ if (chunk.hasOwnProperty("jumbo")) {
+ expectedChunkFields = expectedChunkFields.concat("jumbo");
+ }
+
+ assert.eq(Object.keys(chunk).length, expectedChunkFields.length, tojson(chunk));
+ assert.hasFields(chunk, expectedChunkFields);
+ });
+}
+
+function getTags(st) {
+ return st.s.getDB("config").tags.find().sort({min: 1}).toArray();
+}
+
+// Asserts all tag documents have the expected format.
+function verifyTags(st, {expectNewFormat}) {
+ const tags = getTags(st);
+ assert.lt(1, tags.length, tojson(tags));
+ tags.forEach((tag) => {
+ if (expectNewFormat) {
+ assert(tag._id.isObjectId, tojson(tag));
+ // ObjectId returns "object" from typeof...
+ // assert.neq("object", typeof tag._id, tojson(tag));
+ } else {
+ assert(!tag._id.isObjectId, tojson(tag));
+ assert.eq("object", typeof tag._id, tojson(tag));
+ }
+
+ const expectedTagFields = ["_id", "ns", "tag", "min", "max"];
+ assert.eq(Object.keys(tag).length, expectedTagFields.length, tojson(tag));
+ assert.hasFields(tag, expectedTagFields);
+ });
+}
+
+// Runs basic crud operations against the given namespace.
+function testCRUDOperations(st, ns) {
+ const coll = st.s.getCollection(ns);
+ assert.eq(0, coll.find().itcount());
+
+ assert.commandWorked(coll.insert({_id: -5}));
+ assert.commandWorked(coll.insert({_id: 5}));
+
+ assert.commandWorked(coll.update({_id: -5}, {$set: {updated: true}}));
+ assert.commandWorked(coll.update({_id: 5}, {$set: {updated: true}}));
+
+ assert.docEq({_id: -5, updated: true}, coll.findOne({_id: -5}));
+ assert.docEq({_id: 5, updated: true}, coll.findOne({_id: 5}));
+
+ assert.commandWorked(coll.remove({_id: -5}, true /* justOne */));
+ assert.commandWorked(coll.remove({_id: 5}, true /* justOne */));
+ assert.eq(0, coll.find().itcount());
+}
+
+// Helper to verify chunks are owned by the expected shards.
+function verifyChunkDistribution(st, ns, expectedChunkDistribution) {
+ for (let i = 0; i < expectedChunkDistribution.length; i++) {
+ assert.eq(expectedChunkDistribution[i],
+ st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
+ "unexpected number of chunks on shard " + i);
+ }
+}
+
+// Assumes ns has the following chunk layout: [-inf, -50), [-50, 0) on shard0 and [0, inf) on
+// shard 1.
+function testChunkOperations(st, ns) {
+ verifyChunkDistribution(st, ns, [2, 1]);
+
+ // Split chunk should work.
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 50}}));
+ verifyChunkDistribution(st, ns, [2, 2]);
+
+ testCRUDOperations(st, ns);
+
+ // Move chunk should work with a control chunk.
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
+ verifyChunkDistribution(st, ns, [3, 1]);
+
+ testCRUDOperations(st, ns);
+
+ // Move chunk should work without a control chunk.
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 50}, to: st.shard0.shardName}));
+ verifyChunkDistribution(st, ns, [4, 0]);
+
+ testCRUDOperations(st, ns);
+
+ // Merge chunk should work.
+ assert.commandWorked(st.s.adminCommand({mergeChunks: ns, bounds: [{_id: -50}, {_id: 50}]}));
+ verifyChunkDistribution(st, ns, [3, 0]);
+
+ testCRUDOperations(st, ns);
+
+ // Reset the chunks to their original state.
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ verifyChunkDistribution(st, ns, [4, 0]);
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 50}, to: st.shard1.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+ verifyChunkDistribution(st, ns, [2, 2]);
+ assert.commandWorked(st.s.adminCommand({mergeChunks: ns, bounds: [{_id: 0}, {_id: MaxKey}]}));
+ verifyChunkDistribution(st, ns, [2, 1]);
+
+ testCRUDOperations(st, ns);
+}
+
+// Assumes ns has two chunks: [-inf, 0) and [0, inf) on shards 0 and 1, respectively, and that
+// shard0 is in zone0, which contains [-inf, 0), and shard1 is in zone1, which contains [0, 50).
+function testZoneOperations(st, ns) {
+ // Verify conflicting zones can't be created.
+ assert.commandFailedWithCode(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: -10}, max: {_id: 0}, zone: "zone1"}),
+ ErrorCodes.RangeOverlapConflict);
+
+ // Verify zone boundaries are still enforced.
+ assert.commandFailedWithCode(
+ st.s.adminCommand({moveChunk: ns, find: {_id: -1}, to: st.shard1.shardName}),
+ ErrorCodes.IllegalOperation);
+
+ //
+ // Verify zone ranges can be updated.
+ //
+
+ assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: null}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "zone1"}));
+
+ // Now the chunk can be moved to shard1, which is in zone1.
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+
+ // Reset the chunk and zones.
+ assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: null}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "zone0"}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
+}
+
+let uniqueCollCounter = 0;
+
+// Assumes ns is a non-existent namespace on a database that is sharding enabled.
+function verifyInitialChunks(st, dbName, {expectNewFormat}) {
+ const ns = dbName + ".unique_coll" + uniqueCollCounter++;
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+
+ // Assert the chunks created for the new namespace are in the correct format.
+ verifyChunks(st, {ns, expectNewFormat});
+
+ // Clean up the new collection.
+ assert.commandWorked(st.s.adminCommand({drop: ns}));
+}
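Note: the verifyChunks() and verifyTags() helpers above distinguish the two formats purely by the shape of the _id field. As a rough, hypothetical illustration (the field values below are invented; only the _id shapes reflect this change), the documents look roughly like this in the mongo shell:

// Hypothetical config documents illustrating the two _id formats checked above. Values are
// made up; only the _id shapes matter.
// 4.2 ("legacy") format: the chunk _id is a string derived from ns + min, and the tag _id is a
// {ns, min} subdocument.
const legacyChunk = {
    _id: "test.chunk_coll-_id_MinKey",
    ns: "test.chunk_coll",
    min: {_id: MinKey},
    max: {_id: -50},
    shard: "shard0",
    lastmod: Timestamp(1, 0),
    lastmodEpoch: ObjectId(),
    history: []
};
const legacyTag = {
    _id: {ns: "test.zone_coll", min: {_id: MinKey}},
    ns: "test.zone_coll",
    tag: "zone0",
    min: {_id: MinKey},
    max: {_id: 0}
};
// 4.4 format: both config.chunks and config.tags documents use a plain ObjectId _id.
const newFormatChunk = Object.assign({}, legacyChunk, {_id: ObjectId()});
const newFormatTag = Object.assign({}, legacyTag, {_id: ObjectId()});
printjson(newFormatChunk);
printjson(newFormatTag);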
diff --git a/jstests/sharding/libs/sharded_transactions_helpers.js b/jstests/sharding/libs/sharded_transactions_helpers.js
index a25eef6e7f8..96dc193cd0c 100644
--- a/jstests/sharding/libs/sharded_transactions_helpers.js
+++ b/jstests/sharding/libs/sharded_transactions_helpers.js
@@ -95,11 +95,11 @@ function assertNoSuchTransactionOnConn(conn, lsid, txnNumber) {
", txnNumber: " + tojson(txnNumber) + ", connection: " + tojson(conn));
}
-function waitForFailpoint(hitFailpointStr, numTimes) {
+function waitForFailpoint(hitFailpointStr, numTimes, timeout) {
assert.soon(function() {
const re = new RegExp(hitFailpointStr, 'g' /* find all occurrences */);
return (rawMongoProgramOutput().match(re) || []).length == numTimes;
- }, 'Failed to find "' + hitFailpointStr + '" logged ' + numTimes + ' times');
+ }, 'Failed to find "' + hitFailpointStr + '" logged ' + numTimes + ' times', timeout);
}
// Enables the transaction router to retry on stale version (db or shard version) and snapshot
diff --git a/jstests/sharding/mongos_get_shard_version.js b/jstests/sharding/mongos_get_shard_version.js
index ecd383772e4..a22bdb2d03e 100644
--- a/jstests/sharding/mongos_get_shard_version.js
+++ b/jstests/sharding/mongos_get_shard_version.js
@@ -38,6 +38,15 @@ assert.eq(res.version.t, 1);
assert.eq(res.version.i, 0);
if (jsTestOptions().mongosBinVersion == "last-stable") {
assert.eq(undefined, res.chunks);
+
+ // The _id format for config.chunks documents was changed in 4.4, so in the mixed version suite
+ // the below size arithmetic does not hold and splitting chunks will fail with BSONObjectTooBig.
+ // A mongos with the last-stable binary does not support returning chunks in getShardVersion, so
+ // we can just return early.
+ //
+ // TODO SERVER-44034: Remove this branch when 4.4 becomes last stable.
+ st.stop();
+ return;
} else {
assert.eq(1, res.chunks.length);
assert.eq(min, res.chunks[0][0]);
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 6232dc35601..b49042bc85a 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/repl/repl_client_info.h"
+#include "mongo/db/s/active_shard_collection_registry.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/server_options.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -60,6 +61,8 @@ namespace {
MONGO_FAIL_POINT_DEFINE(featureCompatibilityDowngrade);
MONGO_FAIL_POINT_DEFINE(featureCompatibilityUpgrade);
+MONGO_FAIL_POINT_DEFINE(pauseBeforeDowngradingConfigMetadata); // TODO SERVER-44034: Remove.
+MONGO_FAIL_POINT_DEFINE(pauseBeforeUpgradingConfigMetadata); // TODO SERVER-44034: Remove.
/**
* Sets the minimum allowed version for the cluster. If it is 4.2, then the node should not use 4.4
@@ -173,6 +176,14 @@ public:
Lock::GlobalLock lk(opCtx, MODE_S);
}
+ if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+ // The primary shard sharding a collection will write the initial chunks for a
+ // collection directly to the config server, so wait for all shard collections to
+ // complete to guarantee no chunks are missed by the update on the config server.
+ ActiveShardCollectionRegistry::get(opCtx).waitForActiveShardCollectionsToComplete(
+ opCtx);
+ }
+
// Upgrade shards before config finishes its upgrade.
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
uassertStatusOK(
@@ -183,6 +194,12 @@ public:
cmdObj,
BSON(FeatureCompatibilityVersionCommandParser::kCommandName
<< requestedVersion)))));
+
+ if (MONGO_unlikely(pauseBeforeUpgradingConfigMetadata.shouldFail())) {
+ log() << "Hit pauseBeforeUpgradingConfigMetadata";
+ pauseBeforeUpgradingConfigMetadata.pauseWhileSet(opCtx);
+ }
+ ShardingCatalogManager::get(opCtx)->upgradeChunksAndTags(opCtx);
}
FeatureCompatibilityVersion::unsetTargetUpgradeOrDowngrade(opCtx, requestedVersion);
@@ -215,6 +232,14 @@ public:
Lock::GlobalLock lk(opCtx, MODE_S);
}
+ if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+ // The primary shard sharding a collection will write the initial chunks for a
+ // collection directly to the config server, so wait for all shard collections to
+ // complete to guarantee no chunks are missed by the update on the config server.
+ ActiveShardCollectionRegistry::get(opCtx).waitForActiveShardCollectionsToComplete(
+ opCtx);
+ }
+
// Downgrade shards before config finishes its downgrade.
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
uassertStatusOK(
@@ -225,6 +250,12 @@ public:
cmdObj,
BSON(FeatureCompatibilityVersionCommandParser::kCommandName
<< requestedVersion)))));
+
+ if (MONGO_unlikely(pauseBeforeDowngradingConfigMetadata.shouldFail())) {
+ log() << "Hit pauseBeforeDowngradingConfigMetadata";
+ pauseBeforeDowngradingConfigMetadata.pauseWhileSet(opCtx);
+ }
+ ShardingCatalogManager::get(opCtx)->downgradeChunksAndTags(opCtx);
}
FeatureCompatibilityVersion::unsetTargetUpgradeOrDowngrade(opCtx, requestedVersion);
diff --git a/src/mongo/db/s/active_shard_collection_registry.cpp b/src/mongo/db/s/active_shard_collection_registry.cpp
index d2bda7ece20..9b667370808 100644
--- a/src/mongo/db/s/active_shard_collection_registry.cpp
+++ b/src/mongo/db/s/active_shard_collection_registry.cpp
@@ -139,6 +139,23 @@ Status ActiveShardCollectionRegistry::ActiveShardCollectionState::constructError
<< "collection with arguments: " << activeRequest.toBSON()};
}
+void ActiveShardCollectionRegistry::waitForActiveShardCollectionsToComplete(
+ OperationContext* opCtx) {
+ // Take a snapshot of the currently active shard collections.
+ std::vector<SharedSemiFuture<boost::optional<UUID>>> shardCollectionFutures;
+ {
+ stdx::lock_guard<Latch> lk(_mutex);
+ for (const auto& it : _activeShardCollectionMap) {
+ shardCollectionFutures.emplace_back(it.second->_uuidPromise.getFuture());
+ }
+ }
+
+ // Synchronously wait for all futures to resolve.
+ for (const auto& fut : shardCollectionFutures) {
+ fut.wait(opCtx);
+ }
+}
+
ScopedShardCollection::ScopedShardCollection(std::string nss,
ActiveShardCollectionRegistry* registry,
bool shouldExecute,
diff --git a/src/mongo/db/s/active_shard_collection_registry.h b/src/mongo/db/s/active_shard_collection_registry.h
index 91423d65d7c..ba932d455e2 100644
--- a/src/mongo/db/s/active_shard_collection_registry.h
+++ b/src/mongo/db/s/active_shard_collection_registry.h
@@ -74,6 +74,14 @@ public:
StatusWith<ScopedShardCollection> registerShardCollection(
const ShardsvrShardCollection& request);
+ /**
+ * Takes a snapshot of all currently active shard collections and synchronously waits for each
+ * to complete.
+ *
+ * TODO SERVER-44034: Remove this method.
+ */
+ void waitForActiveShardCollectionsToComplete(OperationContext* opCtx);
+
private:
friend class ScopedShardCollection;
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 1b5f6fcf965..fe10ee8c6bf 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -58,7 +58,6 @@ void appendChunk(const NamespaceString& nss,
std::vector<ChunkType>* chunks) {
chunks->emplace_back(nss, ChunkRange(min, max), *version, shardId);
auto& chunk = chunks->back();
- chunk.setName(OID::gen()); // TODO SERVER-42299: Remove this line.
chunk.setHistory({ChunkHistory(validAfter, shardId)});
version->incMinor();
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 557529099ff..da229c14b5b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -33,8 +33,12 @@
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/auth/authorization_session_impl.h"
+#include "mongo/db/commands/txn_cmds_gen.h"
+#include "mongo/db/logical_session_cache.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/balancer/type_migration.h"
+#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/config_server_version.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -46,6 +50,9 @@
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
+#include "mongo/s/write_ops/batched_command_request.h"
+#include "mongo/s/write_ops/batched_command_response.h"
+#include "mongo/transport/service_entry_point.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -57,6 +64,75 @@ const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::
const auto getShardingCatalogManager =
ServiceContext::declareDecoration<boost::optional<ShardingCatalogManager>>();
+OpMsg runCommandInLocalTxn(OperationContext* opCtx,
+ StringData db,
+ bool startTransaction,
+ TxnNumber txnNumber,
+ BSONObj cmdObj) {
+ BSONObjBuilder bob(std::move(cmdObj));
+ if (startTransaction) {
+ bob.append("startTransaction", true);
+ }
+ bob.append("autocommit", false);
+ bob.append(OperationSessionInfo::kTxnNumberFieldName, txnNumber);
+
+ BSONObjBuilder lsidBuilder(bob.subobjStart("lsid"));
+ opCtx->getLogicalSessionId()->serialize(&lsidBuilder);
+ lsidBuilder.doneFast();
+
+ return OpMsg::parseOwned(
+ opCtx->getServiceContext()
+ ->getServiceEntryPoint()
+ ->handleRequest(opCtx,
+ OpMsgRequest::fromDBAndBody(db.toString(), bob.obj()).serialize())
+ .response);
+}
+
+void insertDocumentsInLocalTxn(OperationContext* opCtx,
+ const NamespaceString& nss,
+ std::vector<BSONObj> docs,
+ bool startTransaction,
+ TxnNumber txnNumber) {
+ BatchedCommandRequest request([&] {
+ write_ops::Insert insertOp(nss);
+ insertOp.setDocuments(std::move(docs));
+ return insertOp;
+ }());
+
+ uassertStatusOK(getStatusFromWriteCommandReply(
+ runCommandInLocalTxn(opCtx, nss.db(), startTransaction, txnNumber, request.toBSON()).body));
+}
+
+void removeDocumentsInLocalTxn(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const BSONObj& query,
+ bool startTransaction,
+ TxnNumber txnNumber) {
+ BatchedCommandRequest request([&] {
+ write_ops::Delete deleteOp(nss);
+ deleteOp.setDeletes({[&] {
+ write_ops::DeleteOpEntry entry;
+ entry.setQ(query);
+ entry.setMulti(true);
+ return entry;
+ }()});
+ return deleteOp;
+ }());
+
+ uassertStatusOK(getStatusFromWriteCommandReply(
+ runCommandInLocalTxn(opCtx, nss.db(), startTransaction, txnNumber, request.toBSON()).body));
+}
+
+void commitLocalTxn(OperationContext* opCtx, TxnNumber txnNumber) {
+ uassertStatusOK(
+ getStatusFromCommandResult(runCommandInLocalTxn(opCtx,
+ NamespaceString::kAdminDb,
+ false /* startTransaction */,
+ txnNumber,
+ BSON(CommitTransaction::kCommandName << 1))
+ .body));
+}
+
} // namespace
void ShardingCatalogManager::create(ServiceContext* serviceContext,
@@ -329,4 +405,173 @@ Lock::ExclusiveLock ShardingCatalogManager::lockZoneMutex(OperationContext* opCt
return lk;
}
+void ShardingCatalogManager::upgradeChunksAndTags(OperationContext* opCtx) {
+ // Upgrade each chunk document by deleting and re-inserting with the 4.4 _id format.
+ {
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
+
+ auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto findResponse = uassertStatusOK(
+ configShard->exhaustiveFindOnConfig(opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ {},
+ {},
+ boost::none /* limit */));
+
+ AlternativeSessionRegion asr(opCtx);
+ AuthorizationSession::get(asr.opCtx()->getClient())
+ ->grantInternalAuthorization(asr.opCtx()->getClient());
+ TxnNumber txnNumber = 0;
+ for (const auto& chunkObj : findResponse.docs) {
+ auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(chunkObj));
+
+ removeDocumentsInLocalTxn(
+ asr.opCtx(),
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns(chunk.getNS().ns()) << ChunkType::min(chunk.getMin())),
+ true /* startTransaction */,
+ txnNumber);
+
+ // Note that ChunkType::toConfigBSON() will not include an _id if one hasn't been set, which
+ // is the case for chunks parsed from the 4.2 format (parsing ignores 4.2 style _ids), so the
+ // insert path will generate a new ObjectId _id for us.
+ insertDocumentsInLocalTxn(asr.opCtx(),
+ ChunkType::ConfigNS,
+ {chunk.toConfigBSON()},
+ false /* startTransaction */,
+ txnNumber);
+
+ commitLocalTxn(asr.opCtx(), txnNumber);
+
+ txnNumber += 1;
+ }
+ }
+
+ // Upgrade each tag document by deleting and re-inserting with the 4.4 _id format.
+ {
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
+
+ auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto findResponse = uassertStatusOK(
+ configShard->exhaustiveFindOnConfig(opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ TagsType::ConfigNS,
+ {},
+ {},
+ boost::none /* limit */));
+
+ AlternativeSessionRegion asr(opCtx);
+ AuthorizationSession::get(asr.opCtx()->getClient())
+ ->grantInternalAuthorization(asr.opCtx()->getClient());
+ TxnNumber txnNumber = 0;
+ for (const auto& tagObj : findResponse.docs) {
+ auto tag = uassertStatusOK(TagsType::fromBSON(tagObj));
+
+ removeDocumentsInLocalTxn(
+ asr.opCtx(),
+ TagsType::ConfigNS,
+ BSON(TagsType::ns(tag.getNS().ns()) << TagsType::min(tag.getMinKey())),
+ true /* startTransaction */,
+ txnNumber);
+
+ // Note that TagsType::toBSON() will not include an _id, so the insert path will
+ // generate one for us.
+ insertDocumentsInLocalTxn(asr.opCtx(),
+ TagsType::ConfigNS,
+ {tag.toBSON()},
+ false /* startTransaction */,
+ txnNumber);
+
+ commitLocalTxn(asr.opCtx(), txnNumber);
+
+ txnNumber += 1;
+ }
+ }
+}
+
+void ShardingCatalogManager::downgradeChunksAndTags(OperationContext* opCtx) {
+ // Downgrade each chunk document by deleting and re-inserting with the 4.2 _id format.
+ {
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
+
+ auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto findResponse = uassertStatusOK(
+ configShard->exhaustiveFindOnConfig(opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ {},
+ {},
+ boost::none /* limit */));
+
+ AlternativeSessionRegion asr(opCtx);
+ AuthorizationSession::get(asr.opCtx()->getClient())
+ ->grantInternalAuthorization(asr.opCtx()->getClient());
+ TxnNumber txnNumber = 0;
+ for (const auto& chunkObj : findResponse.docs) {
+ auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(chunkObj));
+
+ removeDocumentsInLocalTxn(
+ asr.opCtx(),
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns(chunk.getNS().ns()) << ChunkType::min(chunk.getMin())),
+ true /* startTransaction */,
+ txnNumber);
+
+ insertDocumentsInLocalTxn(asr.opCtx(),
+ ChunkType::ConfigNS,
+ {chunk.toConfigBSONLegacyID()},
+ false /* startTransaction */,
+ txnNumber);
+
+ commitLocalTxn(asr.opCtx(), txnNumber);
+
+ txnNumber += 1;
+ }
+ }
+
+ // Downgrade each tag document by deleting and re-inserting with the 4.2 _id format.
+ {
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
+
+ auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto findResponse = uassertStatusOK(
+ configShard->exhaustiveFindOnConfig(opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ TagsType::ConfigNS,
+ {},
+ {},
+ boost::none /* limit */));
+
+ AlternativeSessionRegion asr(opCtx);
+ AuthorizationSession::get(asr.opCtx()->getClient())
+ ->grantInternalAuthorization(asr.opCtx()->getClient());
+ TxnNumber txnNumber = 0;
+ for (const auto& tagObj : findResponse.docs) {
+ auto tag = uassertStatusOK(TagsType::fromBSON(tagObj));
+
+ removeDocumentsInLocalTxn(
+ asr.opCtx(),
+ TagsType::ConfigNS,
+ BSON(TagsType::ns(tag.getNS().ns()) << TagsType::min(tag.getMinKey())),
+ true /* startTransaction */,
+ txnNumber);
+
+ insertDocumentsInLocalTxn(asr.opCtx(),
+ TagsType::ConfigNS,
+ {tag.toBSONLegacyID()},
+ false /* startTransaction */,
+ txnNumber);
+
+ commitLocalTxn(asr.opCtx(), txnNumber);
+
+ txnNumber += 1;
+ }
+ }
+}
+
} // namespace mongo
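Note: upgradeChunksAndTags() and downgradeChunksAndTags() rewrite every config.chunks and config.tags document individually: each document is deleted by its {ns, min} key and re-inserted in the target format, with each rewrite running in its own local transaction on the config server (see the SERVER-42299 TODO about batching). Below is a rough, hypothetical shell sketch of the chunk rewrite, for intuition only -- the server performs this internally via the AlternativeSessionRegion above, not through an external client.

// Rough, hypothetical shell equivalent of the per-document rewrite in upgradeChunksAndTags().
const configDB = db.getSiblingDB("config");
configDB.chunks.find().forEach((chunk) => {
    // Delete the old document, keyed by ns + min (unique per chunk), then re-insert it without
    // an _id so the insert generates a new ObjectId _id -- the 4.4 format.
    configDB.chunks.deleteOne({ns: chunk.ns, min: chunk.min});
    delete chunk._id;
    configDB.chunks.insertOne(chunk);
});
// The tag rewrite is analogous, iterating config.tags and keying each delete on {ns, min}.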
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index 586ad21c85f..6ebbdf264b3 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -416,6 +416,28 @@ public:
*/
static void clearForTests(ServiceContext* serviceContext);
+ /**
+ * Changes the _id format of all documents in config.chunks and config.tags to use the format
+ * introduced in 4.4.
+ *
+ * TODO SERVER-44034: Remove this method.
+ *
+ * TODO SERVER-42299: Optimize this method by batching inserts and deletes into larger
+ * transactions.
+ */
+ void upgradeChunksAndTags(OperationContext* opCtx);
+
+ /**
+ * Changes the _id format of all documents in config.chunks and config.tags to use the format
+ * expected by a 4.2 binary.
+ *
+ * TODO SERVER-44034: Remove this method.
+ *
+ * TODO SERVER-42299: Optimize this method by batching inserts and deletes into larger
+ * transactions.
+ */
+ void downgradeChunksAndTags(OperationContext* opCtx);
+
Lock::ExclusiveLock lockZoneMutex(OperationContext* opCtx);
private:
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 13040b176b9..e8e6fca56c9 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/sharding_logging.h"
+#include "mongo/db/server_options.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -62,6 +63,13 @@ MONGO_FAIL_POINT_DEFINE(skipExpiringOldChunkHistory);
const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::UNSET, Seconds(0));
+bool isUpgradingOrDowngradingFCV() {
+ return (serverGlobalParams.featureCompatibility.getVersion() ==
+ ServerGlobalParams::FeatureCompatibility::Version::kUpgradingTo44) ||
+ (serverGlobalParams.featureCompatibility.getVersion() ==
+ ServerGlobalParams::FeatureCompatibility::Version::kDowngradingTo42);
+}
+
/**
* Append min, max and version information from chunk to the buffer for logChange purposes.
*/
@@ -78,6 +86,8 @@ void appendShortVersion(BufBuilder* out, const ChunkType& chunk) {
BSONArray buildMergeChunksTransactionUpdates(const std::vector<ChunkType>& chunksToMerge,
const ChunkVersion& mergeVersion,
const boost::optional<Timestamp>& validAfter) {
+ invariant(!isUpgradingOrDowngradingFCV());
+
BSONArrayBuilder updates;
// Build an update operation to expand the first chunk into the newly merged chunk
@@ -98,10 +108,18 @@ BSONArray buildMergeChunksTransactionUpdates(const std::vector<ChunkType>& chunk
mergedChunk.setHistory({ChunkHistory(validAfter.get(), mergedChunk.getShard())});
// add the new chunk information as the update object
- op.append("o", mergedChunk.toConfigBSON());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ op.append("o", mergedChunk.toConfigBSON());
- // query object
- op.append("o2", BSON(ChunkType::name(mergedChunk.getName())));
+ // query object
+ op.append("o2", BSON(ChunkType::name(mergedChunk.getName())));
+ } else {
+ op.append("o", mergedChunk.toConfigBSONLegacyID());
+
+ // query object
+ op.append("o2", BSON(ChunkType::legacyName(mergedChunk.getLegacyName())));
+ }
updates.append(op.obj());
}
@@ -113,7 +131,12 @@ BSONArray buildMergeChunksTransactionUpdates(const std::vector<ChunkType>& chunk
op.append("op", "d");
op.append("ns", ChunkType::ConfigNS.ns());
- op.append("o", BSON(ChunkType::name(chunksToMerge[i].getName())));
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ op.append("o", BSON(ChunkType::name(chunksToMerge[i].getName())));
+ } else {
+ op.append("o", BSON(ChunkType::legacyName(chunksToMerge[i].getLegacyName())));
+ }
updates.append(op.obj());
}
@@ -179,6 +202,7 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
const boost::optional<ChunkType>& controlChunk,
StringData fromShard,
StringData toShard) {
+ invariant(!isUpgradingOrDowngradingFCV());
// Update migratedChunk's version and shard.
BSONArrayBuilder updates;
@@ -189,7 +213,12 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
op.append("ns", ChunkType::ConfigNS.ns());
BSONObjBuilder n(op.subobjStart("o"));
- n.append(ChunkType::name(), migratedChunk.getName());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ n.append(ChunkType::name(), migratedChunk.getName());
+ } else {
+ n.append(ChunkType::legacyName(), ChunkType::genLegacyID(nss, migratedChunk.getMin()));
+ }
migratedChunk.getVersion().appendLegacyWithField(&n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), migratedChunk.getMin());
@@ -199,7 +228,12 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
n.done();
BSONObjBuilder q(op.subobjStart("o2"));
- q.append(ChunkType::name(), migratedChunk.getName());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ q.append(ChunkType::name(), migratedChunk.getName());
+ } else {
+ q.append(ChunkType::legacyName(), ChunkType::genLegacyID(nss, migratedChunk.getMin()));
+ }
q.done();
updates.append(op.obj());
@@ -213,7 +247,12 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
op.append("ns", ChunkType::ConfigNS.ns());
BSONObjBuilder n(op.subobjStart("o"));
- n.append(ChunkType::name(), controlChunk->getName());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ n.append(ChunkType::name(), controlChunk->getName());
+ } else {
+ n.append(ChunkType::legacyName(), ChunkType::genLegacyID(nss, controlChunk->getMin()));
+ }
controlChunk->getVersion().appendLegacyWithField(&n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), controlChunk->getMin());
@@ -224,7 +263,12 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
n.done();
BSONObjBuilder q(op.subobjStart("o2"));
- q.append(ChunkType::name(), controlChunk->getName());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ q.append(ChunkType::name(), controlChunk->getName());
+ } else {
+ q.append(ChunkType::legacyName(), controlChunk->getLegacyName());
+ }
q.done();
updates.append(op.obj());
@@ -298,6 +342,14 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// move chunks on different collections to proceed in parallel
Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
+ // The _id format for chunk documents changed in 4.4, so during an upgrade or downgrade it is
+ // not known which format the chunks are currently in. Splitting a chunk requires knowing the
+ // _id of the chunk being split, so to avoid confusing failures, splitting is disabled.
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "Chunks cannot be split while a feature compatibility version upgrade or downgrade is "
+ "in progress",
+ !isUpgradingOrDowngradingFCV());
+
// Get the max chunk version for this namespace.
auto swCollVersion = getMaxChunkVersionFromQueryResponse(
nss,
@@ -366,7 +418,8 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
newChunkBounds.push_back(range.getMax());
auto shouldTakeOriginalChunkID = true;
- std::string chunkID;
+ OID chunkID;
+ std::string legacyChunkID;
BSONArrayBuilder updates;
@@ -406,8 +459,13 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// First chunk takes ID of the original chunk and all other chunks get new IDs. This occurs
// because we perform an update operation below (with upsert true). Keeping the original ID
// ensures we overwrite the old chunk (before the split) without having to perform a delete.
- chunkID =
- shouldTakeOriginalChunkID ? origChunk.getValue().getName() : OID::gen().toString();
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ chunkID = shouldTakeOriginalChunkID ? origChunk.getValue().getName() : OID::gen();
+ } else {
+ legacyChunkID = shouldTakeOriginalChunkID ? origChunk.getValue().getLegacyName()
+ : ChunkType::genLegacyID(nss, startKey);
+ }
shouldTakeOriginalChunkID = false;
// build an update operation against the chunks collection of the config database
@@ -419,7 +477,12 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// add the modified (new) chunk information as the update object
BSONObjBuilder n(op.subobjStart("o"));
- n.append(ChunkType::name(), chunkID);
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ n.append(ChunkType::name(), chunkID);
+ } else {
+ n.append(ChunkType::legacyName(), legacyChunkID);
+ }
currentMaxVersion.appendLegacyWithField(&n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), startKey);
@@ -432,7 +495,12 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// add the chunk's _id as the query part of the update statement
BSONObjBuilder q(op.subobjStart("o2"));
- q.append(ChunkType::name(), chunkID);
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ q.append(ChunkType::name(), chunkID);
+ } else {
+ q.append(ChunkType::legacyName(), legacyChunkID);
+ }
q.done();
updates.append(op.obj());
@@ -527,6 +595,14 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
// move chunks on different collections to proceed in parallel
Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
+ // The _id format for chunk documents changed in 4.4, so during an upgrade or downgrade it is
+ // not known which format the chunks are currently in. Merging a chunk requires knowing the
+ // _id of the chunks being merged, so to avoid confusing failures, merging is disabled.
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "Chunks cannot be merged while a feature compatibility version upgrade or downgrade is "
+ "in progress",
+ !isUpgradingOrDowngradingFCV());
+
if (!validAfter) {
return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"};
}
@@ -580,7 +656,11 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
if (!itOrigChunk.isOK()) {
return itOrigChunk.getStatus();
}
- itChunk.setName(itOrigChunk.getValue().getName());
+
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ itChunk.setName(itOrigChunk.getValue().getName());
+ }
// Ensure the chunk boundaries are strictly increasing
if (chunkBoundaries[i].woCompare(itChunk.getMin()) <= 0) {
@@ -654,6 +734,14 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
// (Note: This is not needed while we have a global lock, taken here only for consistency.)
Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
+ // The _id format for chunk documents changed in 4.4, so during an upgrade or downgrade it is
+ // not known which format the chunks are currently in. Moving a chunk requires knowing the
+ // _id of the chunks being moved, so to avoid confusing failures, migrations are disabled.
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "Chunks cannot be migrated while a feature compatibility version upgrade or downgrade "
+ "is in progress",
+ !isUpgradingOrDowngradingFCV());
+
if (!validAfter) {
return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"};
}
@@ -724,7 +812,10 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
// Generate the new versions of migratedChunk and controlChunk. Migrating chunk's minor version
// will be 0.
ChunkType newMigratedChunk = migratedChunk;
- newMigratedChunk.setName(origChunk.getValue().getName());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ newMigratedChunk.setName(origChunk.getValue().getName());
+ }
newMigratedChunk.setShard(toShard);
newMigratedChunk.setVersion(ChunkVersion(
currentCollectionVersion.majorVersion() + 1, 0, currentCollectionVersion.epoch()));
@@ -768,7 +859,10 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
}
newControlChunk = origControlChunk.getValue();
- newControlChunk->setName(origControlChunk.getValue().getName());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ newControlChunk->setName(origControlChunk.getValue().getName());
+ }
newControlChunk->setVersion(ChunkVersion(
currentCollectionVersion.majorVersion() + 1, 1, currentCollectionVersion.epoch()));
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index 9b5b8eb0f8a..153319734e4 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -37,6 +37,7 @@
#include "mongo/client/read_preference.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/balancer/balancer_policy.h"
+#include "mongo/db/server_options.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_collection.h"
@@ -372,6 +373,11 @@ Status ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
BSONObj updateQuery(BSON(TagsType::ns(nss.ns()) << TagsType::min(fullShardKeyRange.getMin())));
BSONObjBuilder updateBuilder;
+ if (serverGlobalParams.featureCompatibility.getVersion() <=
+ ServerGlobalParams::FeatureCompatibility::Version::kDowngradingTo42) {
+ updateBuilder.append(
+ "_id", BSON(TagsType::ns(nss.ns()) << TagsType::min(fullShardKeyRange.getMin())));
+ }
updateBuilder.append(TagsType::ns(), nss.ns());
updateBuilder.append(TagsType::min(), fullShardKeyRange.getMin());
updateBuilder.append(TagsType::max(), fullShardKeyRange.getMax());
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index 211717731aa..448f3d1b451 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -62,6 +62,7 @@
#include "mongo/s/request_types/clone_collection_options_from_primary_shard_gen.h"
#include "mongo/s/request_types/shard_collection_gen.h"
#include "mongo/s/shard_util.h"
+#include "mongo/util/fail_point.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
#include "mongo/util/str.h"
@@ -70,6 +71,8 @@ namespace mongo {
namespace {
+MONGO_FAIL_POINT_DEFINE(pauseShardCollectionBeforeReturning);
+
struct ShardCollectionTargetState {
UUID uuid;
ShardKeyPattern shardKeyPattern;
@@ -607,7 +610,12 @@ void writeFirstChunksToConfig(OperationContext* opCtx,
std::vector<BSONObj> chunkObjs;
chunkObjs.reserve(initialChunks.chunks.size());
for (const auto& chunk : initialChunks.chunks) {
- chunkObjs.push_back(chunk.toConfigBSON());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kUpgradingTo44) {
+ chunkObjs.push_back(chunk.toConfigBSON());
+ } else {
+ chunkObjs.push_back(chunk.toConfigBSONLegacyID());
+ }
}
Grid::get(opCtx)->catalogClient()->insertConfigDocumentsAsRetryableWrite(
@@ -856,6 +864,11 @@ public:
str::stream() << "Collection " << nss << " is sharded without UUID",
uuid);
+ if (MONGO_unlikely(pauseShardCollectionBeforeReturning.shouldFail())) {
+ log() << "Hit pauseShardCollectionBeforeReturning";
+ pauseShardCollectionBeforeReturning.pauseWhileSet(opCtx);
+ }
+
scopedShardCollection.emplaceUUID(uuid);
}
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 099c667bb41..fd7e9313148 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -46,7 +46,8 @@ namespace mongo {
const NamespaceString ChunkType::ConfigNS("config.chunks");
const std::string ChunkType::ShardNSPrefix = "config.cache.chunks.";
-const BSONField<std::string> ChunkType::name("_id");
+const BSONField<OID> ChunkType::name("_id");
+const BSONField<std::string> ChunkType::legacyName("_id");
const BSONField<BSONObj> ChunkType::minShardID("_id");
const BSONField<std::string> ChunkType::ns("ns");
const BSONField<BSONObj> ChunkType::min("min");
@@ -214,12 +215,18 @@ StatusWith<ChunkType> ChunkType::parseFromConfigBSONCommand(const BSONObj& sourc
ChunkType chunk;
{
- std::string chunkID;
- Status status = bsonExtractStringField(source, name.name(), &chunkID);
+ OID chunkID;
+ Status status = bsonExtractOIDField(source, name.name(), &chunkID);
if (status.isOK()) {
chunk._id = chunkID;
} else if (status == ErrorCodes::NoSuchKey || status == ErrorCodes::TypeMismatch) {
- // ID status is missing or of type objectid, so we just ignore it.
+ // Ignore NoSuchKey because when chunks are sent in commands they are not required to
+ // include it.
+ //
+ // Ignore TypeMismatch for compatibility with binaries 4.2 and earlier, since the _id
+ // type was changed from string to OID.
+ //
+ // TODO SERVER-44034: Stop ignoring TypeMismatch.
} else {
return status;
}
@@ -300,12 +307,15 @@ StatusWith<ChunkType> ChunkType::fromConfigBSON(const BSONObj& source) {
if (!chunk._id) {
{
- std::string chunkID;
- Status status = bsonExtractStringField(source, name.name(), &chunkID);
+ OID chunkID;
+ Status status = bsonExtractOIDField(source, name.name(), &chunkID);
if (status.isOK()) {
chunk._id = chunkID;
} else if (status == ErrorCodes::TypeMismatch) {
- // ID status is of type objectid, so we just ignore it.
+ // The _id format changed from a string to an OID between 4.2 and 4.4, so for
+ // compatibility with chunks created by earlier binaries we ignore TypeMismatch.
+ //
+ // TODO SERVER-44034: Stop ignoring TypeMismatch.
} else {
return status;
}
@@ -335,6 +345,26 @@ BSONObj ChunkType::toConfigBSON() const {
return builder.obj();
}
+BSONObj ChunkType::toConfigBSONLegacyID() const {
+ BSONObjBuilder builder;
+ if (_nss && _min)
+ builder.append(name.name(), genLegacyID(*_nss, *_min));
+ if (_nss)
+ builder.append(ns.name(), getNS().ns());
+ if (_min)
+ builder.append(min.name(), getMin());
+ if (_max)
+ builder.append(max.name(), getMax());
+ if (_shard)
+ builder.append(shard.name(), getShard().toString());
+ if (_version)
+ _version->appendLegacyWithField(&builder, ChunkType::lastmod());
+ if (_jumbo)
+ builder.append(jumbo.name(), getJumbo());
+ addHistoryToBSON(builder);
+ return builder.obj();
+}
+
StatusWith<ChunkType> ChunkType::fromShardBSON(const BSONObj& source, const OID& epoch) {
ChunkType chunk;
@@ -412,17 +442,13 @@ BSONObj ChunkType::toShardBSON() const {
return builder.obj();
}
-std::string ChunkType::getName() const {
- invariant(_id);
+const OID& ChunkType::getName() const {
+ uassert(51264, "Chunk name is not set", _id);
return *_id;
}
-void ChunkType::setName(const std::string& id) {
- _id = id;
-}
-
void ChunkType::setName(const OID& id) {
- _id = id.toString();
+ _id = id;
}
void ChunkType::setNS(const NamespaceString& nss) {
@@ -524,4 +550,22 @@ std::string ChunkType::toString() const {
return toConfigBSON().toString();
}
+std::string ChunkType::genLegacyID(const NamespaceString& nss, const BSONObj& o) {
+ StringBuilder buf;
+ buf << nss.ns() << "-";
+
+ BSONObjIterator i(o);
+ while (i.more()) {
+ BSONElement e = i.next();
+ buf << e.fieldName() << "_" << e.toString(false, true);
+ }
+
+ return buf.str();
+}
+
+std::string ChunkType::getLegacyName() const {
+ invariant(_nss && _min);
+ return genLegacyID(*_nss, *_min);
+}
+
} // namespace mongo
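
genLegacyID() above rebuilds the pre-4.4 _id by writing the namespace, a dash, and then each shard key field as "<field>_<value>". The single-field case below matches the unit test added later in this diff; the compound-key value is an inference from the loop and is shown only as an illustration:

// Expected legacy _id strings (sketch, mirroring the test style used in this changeset).
NamespaceString nss("test.mycol");
ASSERT_EQ("test.mycol-a_10", ChunkType::genLegacyID(nss, BSON("a" << 10)));
// Compound shard keys concatenate the fields in key order (inferred from the loop above).
ASSERT_EQ("test.mycol-a_10b_20", ChunkType::genLegacyID(nss, BSON("a" << 10 << "b" << 20)));
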
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index f517cc65c38..ce5526b01b0 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -183,7 +183,8 @@ public:
static const std::string ShardNSPrefix;
// Field names and types in the chunks collections.
- static const BSONField<std::string> name;
+ static const BSONField<OID> name;
+ static const BSONField<std::string> legacyName; // TODO SERVER-44034: Remove legacyName.
static const BSONField<BSONObj> minShardID;
static const BSONField<std::string> ns;
static const BSONField<BSONObj> min;
@@ -214,6 +215,14 @@ public:
BSONObj toConfigBSON() const;
/**
+ * Returns the BSON representation of the entry for the config server's config.chunks
+ * collection using the _id format expected by binaries in 4.2 and earlier.
+ *
+ * TODO SERVER-44034: Remove when 4.4 becomes last-stable.
+ */
+ BSONObj toConfigBSONLegacyID() const;
+
+ /**
* Constructs a new ChunkType object from BSON that has a shard server's config.chunks.<epoch>
* collection format.
*
@@ -222,19 +231,28 @@ public:
static StatusWith<ChunkType> fromShardBSON(const BSONObj& source, const OID& epoch);
/**
+ * Generates the chunk id that would be expected in binaries 4.2 and earlier based on the
+ * namespace and lower chunk bound.
+ *
+ * TODO SERVER-44034: Remove when 4.4 becomes last-stable.
+ */
+ static std::string genLegacyID(const NamespaceString& nss, const BSONObj& o);
+
+ /**
* Returns the BSON representation of the entry for a shard server's config.chunks.<epoch>
* collection.
*/
BSONObj toShardBSON() const;
- std::string getName() const;
- void setName(const OID& id);
-
/**
- * TODO SERVER-42299: Remove this method once _id is stored as an OID on disk instead of as a
- * string.
+ * Returns the _id that would be used for this chunk in binaries 4.2 and earlier.
+ *
+ * TODO SERVER-44034: Remove when 4.4 becomes last-stable.
*/
- void setName(const std::string& id);
+ std::string getLegacyName() const;
+
+ const OID& getName() const;
+ void setName(const OID& id);
/**
* Getters and setters.
@@ -303,7 +321,7 @@ private:
// Convention: (M)andatory, (O)ptional, (S)pecial; (C)onfig, (S)hard.
// (M)(C) auto-generated object id
- boost::optional<std::string> _id;
+ boost::optional<OID> _id;
// (O)(C) collection this chunk is in
boost::optional<NamespaceString> _nss;
// (M)(C)(S) first key of the range, inclusive
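
With _id now stored as an OID, getName() returns the OID by reference and uasserts with code 51264 when the name was never set, which is the case for chunks parsed from 4.2-format documents. A short sketch of the updated accessors, assuming configDoc is a config.chunks document in the 4.4 format (the variable name is illustrative):

auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(configDoc));
const OID& id = chunk.getName();               // uasserts 51264 if no OID _id was parsed or set
std::string legacyId = chunk.getLegacyName();  // recomputes the 4.2 "<ns>-<min key>" string
chunk.setName(OID::gen());                     // the std::string overload of setName() is removed
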
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index 3c424f815cc..aeabb3749c5 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -50,7 +50,7 @@ TEST(ChunkType, MissingConfigRequiredFields) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj objModNS =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
<< "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
<< chunkVersion.epoch() << ChunkType::shard("shard0001"));
@@ -58,14 +58,14 @@ TEST(ChunkType, MissingConfigRequiredFields) {
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModKeys =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
chunkRes = ChunkType::fromConfigBSON(objModKeys);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModShard =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch());
@@ -73,7 +73,7 @@ TEST(ChunkType, MissingConfigRequiredFields) {
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModVersion =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001"));
chunkRes = ChunkType::fromConfigBSON(objModVersion);
@@ -130,7 +130,7 @@ TEST(ChunkType, ToFromShardBSON) {
TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
@@ -142,7 +142,7 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("b" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
@@ -154,7 +154,7 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
TEST(ChunkType, MinToMaxNotAscending) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 20))
<< ChunkType::max(BSON("a" << 10)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
@@ -163,7 +163,7 @@ TEST(ChunkType, MinToMaxNotAscending) {
}
TEST(ChunkType, ToFromConfigBSON) {
- const std::string chunkID = OID::gen().toString();
+ const auto chunkID = OID::gen();
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
BSON(ChunkType::name(chunkID)
@@ -267,5 +267,69 @@ TEST(ChunkRange, MinGreaterThanMaxShouldError) {
ASSERT_EQ(ErrorCodes::FailedToParse, parseStatus.getStatus());
}
+// TODO SERVER-44034: Delete this test.
+TEST(ChunkType, FromConfigBSONParsesIgnores42_idFormat) {
+ NamespaceString nss("test.mycol");
+ auto minBound = BSON("a" << 10);
+ ChunkVersion chunkVersion(1, 2, OID::gen());
+
+ BSONObj obj = BSON("_id" << ChunkType::genLegacyID(nss, minBound) << ChunkType::ns(nss.ns())
+ << ChunkType::min(minBound) << ChunkType::max(BSON("a" << 20))
+ << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
+ << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+
+ // Parsing will succeed despite the string _id.
+ auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(obj));
+
+ // Attempting to get the 4.4 _id will throw since it hasn't been set.
+ ASSERT_THROWS_CODE(chunk.getName(), AssertionException, 51264);
+}
+
+// TODO SERVER-44034: Delete this test.
+TEST(ChunkType, LegacyNameBSONFieldIs_id) {
+ auto obj = BSON(ChunkType::legacyName("dummyId"));
+ ASSERT_BSONOBJ_EQ(obj,
+ BSON("_id"
+ << "dummyId"));
+}
+
+// TODO SERVER-44034: Delete this test.
+TEST(ChunkType, GetLegacyNameAndGenLegacyIDReturn42_idFormat) {
+ NamespaceString nss("test.mycol");
+ auto minBound = BSON("a" << 10);
+ ChunkVersion chunkVersion(1, 2, OID::gen());
+
+ BSONObj obj =
+ BSON(ChunkType::name(OID::gen())
+ << ChunkType::ns(nss.ns()) << ChunkType::min(minBound)
+ << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(obj));
+
+ ASSERT_EQ("test.mycol-a_10", ChunkType::genLegacyID(nss, minBound));
+ ASSERT_EQ(ChunkType::genLegacyID(nss, minBound), chunk.getLegacyName());
+}
+
+// TODO SERVER-44034: Delete this test.
+TEST(ChunkType, ToConfigBSONLegacyIDUses42_idFormat) {
+ NamespaceString nss("test.mycol");
+ auto minBound = BSON("a" << 10);
+ ChunkVersion chunkVersion(1, 2, OID::gen());
+
+ BSONObj obj =
+ BSON(ChunkType::name(OID::gen())
+ << ChunkType::ns(nss.ns()) << ChunkType::min(minBound)
+ << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(obj));
+
+ ASSERT_BSONOBJ_EQ(chunk.toConfigBSONLegacyID(),
+ BSON("_id" << ChunkType::genLegacyID(nss, minBound)
+ << ChunkType::ns("test.mycol") << ChunkType::min(minBound)
+ << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001")
+ << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
+ << chunkVersion.epoch()));
+}
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/s/catalog/type_tags.cpp b/src/mongo/s/catalog/type_tags.cpp
index 4f949c5b754..9f21a2a5dfa 100644
--- a/src/mongo/s/catalog/type_tags.cpp
+++ b/src/mongo/s/catalog/type_tags.cpp
@@ -147,6 +147,13 @@ BSONObj TagsType::toBSON() const {
return builder.obj();
}
+BSONObj TagsType::toBSONLegacyID() const {
+ // toBSON() doesn't append an _id, so adding the legacy _id here cannot produce a
+ // duplicate field.
+ BSONObjBuilder bob(toBSON());
+ bob.append("_id", BSON(TagsType::ns(_ns->ns()) << TagsType::min(*_minKey)));
+ return bob.obj();
+}
+
std::string TagsType::toString() const {
return toBSON().toString();
}
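
toBSONLegacyID() restores the pre-4.4 tag _id, an embedded {ns, min} document, by appending it to the output of toBSON(), which itself carries no _id. A hedged sketch of how a write path could pick the tag format by FCV, mirroring the chunk handling earlier in this changeset; the helper name is hypothetical:

// Hypothetical helper: serialize a zone/tag document for config.tags in the
// format implied by the current FCV.
BSONObj serializeTagForConfig(const TagsType& tag) {
    if (serverGlobalParams.featureCompatibility.getVersion() >=
        ServerGlobalParams::FeatureCompatibility::Version::kUpgradingTo44) {
        return tag.toBSON();  // no _id appended; the server assigns one on insert
    }
    return tag.toBSONLegacyID();  // _id: {ns: <namespace>, min: <min bound>}
}
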
diff --git a/src/mongo/s/catalog/type_tags.h b/src/mongo/s/catalog/type_tags.h
index d69d9eeb057..6e1f3e33dc1 100644
--- a/src/mongo/s/catalog/type_tags.h
+++ b/src/mongo/s/catalog/type_tags.h
@@ -81,6 +81,14 @@ public:
BSONObj toBSON() const;
/**
+ * Returns the BSON representation of the tag with an _id in the format expected by binaries
+ * 4.2 and earlier.
+ *
+ * TODO SERVER-44034: Remove this method.
+ */
+ BSONObj toBSONLegacyID() const;
+
+ /**
* Returns a std::string representation of the current internal state.
*/
std::string toString() const;
diff --git a/src/mongo/s/catalog/type_tags_test.cpp b/src/mongo/s/catalog/type_tags_test.cpp
index 1cd8ed6d276..e78f6eedeea 100644
--- a/src/mongo/s/catalog/type_tags_test.cpp
+++ b/src/mongo/s/catalog/type_tags_test.cpp
@@ -129,4 +129,18 @@ TEST(TagsType, BadType) {
ASSERT_EQUALS(ErrorCodes::NoSuchKey, status.getStatus());
}
+TEST(TagsType, ToBSONLegacyID) {
+ BSONObj obj =
+ BSON(TagsType::ns("test.mycol") << TagsType::tag("tag") << TagsType::min(BSON("a" << 10))
+ << TagsType::max(BSON("a" << 20)));
+
+ auto tag = uassertStatusOK(TagsType::fromBSON(obj));
+
+ ASSERT_BSONOBJ_EQ(tag.toBSONLegacyID(),
+ BSON(TagsType::ns("test.mycol")
+ << TagsType::tag("tag") << TagsType::min(BSON("a" << 10))
+ << TagsType::max(BSON("a" << 20)) << "_id"
+ << BSON(TagsType::ns("test.mycol") << TagsType::min(BSON("a" << 10)))));
+}
+
} // namespace