author    Jack Mulrow <jack.mulrow@mongodb.com>  2019-10-15 21:10:56 +0000
committer evergreen <evergreen@mongodb.com>      2019-10-15 21:10:56 +0000
commit    c2af35bc34c6ee187f05246cd8eefcac42fc01c9 (patch)
tree      8dcad8384d07644cc39ea406a74e51fcd42fbda3 /jstests
parent    f4c495f0848ba8bf6200b966c3bb4235fee7a7d9 (diff)
download  mongo-c2af35bc34c6ee187f05246cd8eefcac42fc01c9.tar.gz
SERVER-42299 Upgrade/downgrade for config.chunks and config.tags
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/multiVersion/config_chunks_tags_set_fcv.js                    142
-rw-r--r--  jstests/multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js  124
-rw-r--r--  jstests/multiVersion/libs/config_chunks_tags_shared.js                236
-rw-r--r--  jstests/sharding/libs/sharded_transactions_helpers.js                   4
-rw-r--r--  jstests/sharding/mongos_get_shard_version.js                            9
5 files changed, 513 insertions, 2 deletions
diff --git a/jstests/multiVersion/config_chunks_tags_set_fcv.js b/jstests/multiVersion/config_chunks_tags_set_fcv.js
new file mode 100644
index 00000000000..cc58a6c8faf
--- /dev/null
+++ b/jstests/multiVersion/config_chunks_tags_set_fcv.js
@@ -0,0 +1,142 @@
+/**
+ * Tests that config.chunks and config.tags documents are correctly modified on FCV
+ * upgrade/downgrade.
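+ *
+ * Background on the two formats checked below (illustrative values, not taken from a live
+ * cluster): in the old format a chunk's _id is a string derived from its namespace and min
+ * bound, e.g. "test.chunk_coll-_id_MinKey", and a tag's _id is an embedded document, e.g.
+ * {ns: "test.chunk_coll", min: {_id: MinKey}}; in the new format both _ids are ObjectIds.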
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/parallelTester.js"); // for Thread.
+load("jstests/multiVersion/libs/config_chunks_tags_shared.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+// Assumes ns has the following chunk layout: [-inf, -50) and [-50, 0) on shard0 and [0, inf) on
+// shard1.
+function verifyChunkOperationsFailDuringSetFCV(st, ns) {
+ assert.commandFailedWithCode(st.s.adminCommand({split: ns, middle: {_id: 50}}),
+ ErrorCodes.ConflictingOperationInProgress);
+ verifyChunkDistribution(st, ns, [2, 1]);
+
+ assert.commandFailedWithCode(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}),
+ ErrorCodes.ConflictingOperationInProgress);
+ verifyChunkDistribution(st, ns, [2, 1]);
+
+ assert.commandFailedWithCode(
+ st.s.adminCommand({mergeChunks: ns, bounds: [{_id: MinKey}, {_id: 0}]}),
+ ErrorCodes.ConflictingOperationInProgress);
+ verifyChunkDistribution(st, ns, [2, 1]);
+}
+
+// Assumes shard0 is in zone0 which contains [-inf, 0) and is not in zone1.
+function verifyZoneOperationsSucceedDuringSetFCV(st, ns) {
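+ // updateZoneKeyRange with zone: null removes the existing range assignment; re-adding it right
+ // after verifies both operations still work while setFCV is paused.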
+ assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: null}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "zone0"}));
+
+ assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: "zone1"}));
+ assert.commandWorked(
+ st.s.adminCommand({removeShardFromZone: st.shard0.shardName, zone: "zone1"}));
+}
+
+const dbName = "test";
+const chunkNs = dbName + ".chunk_coll";
+const zoneNs = dbName + ".zone_coll";
+
+const st = new ShardingTest({shards: 2});
+const configPrimary = st.configRS.getPrimary();
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+setUpCollectionForChunksTesting(st, chunkNs);
+setUpCollectionForZoneTesting(st, zoneNs);
+
+//
+// Verify chunk and tag documents are updated by setFeatureCompatibilityVersion.
+//
+
+checkFCV(configPrimary.getDB("admin"), latestFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: true});
+
+jsTestLog("Downgrading FCV to last stable");
+assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(configPrimary.getDB("admin"), lastStableFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Upgrading FCV to latest");
+assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(configPrimary.getDB("admin"), latestFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: true});
+
+//
+// Verify operations during setFeatureCompatibilityVersion use the correct format and that setFCV
+// blocks behind in-progress shardCollection operations on shard servers.
+//
+
+function runInProgressSetFCVTest(st, {initialFCV, desiredFCV}) {
+ const pauseInSetFCVFailPointName = desiredFCV === lastStableFCV
+ ? "pauseBeforeDowngradingConfigMetadata"
+ : "pauseBeforeUpgradingConfigMetadata";
+
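+ // waitForFailpoint() counts matches in rawMongoProgramOutput(), so clear the captured output to
+ // avoid counting failpoint log lines left over from a previous run of this function.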
+ clearRawMongoProgramOutput();
+ checkFCV(configPrimary.getDB("admin"), initialFCV);
+
+ // Pause setFCV to test the in-progress states.
+ assert.commandWorked(configPrimary.adminCommand(
+ {configureFailPoint: pauseInSetFCVFailPointName, mode: "alwaysOn"}));
+
+ // Start a shardCollection and pause it before it returns, then verify setFCV blocks behind it.
+ const shardCollDuringSetFCV = new Thread((host, ns) => {
+ const mongosConn = new Mongo(host);
+ return mongosConn.adminCommand({shardCollection: ns, key: {_id: 1}});
+ }, st.s.host, dbName + ".setFCVTo" + desiredFCV);
+ assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "pauseShardCollectionBeforeReturning", mode: "alwaysOn"}));
+ shardCollDuringSetFCV.start();
+ waitForFailpoint("Hit pauseShardCollectionBeforeReturning", 1 /* numTimes */);
+
+ // Assert setFCV can't hit the failpoint until the shardCollection completes.
+ const changeFCV = new Thread((host, fcv) => {
+ const mongosConn = new Mongo(host);
+ return mongosConn.adminCommand({setFeatureCompatibilityVersion: fcv});
+ }, st.s.host, desiredFCV);
+ changeFCV.start();
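+ // The short timeout below is expected to expire (making waitForFailpoint throw) while the
+ // shardCollection is still paused, showing setFCV has not reached its failpoint yet.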
+ assert.throws(() => {
+ waitForFailpoint("Hit " + pauseInSetFCVFailPointName, 1 /* numTimes */, 3000 /* timeout */);
+ });
+
+ // Unpause the shard collection and wait for setFCV to reach the failpoint.
+ assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {configureFailPoint: "pauseShardCollectionBeforeReturning", mode: "off"}));
+ shardCollDuringSetFCV.join();
+ waitForFailpoint("Hit " + pauseInSetFCVFailPointName, 1 /* numTimes */);
+
+ // Verify behavior while setFCV is in progress.
+ verifyChunkOperationsFailDuringSetFCV(st, chunkNs);
+ verifyZoneOperationsSucceedDuringSetFCV(st, zoneNs);
+ testCRUDOperations(st, chunkNs);
+ testCRUDOperations(st, zoneNs);
+
+ // A collection can still be sharded during setFCV, and its chunks should be written in the
+ // correct format.
+ verifyInitialChunks(
+ st, dbName + ".newCollDuringFCV" + desiredFCV, {expectNewFormat: desiredFCV === latestFCV});
+
+ // Unset the setFCV failpoint and allow setFCV to finish.
+ assert.commandWorked(
+ configPrimary.adminCommand({configureFailPoint: pauseInSetFCVFailPointName, mode: "off"}));
+ changeFCV.join();
+ assert.commandWorked(changeFCV.returnData());
+ checkFCV(configPrimary.getDB("admin"), desiredFCV);
+
+ verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: desiredFCV === latestFCV});
+}
+
+runInProgressSetFCVTest(st, {initialFCV: latestFCV, desiredFCV: lastStableFCV});
+runInProgressSetFCVTest(st, {initialFCV: lastStableFCV, desiredFCV: latestFCV});
+
+st.stop();
+}());
diff --git a/jstests/multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js b/jstests/multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js
new file mode 100644
index 00000000000..75a024b2bf5
--- /dev/null
+++ b/jstests/multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js
@@ -0,0 +1,124 @@
+/**
+ * Tests upgrading a cluster from last stable to the latest version and downgrading it back to last
+ * stable, verifying the behavior of chunk and zone operations throughout the process.
+ */
+
+// Checking UUID consistency uses cached connections, which are not valid across restarts or
+// stepdowns.
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
+(function() {
+"use strict";
+
+load("jstests/multiVersion/libs/config_chunks_tags_shared.js");
+load("jstests/multiVersion/libs/multi_cluster.js");
+load("jstests/multiVersion/libs/multi_rs.js");
+
+// Runs commands on the config server that use its replica set monitor (RSM) to target both shard
+// primaries, retrying until they succeed.
+function waitForConfigServerShardRSMRetarget(st) {
+ assert.soonNoExcept(() => {
+ assert.commandWorked(st.s.getDB("unrelated_db").unrelated_coll.insert({x: 1}));
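+ // ensurePrimaryShard() issues movePrimary, which the config server carries out against the
+ // shards, so cycling the primary here forces its RSM to rediscover each shard's primary.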
+ st.ensurePrimaryShard("unrelated_db", st.shard0.shardName);
+ st.ensurePrimaryShard("unrelated_db", st.shard1.shardName);
+ st.ensurePrimaryShard("unrelated_db", st.shard0.shardName);
+ assert.commandWorked(st.s.getDB("unrelated_db").dropDatabase());
+ return true;
+ });
+}
+
+const dbName = "test";
+const chunkNs = dbName + ".chunk_coll";
+const zoneNs = dbName + ".zone_coll";
+
+// Start a cluster with two shards at the last stable version and a sharding enabled db.
+const st = new ShardingTest({
+ shards: 2,
+ other: {
+ mongosOptions: {binVersion: "last-stable"},
+ configOptions: {binVersion: "last-stable"},
+ rsOptions: {binVersion: "last-stable"},
+ },
+ rs: {nodes: 3} // Use 3-node replica sets to allow binary changes with no downtime.
+});
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+// Set up sharded collections for targeted chunk and zone operation testing.
+setUpCollectionForChunksTesting(st, chunkNs);
+setUpCollectionForZoneTesting(st, zoneNs);
+
+// Set up another sharded collection on a different database to verify chunks and zones are updated
+// for every sharded collection.
+setUpExtraShardedCollections(st, "extra_db" /* dbName */);
+
+//
+// Upgrade the cluster to the latest version.
+//
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Upgrading config servers.");
+st.upgradeCluster("latest", {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Upgrading shard servers.");
+st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
+
+// Manually moving a chunk will use the config server's replica set monitor to target the primary of
+// the source shard. After upgrading the shard servers above, this RSM may be stale, so run
+// operations through the config server that will use the same RSM so it picks up the new primary.
+waitForConfigServerShardRSMRetarget(st);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Upgrading mongos servers.");
+st.upgradeCluster("latest", {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Upgrade feature compatibility version to latest");
+assert.commandWorked(st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), latestFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: true});
+
+//
+// Downgrade back to the last stable version.
+//
+
+jsTestLog("Downgrade feature compatibility version to last stable");
+assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Downgrading mongos servers.");
+st.upgradeCluster("last-stable",
+ {upgradeConfigs: false, upgradeMongos: true, upgradeShards: false});
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Downgrading shard servers.");
+st.upgradeCluster("last-stable",
+ {upgradeConfigs: false, upgradeMongos: false, upgradeShards: true});
+
+// Manually moving a chunk will use the config server's replica set monitor to target the primary of
+// the source shard. After downgrading the shard servers above, this RSM may be stale, so run
+// operations through the config server that will use the same RSM so it picks up the new primary.
+waitForConfigServerShardRSMRetarget(st);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+jsTestLog("Downgrading config servers.");
+st.upgradeCluster("last-stable",
+ {upgradeConfigs: true, upgradeMongos: false, upgradeShards: false});
+checkFCV(st.configRS.getPrimary().getDB("admin"), lastStableFCV);
+
+verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat: false});
+
+st.stop();
+})();
diff --git a/jstests/multiVersion/libs/config_chunks_tags_shared.js b/jstests/multiVersion/libs/config_chunks_tags_shared.js
new file mode 100644
index 00000000000..a876bb0d293
--- /dev/null
+++ b/jstests/multiVersion/libs/config_chunks_tags_shared.js
@@ -0,0 +1,236 @@
+/**
+ * Functions and variables shared between multiVersion/config_chunks_tags_set_fcv.js and
+ * multiVersion/config_chunks_tags_upgrade_downgrade_cluster.js.
+ */
+
+// Sets up a collection with chunks in the format expected by the testChunkOperations() helper.
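+// The resulting layout is [-inf, -50) and [-50, 0) on shard0, and [0, inf) on shard1.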
+function setUpCollectionForChunksTesting(st, ns) {
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: -50}}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
+}
+
+// Sets up zones and chunks for a collection to work with the testZoneOperations() helper.
+function setUpCollectionForZoneTesting(st, ns) {
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: "zone0"}));
+ assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: "zone1"}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "zone0"}));
+ assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: 0}, max: {_id: 50}, zone: "zone1"}));
+}
+
+function setUpExtraShardedCollections(st, dbName) {
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ st.ensurePrimaryShard(dbName, st.shard1.shardName);
+
+ // Set up one zone with half the key range and two chunks split at {_id: 0}.
+ const ns = dbName + ".extra_coll";
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(
+ st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: "extra_zone0"}));
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "extra_zone0"}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
+
+ // Set up a sharded collection with a hashed shard key.
+ const hashedNs = dbName + ".extra_coll_hashed";
+ assert.commandWorked(st.s.adminCommand({shardCollection: hashedNs, key: {_id: "hashed"}}));
+}
+
+function verifyChunksAndTags(st, dbName, chunkNs, zoneNs, {expectNewFormat}) {
+ verifyChunks(st, {expectNewFormat});
+ verifyTags(st, {expectNewFormat});
+
+ testChunkOperations(st, chunkNs);
+ testZoneOperations(st, zoneNs);
+ verifyInitialChunks(st, dbName, {expectNewFormat});
+
+ verifyChunks(st, {expectNewFormat});
+ verifyTags(st, {expectNewFormat});
+}
+
+function getChunks(st, ns) {
+ if (ns) {
+ return st.s.getDB("config").chunks.find({ns}).sort({min: 1}).toArray();
+ }
+ return st.s.getDB("config").chunks.find().sort({min: 1}).toArray();
+}
+
+// Asserts all chunk documents have the expected format.
+function verifyChunks(st, {ns, expectNewFormat}) {
+ const chunks = getChunks(st, ns);
+ assert.lte(1, chunks.length, tojson(chunks));
+ chunks.forEach((chunk) => {
+ if (expectNewFormat) {
+ assert(chunk._id.isObjectId, tojson(chunk));
+ assert.neq("string", typeof chunk._id, tojson(chunk));
+ } else {
+ assert(!chunk._id.isObjectId, tojson(chunk));
+ assert.eq("string", typeof chunk._id, tojson(chunk));
+ }
+
+ let expectedChunkFields =
+ ["_id", "ns", "min", "max", "shard", "lastmod", "lastmodEpoch", "history"];
+
+ // Jumbo is an optional field.
+ if (chunk.hasOwnProperty("jumbo")) {
+ expectedChunkFields = expectedChunkFields.concat("jumbo");
+ }
+
+ assert.eq(Object.keys(chunk).length, expectedChunkFields.length, tojson(chunk));
+ assert.hasFields(chunk, expectedChunkFields);
+ });
+}
+
+function getTags(st) {
+ return st.s.getDB("config").tags.find().sort({min: 1}).toArray();
+}
+
+// Asserts all tag documents have the expected format.
+function verifyTags(st, {expectNewFormat}) {
+ const tags = getTags(st);
+ assert.lt(1, tags.length, tojson(tags));
+ tags.forEach((tag) => {
+ if (expectNewFormat) {
+ assert(tag._id.isObjectId, tojson(tag));
+ // typeof returns "object" for ObjectId values, so it can't distinguish the new format:
+ // assert.neq("object", typeof tag._id, tojson(tag));
+ } else {
+ assert(!tag._id.isObjectId, tojson(tag));
+ assert.eq("object", typeof tag._id, tojson(tag));
+ }
+
+ const expectedTagFields = ["_id", "ns", "tag", "min", "max"];
+ assert.eq(Object.keys(tag).length, expectedTagFields.length, tojson(tag));
+ assert.hasFields(tag, expectedTagFields);
+ });
+}
+
+// Runs basic CRUD operations against the given namespace.
+function testCRUDOperations(st, ns) {
+ const coll = st.s.getCollection(ns);
+ assert.eq(0, coll.find().itcount());
+
+ assert.commandWorked(coll.insert({_id: -5}));
+ assert.commandWorked(coll.insert({_id: 5}));
+
+ assert.commandWorked(coll.update({_id: -5}, {$set: {updated: true}}));
+ assert.commandWorked(coll.update({_id: 5}, {$set: {updated: true}}));
+
+ assert.docEq({_id: -5, updated: true}, coll.findOne({_id: -5}));
+ assert.docEq({_id: 5, updated: true}, coll.findOne({_id: 5}));
+
+ assert.commandWorked(coll.remove({_id: -5}, true /* justOne */));
+ assert.commandWorked(coll.remove({_id: 5}, true /* justOne */));
+ assert.eq(0, coll.find().itcount());
+}
+
+// Helper to verify chunks are owned by the expected shards.
+function verifyChunkDistribution(st, ns, expectedChunkDistribution) {
+ for (let i = 0; i < expectedChunkDistribution.length; i++) {
+ assert.eq(expectedChunkDistribution[i],
+ st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
+ "unexpected number of chunks on shard " + i);
+ }
+}
+
+// Assumes ns has the following chunk layout: [-inf, -50) and [-50, 0) on shard0 and [0, inf) on
+// shard1.
+function testChunkOperations(st, ns) {
+ verifyChunkDistribution(st, ns, [2, 1]);
+
+ // Split chunk should work.
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 50}}));
+ verifyChunkDistribution(st, ns, [2, 2]);
+
+ testCRUDOperations(st, ns);
+
+ // Move chunk should work with a control chunk.
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
+ verifyChunkDistribution(st, ns, [3, 1]);
+
+ testCRUDOperations(st, ns);
+
+ // Move chunk should work without a control chunk.
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 50}, to: st.shard0.shardName}));
+ verifyChunkDistribution(st, ns, [4, 0]);
+
+ testCRUDOperations(st, ns);
+
+ // Merge chunk should work.
+ assert.commandWorked(st.s.adminCommand({mergeChunks: ns, bounds: [{_id: -50}, {_id: 50}]}));
+ verifyChunkDistribution(st, ns, [3, 0]);
+
+ testCRUDOperations(st, ns);
+
+ // Reset the chunks to their original state.
+ assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+ verifyChunkDistribution(st, ns, [4, 0]);
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 50}, to: st.shard1.shardName}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+ verifyChunkDistribution(st, ns, [2, 2]);
+ assert.commandWorked(st.s.adminCommand({mergeChunks: ns, bounds: [{_id: 0}, {_id: MaxKey}]}));
+ verifyChunkDistribution(st, ns, [2, 1]);
+
+ testCRUDOperations(st, ns);
+}
+
+// Assumes ns has two chunks, [-inf, 0) and [0, inf), on shard0 and shard1 respectively, and that
+// shard0 is in zone0, which contains [-inf, 0), and shard1 is in zone1, which contains [0, 50).
+function testZoneOperations(st, ns) {
+ // Verify conflicting zones can't be created.
+ assert.commandFailedWithCode(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: -10}, max: {_id: 0}, zone: "zone1"}),
+ ErrorCodes.RangeOverlapConflict);
+
+ // Verify zone boundaries are still enforced.
+ assert.commandFailedWithCode(
+ st.s.adminCommand({moveChunk: ns, find: {_id: -1}, to: st.shard1.shardName}),
+ ErrorCodes.IllegalOperation);
+
+ //
+ // Verify zone ranges can be updated.
+ //
+
+ assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: null}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "zone1"}));
+
+ // Now the chunk can be moved to shard1, which is in zone1.
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+
+ // Reset the chunk and zones.
+ assert.commandWorked(
+ st.s.adminCommand({updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: null}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: ns, min: {_id: MinKey}, max: {_id: 0}, zone: "zone0"}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard0.shardName}));
+}
+
+let uniqueCollCounter = 0;
+
+// Shards a new, uniquely named collection in the given database (which is assumed to have
+// sharding enabled) and verifies its initial chunks are written in the expected format.
+function verifyInitialChunks(st, dbName, {expectNewFormat}) {
+ const ns = dbName + ".unique_coll" + uniqueCollCounter++;
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+
+ // Assert the chunks created for the new namespace are in the correct format.
+ verifyChunks(st, {ns, expectNewFormat});
+
+ // Clean up the new collection.
+ assert.commandWorked(st.s.adminCommand({drop: ns}));
+}
diff --git a/jstests/sharding/libs/sharded_transactions_helpers.js b/jstests/sharding/libs/sharded_transactions_helpers.js
index a25eef6e7f8..96dc193cd0c 100644
--- a/jstests/sharding/libs/sharded_transactions_helpers.js
+++ b/jstests/sharding/libs/sharded_transactions_helpers.js
@@ -95,11 +95,11 @@ function assertNoSuchTransactionOnConn(conn, lsid, txnNumber) {
", txnNumber: " + tojson(txnNumber) + ", connection: " + tojson(conn));
}
-function waitForFailpoint(hitFailpointStr, numTimes) {
+function waitForFailpoint(hitFailpointStr, numTimes, timeout) {
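+ // 'timeout' is optional; when undefined, assert.soon falls back to its default timeout.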
assert.soon(function() {
const re = new RegExp(hitFailpointStr, 'g' /* find all occurrences */);
return (rawMongoProgramOutput().match(re) || []).length == numTimes;
- }, 'Failed to find "' + hitFailpointStr + '" logged ' + numTimes + ' times');
+ }, 'Failed to find "' + hitFailpointStr + '" logged ' + numTimes + ' times', timeout);
}
// Enables the transaction router to retry on stale version (db or shard version) and snapshot
diff --git a/jstests/sharding/mongos_get_shard_version.js b/jstests/sharding/mongos_get_shard_version.js
index ecd383772e4..a22bdb2d03e 100644
--- a/jstests/sharding/mongos_get_shard_version.js
+++ b/jstests/sharding/mongos_get_shard_version.js
@@ -38,6 +38,15 @@ assert.eq(res.version.t, 1);
assert.eq(res.version.i, 0);
if (jsTestOptions().mongosBinVersion == "last-stable") {
assert.eq(undefined, res.chunks);
+
+ // The _id format for config.chunks documents changed in 4.4, so in the mixed version suite the
+ // size arithmetic below does not hold and splitting chunks will fail with BSONObjectTooBig. A
+ // mongos with the last-stable binary does not support returning chunks in getShardVersion, so
+ // we can just return early.
+ //
+ // TODO SERVER-44034: Remove this branch when 4.4 becomes last stable.
+ st.stop();
+ return;
} else {
assert.eq(1, res.chunks.length);
assert.eq(min, res.chunks[0][0]);