author     Matthew Saltz <matthew.saltz@mongodb.com>    2020-07-29 19:57:51 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2020-08-03 15:57:52 +0000
commit     96006491b33820538a5dd850fd0df9fdb6aaa1c9 (patch)
tree       b0fa2a12ee9914da4b08f249f3ee46ca41446a02
parent     dbd1e4182a6a2ba5efe7d9d8ae4b04afc6aa03bc (diff)
download   mongo-96006491b33820538a5dd850fd0df9fdb6aaa1c9.tar.gz
SERVER-49233 Introduce a flag to toggle the logic for bumping the collection's major version during chunk splits
-rw-r--r--  jstests/sharding/major_version_check.js                               149
-rw-r--r--  jstests/sharding/migration_failure.js                                 192
-rw-r--r--  jstests/sharding/zero_shard_version.js                                457
-rw-r--r--  src/mongo/db/s/SConscript                                               1
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp     14
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp    234
-rw-r--r--  src/mongo/db/s/sharding_runtime_d_params.idl                              8
7 files changed, 789 insertions, 266 deletions
diff --git a/jstests/sharding/major_version_check.js b/jstests/sharding/major_version_check.js
index baa01aa7966..0a55fdc8702 100644
--- a/jstests/sharding/major_version_check.js
+++ b/jstests/sharding/major_version_check.js
@@ -4,65 +4,126 @@
(function() {
'use strict';
-var st = new ShardingTest({shards: 1, mongos: 2});
+// Test with default value of incrementChunkMajorVersionOnChunkSplits, which is
+// false.
+(() => {
+ var st = new ShardingTest({shards: 1, mongos: 2});
-var mongos = st.s0;
-var staleMongos = st.s1;
-var admin = mongos.getDB("admin");
-var config = mongos.getDB("config");
-var coll = mongos.getCollection("foo.bar");
+ var mongos = st.s0;
+ var staleMongos = st.s1;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var coll = mongos.getCollection("foo.bar");
-// Shard collection
-assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
-assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+ // Shard collection
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-// Make sure our stale mongos is up-to-date with no splits
-staleMongos.getCollection(coll + "").findOne();
+ // Make sure our stale mongos is up-to-date with no splits
+ staleMongos.getCollection(coll + "").findOne();
-// Run one split
-assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+ // Run one split
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
-// Make sure our stale mongos is not up-to-date with the split
-printjson(admin.runCommand({getShardVersion: coll + ""}));
-printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+ // Make sure our stale mongos is not up-to-date with the split
+ printjson(admin.runCommand({getShardVersion: coll + ""}));
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-// Compare strings b/c timestamp comparison is a bit weird
-assert.eq(Timestamp(2, 2), admin.runCommand({getShardVersion: coll + ""}).version);
-assert.eq(Timestamp(1, 0),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+ // Compare strings b/c timestamp comparison is a bit weird
+ assert.eq(Timestamp(1, 2), admin.runCommand({getShardVersion: coll + ""}).version);
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-// See if our stale mongos is required to catch up to run a findOne on an existing connection
-staleMongos.getCollection(coll + "").findOne();
+ // See if our stale mongos is required to catch up to run a findOne on an existing connection
+ staleMongos.getCollection(coll + "").findOne();
-printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-assert.eq(Timestamp(1, 0),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-// See if our stale mongos is required to catch up to run a findOne on a new connection
-staleMongos = new Mongo(staleMongos.host);
-staleMongos.getCollection(coll + "").findOne();
+ // See if our stale mongos is required to catch up to run a findOne on a new connection
+ staleMongos = new Mongo(staleMongos.host);
+ staleMongos.getCollection(coll + "").findOne();
-printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-assert.eq(Timestamp(1, 0),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-// Run another split on the original chunk, which does not exist anymore (but the stale mongos
-// thinks it exists). This should fail and cause a refresh on the shard, updating its shard
-// version.
-assert.commandFailed(staleMongos.getDB("admin").runCommand(
- {split: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+ st.stop();
+})();
+
+// Test with incrementChunkMajorVersionOnChunkSplits = true and FCV last
+// stable.
+(() => {
+ load("jstests/libs/feature_compatibility_version.js");
+ var st = new ShardingTest({
+ shards: 1,
+ mongos: 2,
+ other: {configOptions: {setParameter: {incrementChunkMajorVersionOnChunkSplits: true}}}
+ });
+ assert.commandWorked(
+ st.configRS.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+ var mongos = st.s0;
+ var staleMongos = st.s1;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var coll = mongos.getCollection("foo.bar");
+
+ // Shard collection
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+
+ // Make sure our stale mongos is up-to-date with no splits
+ staleMongos.getCollection(coll + "").findOne();
+
+ // Run one split
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+
+ // Make sure our stale mongos is not up-to-date with the split
+ printjson(admin.runCommand({getShardVersion: coll + ""}));
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+
+ // Compare strings b/c timestamp comparison is a bit weird
+ assert.eq(Timestamp(2, 2), admin.runCommand({getShardVersion: coll + ""}).version);
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-// This findOne will cause a refresh on the router since the shard version has now been
-// increased.
-staleMongos.getCollection(coll + "").findOne();
+ // See if our stale mongos is required to catch up to run a findOne on an existing connection
+ staleMongos.getCollection(coll + "").findOne();
-printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-// The previously stale mongos should now be up-to-date.
-assert.eq(Timestamp(2, 2),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-st.stop();
+ // See if our stale mongos is required to catch up to run a findOne on a new connection
+ staleMongos = new Mongo(staleMongos.host);
+ staleMongos.getCollection(coll + "").findOne();
+
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+
+ // Run another split on the original chunk, which does not exist anymore (but the stale mongos
+ // thinks it exists). This should fail and cause a refresh on the shard, updating its shard
+ // version.
+ assert.commandFailed(staleMongos.getDB("admin").runCommand(
+ {split: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+
+ // This findOne will cause a refresh on the router since the shard version has now been
+ // increased.
+ staleMongos.getCollection(coll + "").findOne();
+
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+
+ // The previously stale mongos should now be up-to-date.
+ assert.eq(Timestamp(2, 2),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+
+ st.stop();
+})();
})();
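A note on the version arithmetic the two branches above assert (inferred from the assertions themselves, not from any external spec): shardCollection leaves the single chunk at version 1|0, and one split produces two chunks. With the default incrementChunkMajorVersionOnChunkSplits=false only the minor version is bumped, so the chunks end up at 1|1 and 1|2 and the up-to-date mongos reports Timestamp(1, 2). With the flag enabled and the FCV at last-stable, the major version is incremented first, so the chunks end up at 2|1 and 2|2 and the same getShardVersion call reports Timestamp(2, 2).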
diff --git a/jstests/sharding/migration_failure.js b/jstests/sharding/migration_failure.js
index 7f05dd1f9bb..7e3fbff0f05 100644
--- a/jstests/sharding/migration_failure.js
+++ b/jstests/sharding/migration_failure.js
@@ -7,88 +7,116 @@
(function() {
'use strict';
-var st = new ShardingTest({shards: 2, mongos: 1});
+load("jstests/libs/feature_compatibility_version.js");
+
+function runTest(shouldIncrementChunkMajorVersionOnChunkSplits) {
+ var st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ other: {
+ configOptions: {
+ setParameter: {
+ incrementChunkMajorVersionOnChunkSplits:
+ shouldIncrementChunkMajorVersionOnChunkSplits
+ }
+ }
+ }
+ });
+
+ if (shouldIncrementChunkMajorVersionOnChunkSplits) {
+ assert.commandWorked(
+ st.configRS.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ }
+
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var coll = mongos.getCollection("foo.bar");
+
+ assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+ printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
+ assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
+
+ st.printShardingStatus();
+
+ jsTest.log("Testing failed migrations...");
+
+ var oldVersion = null;
+ var newVersion = null;
+
+ // failMigrationCommit -- this creates an error that aborts the migration before the commit
+ // migration command is sent.
+ assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failMigrationCommit', mode: 'alwaysOn'}));
+
+ if (shouldIncrementChunkMajorVersionOnChunkSplits) {
+ // The split command above bumps the shard version, and this is obtained by the router via a
+ // refresh at the end of the command, but the shard does not know about it yet. This find
+ // will cause the shard to refresh so that this next check for 'oldVersion' sees the most
+ // recent version prior to the migration.
+ coll.findOne();
+ }
+
+ oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+ assert.commandFailed(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
+
+ newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+ assert.eq(oldVersion.t,
+ newVersion.t,
+ "The shard version major value should not change after a failed migration");
+ // Split does not cause a shard routing table refresh, but the moveChunk attempt will.
+ assert.eq(2,
+ newVersion.i,
+ "The shard routing table should refresh on a failed migration and show the split");
+
+ assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failMigrationCommit', mode: 'off'}));
+
+ // migrationCommitNetworkError -- mimic migration commit command returning a network error,
+ // whereupon the config server is queried to determine that this commit was successful.
+ assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'migrationCommitNetworkError', mode: 'alwaysOn'}));
+
+ // Run a migration where there will still be chunks in the collection remaining on the shard
+ // afterwards. This will cause the collection's shardVersion to be bumped higher.
+ oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 1}, to: st.shard1.shardName}));
+
+ newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+ assert.lt(
+ oldVersion.t, newVersion.t, "The major value in the shard version should have increased");
+ assert.eq(1, newVersion.i, "The minor value in the shard version should be 1");
+
+ // Run a migration to move off the shard's last chunk in the collection. The collection's
+ // shardVersion will be reset.
+ oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
+
+ newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+ assert.gt(oldVersion.t,
+ newVersion.t,
+ "The version prior to the migration should be greater than the reset value");
+
+ assert.eq(
+ 0, newVersion.t, "The shard version should have reset, but the major value is not zero");
+ assert.eq(
+ 0, newVersion.i, "The shard version should have reset, but the minor value is not zero");
+
+ assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'migrationCommitNetworkError', mode: 'off'}));
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var coll = mongos.getCollection("foo.bar");
+ st.stop();
+}
-assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
-printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
-assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
-assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
-
-st.printShardingStatus();
-
-jsTest.log("Testing failed migrations...");
-
-var oldVersion = null;
-var newVersion = null;
-
-// failMigrationCommit -- this creates an error that aborts the migration before the commit
-// migration command is sent.
-assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'failMigrationCommit', mode: 'alwaysOn'}));
-
-// The split command above bumps the shard version, and this is obtained by the router via a
-// refresh at the end of the command, but the shard does not know about it yet. This find will
-// cause the shard to refresh so that this next check for 'oldVersion' sees the most recent
-// version prior to the migration.
-coll.findOne();
-
-oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
-assert.commandFailed(
- admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
-
-newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
-assert.eq(oldVersion.t,
- newVersion.t,
- "The shard version major value should not change after a failed migration");
-// Split does not cause a shard routing table refresh, but the moveChunk attempt will.
-assert.eq(2,
- newVersion.i,
- "The shard routing table should refresh on a failed migration and show the split");
-
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({configureFailPoint: 'failMigrationCommit', mode: 'off'}));
-
-// migrationCommitNetworkError -- mimic migration commit command returning a network error,
-// whereupon the config server is queried to determine that this commit was successful.
-assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'migrationCommitNetworkError', mode: 'alwaysOn'}));
-
-// Run a migration where there will still be chunks in the collection remaining on the shard
-// afterwards. This will cause the collection's shardVersion to be bumped higher.
-oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
-assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 1}, to: st.shard1.shardName}));
-
-newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
-assert.lt(oldVersion.t, newVersion.t, "The major value in the shard version should have increased");
-assert.eq(1, newVersion.i, "The minor value in the shard version should be 1");
-
-// Run a migration to move off the shard's last chunk in the collection. The collection's
-// shardVersion will be reset.
-oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
-assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
-
-newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
-assert.gt(oldVersion.t,
- newVersion.t,
- "The version prior to the migration should be greater than the reset value");
-
-assert.eq(0, newVersion.t, "The shard version should have reset, but the major value is not zero");
-assert.eq(0, newVersion.i, "The shard version should have reset, but the minor value is not zero");
-
-assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'migrationCommitNetworkError', mode: 'off'}));
-
-st.stop();
+runTest(true);
+runTest(false);
})();
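For reference, both runTest branches drive the donor shard's version through the same transitions spelled out by the assertions above: a failed moveChunk leaves the major version unchanged while the refresh it triggers exposes the split (minor version 2); the first successful migration, which leaves a chunk behind on the donor, increases the major version and sets the minor version to 1; and moving the donor's last chunk away resets its version to 0|0.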
diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js
index 209eff8e49c..263a1854744 100644
--- a/jstests/sharding/zero_shard_version.js
+++ b/jstests/sharding/zero_shard_version.js
@@ -5,174 +5,365 @@
(function() {
'use strict';
-var st = new ShardingTest({shards: 2, mongos: 4});
+// Test with default value of incrementChunkMajorVersionOnChunkSplits, which is
+// false.
+(() => {
+ var st = new ShardingTest({shards: 2, mongos: 4});
+
+ var testDB_s0 = st.s.getDB('test');
+ assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', st.shard1.shardName);
+ assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+
+ var checkShardMajorVersion = function(conn, expectedVersion) {
+ var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
+ assert.eq(expectedVersion, shardVersionInfo.global.getTime());
+ };
-var testDB_s0 = st.s.getDB('test');
-assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
-st.ensurePrimaryShard('test', st.shard1.shardName);
-assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ ///////////////////////////////////////////////////////
+ // Test shard with empty chunk
-var checkShardMajorVersion = function(conn, expectedVersion) {
- var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
- assert.eq(expectedVersion, shardVersionInfo.global.getTime());
-};
+ // shard0: 0|0|a
+ // shard1: 1|0|a, [-inf, inf)
+ // mongos0: 1|0|a
-///////////////////////////////////////////////////////
-// Test shard with empty chunk
+ var testDB_s1 = st.s1.getDB('test');
+ assert.writeOK(testDB_s1.user.insert({x: 1}));
+ assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
+
+ st.configRS.awaitLastOpCommitted();
+
+ // Official config:
+ // shard0: 2|0|a, [-inf, inf)
+ // shard1: 0|0|a
+ //
+ // Shard metadata:
+ // shard0: 0|0|a
+ // shard1: 0|0|a
+ // mongos0: 1|0|a
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // mongos0 still thinks that { x: 1 } belongs to st.shard1.shardName, but should be able to
+ // refresh its metadata correctly.
+ assert.neq(null, testDB_s0.user.findOne({x: 1}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 2);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // Set mongos2 & mongos3 to version 2|0|a
+ var testDB_s2 = st.s2.getDB('test');
+ assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+ var testDB_s3 = st.s3.getDB('test');
+ assert.neq(null, testDB_s3.user.findOne({x: 1}));
-// shard0: 0|0|a
-// shard1: 1|0|a, [-inf, inf)
-// mongos0: 1|0|a
+ ///////////////////////////////////////////////////////
+ // Test unsharded collection
+ // mongos versions: s0, s2, s3: 2|0|a
-var testDB_s1 = st.s1.getDB('test');
-assert.writeOK(testDB_s1.user.insert({x: 1}));
-assert.commandWorked(
- testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
-
-st.configRS.awaitLastOpCommitted();
-
-// Official config:
-// shard0: 2|0|a, [-inf, inf)
-// shard1: 0|0|a
-//
-// Shard metadata:
-// shard0: 0|0|a
-// shard1: 0|0|a
-// mongos0: 1|0|a
+ testDB_s1.user.drop();
+ assert.writeOK(testDB_s1.user.insert({x: 10}));
+
+ // shard0: 0|0|0
+ // shard1: 0|0|0
+ // mongos0: 2|0|a
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // mongos0 still thinks { x: 10 } belongs to st.shard0.shardName, but since coll is dropped,
+ // query should be routed to primary shard.
+ assert.neq(null, testDB_s0.user.findOne({x: 10}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ ///////////////////////////////////////////////////////
+ // Test 2 shards with 1 chunk
+ // mongos versions: s0: 0|0|0, s2, s3: 2|0|a
+
+ testDB_s1.user.drop();
+ testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+ testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
+
+ // shard0: 0|0|b,
+ // shard1: 1|1|b, [-inf, 0), [0, inf)
+
+ testDB_s1.user.insert({x: 1});
+ testDB_s1.user.insert({x: -11});
+ assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: st.shard0.shardName}));
+
+ st.configRS.awaitLastOpCommitted();
+
+ // Official config:
+ // shard0: 2|0|b, [-inf, 0)
+ // shard1: 2|1|b, [0, inf)
+ //
+ // Shard metadata:
+ // shard0: 0|0|b
+ // shard1: 2|1|b
+ //
+ // mongos2: 2|0|a
-checkShardMajorVersion(st.rs0.getPrimary(), 0);
-checkShardMajorVersion(st.rs1.getPrimary(), 0);
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 2);
-// mongos0 still thinks that { x: 1 } belong to st.shard1.shardName, but should be able to
-// refresh it's metadata correctly.
-assert.neq(null, testDB_s0.user.findOne({x: 1}));
+ // mongos2 still thinks that { x: 1 } belongs to st.shard0.shardName, but should be able to
+ // refresh its metadata correctly.
+ assert.neq(null, testDB_s2.user.findOne({x: 1}));
-checkShardMajorVersion(st.rs0.getPrimary(), 2);
-checkShardMajorVersion(st.rs1.getPrimary(), 0);
+ checkShardMajorVersion(st.rs0.getPrimary(), 2);
+ checkShardMajorVersion(st.rs1.getPrimary(), 2);
-// Set mongos2 & mongos3 to version 2|0|a
-var testDB_s2 = st.s2.getDB('test');
-assert.neq(null, testDB_s2.user.findOne({x: 1}));
+ // Set shard metadata to 2|0|b
+ assert.neq(null, testDB_s2.user.findOne({x: -11}));
-var testDB_s3 = st.s3.getDB('test');
-assert.neq(null, testDB_s3.user.findOne({x: 1}));
+ checkShardMajorVersion(st.rs0.getPrimary(), 2);
+ checkShardMajorVersion(st.rs1.getPrimary(), 2);
-///////////////////////////////////////////////////////
-// Test unsharded collection
-// mongos versions: s0, s2, s3: 2|0|a
-
-testDB_s1.user.drop();
-assert.writeOK(testDB_s1.user.insert({x: 10}));
-
-// shard0: 0|0|0
-// shard1: 0|0|0
-// mongos0: 2|0|a
+ // Official config:
+ // shard0: 2|0|b, [-inf, 0)
+ // shard1: 2|1|b, [0, inf)
+ //
+ // Shard metadata:
+ // shard0: 2|0|b
+ // shard1: 2|1|b
+ //
+ // mongos3: 2|0|a
+
+ // 4th mongos still thinks that { x: 1 } belongs to st.shard0.shardName, but should be able to
+ // refresh its metadata correctly.
+ assert.neq(null, testDB_s3.user.findOne({x: 1}));
-checkShardMajorVersion(st.rs0.getPrimary(), 0);
-checkShardMajorVersion(st.rs1.getPrimary(), 0);
+ ///////////////////////////////////////////////////////
+ // Test mongos thinks unsharded when it's actually sharded
+ // mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
-// mongos0 still thinks { x: 10 } belong to st.shard0.shardName, but since coll is dropped,
-// query should be routed to primary shard.
-assert.neq(null, testDB_s0.user.findOne({x: 10}));
+ // Set mongos0 to version 0|0|0
+ testDB_s0.user.drop();
-checkShardMajorVersion(st.rs0.getPrimary(), 0);
-checkShardMajorVersion(st.rs1.getPrimary(), 0);
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
-///////////////////////////////////////////////////////
-// Test 2 shards with 1 chunk
-// mongos versions: s0: 0|0|0, s2, s3: 2|0|a
+ assert.eq(null, testDB_s0.user.findOne({x: 1}));
-testDB_s1.user.drop();
-testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
-testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
+ // We also need to set mongos1 to version 0|0|0; otherwise it'll complain that the collection
+ // is already sharded.
+ assert.eq(null, testDB_s1.user.findOne({x: 1}));
+ assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ testDB_s1.user.insert({x: 1});
-// shard0: 0|0|b,
-// shard1: 2|1|b, [-inf, 0), [0, inf)
+ assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
-testDB_s1.user.insert({x: 1});
-testDB_s1.user.insert({x: -11});
-assert.commandWorked(
- testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: st.shard0.shardName}));
+ st.configRS.awaitLastOpCommitted();
-st.configRS.awaitLastOpCommitted();
+ // Official config:
+ // shard0: 2|0|c, [-inf, inf)
+ // shard1: 0|0|c
+ //
+ // Shard metadata:
+ // shard0: 0|0|c
+ // shard1: 0|0|c
+ //
+ // mongos0: 0|0|0
-// Official config:
-// shard0: 3|0|b, [-inf, 0)
-// shard1: 3|1|b, [0, inf)
-//
-// Shard metadata:
-// shard0: 0|0|b
-// shard1: 3|1|b
-//
-// mongos2: 2|0|a
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // 1st mongos thinks that the collection is unsharded and will attempt to query the primary shard.
+ assert.neq(null, testDB_s0.user.findOne({x: 1}));
-checkShardMajorVersion(st.rs0.getPrimary(), 0);
-checkShardMajorVersion(st.rs1.getPrimary(), 3);
+ checkShardMajorVersion(st.rs0.getPrimary(), 2);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
-// mongos2 still thinks that { x: 1 } belong to st.shard0.shardName, but should be able to
-// refresh it's metadata correctly.
-assert.neq(null, testDB_s2.user.findOne({x: 1}));
+ st.stop();
+})();
+
+// Test with incrementChunkMajorVersionOnChunkSplits = true and FCV last
+// stable.
+(() => {
+ load("jstests/libs/feature_compatibility_version.js");
+
+ var st = new ShardingTest({
+ shards: 2,
+ mongos: 4,
+ other: {configOptions: {setParameter: {incrementChunkMajorVersionOnChunkSplits: true}}}
+ });
+ assert.commandWorked(
+ st.configRS.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+ var testDB_s0 = st.s.getDB('test');
+ assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', st.shard1.shardName);
+ assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+
+ var checkShardMajorVersion = function(conn, expectedVersion) {
+ var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
+ assert.eq(expectedVersion, shardVersionInfo.global.getTime());
+ };
+
+ ///////////////////////////////////////////////////////
+ // Test shard with empty chunk
+
+ // shard0: 0|0|a
+ // shard1: 1|0|a, [-inf, inf)
+ // mongos0: 1|0|a
-checkShardMajorVersion(st.rs0.getPrimary(), 3);
-checkShardMajorVersion(st.rs1.getPrimary(), 3);
+ var testDB_s1 = st.s1.getDB('test');
+ assert.writeOK(testDB_s1.user.insert({x: 1}));
+ assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
+
+ st.configRS.awaitLastOpCommitted();
+
+ // Official config:
+ // shard0: 2|0|a, [-inf, inf)
+ // shard1: 0|0|a
+ //
+ // Shard metadata:
+ // shard0: 0|0|a
+ // shard1: 0|0|a
+ // mongos0: 1|0|a
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // mongos0 still thinks that { x: 1 } belongs to st.shard1.shardName, but should be able to
+ // refresh its metadata correctly.
+ assert.neq(null, testDB_s0.user.findOne({x: 1}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 2);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // Set mongos2 & mongos3 to version 2|0|a
+ var testDB_s2 = st.s2.getDB('test');
+ assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+ var testDB_s3 = st.s3.getDB('test');
+ assert.neq(null, testDB_s3.user.findOne({x: 1}));
+
+ ///////////////////////////////////////////////////////
+ // Test unsharded collection
+ // mongos versions: s0, s2, s3: 2|0|a
+
+ testDB_s1.user.drop();
+ assert.writeOK(testDB_s1.user.insert({x: 10}));
+
+ // shard0: 0|0|0
+ // shard1: 0|0|0
+ // mongos0: 2|0|a
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // mongos0 still thinks { x: 10 } belongs to st.shard0.shardName, but since coll is dropped,
+ // query should be routed to primary shard.
+ assert.neq(null, testDB_s0.user.findOne({x: 10}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ ///////////////////////////////////////////////////////
+ // Test 2 shards with 1 chunk
+ // mongos versions: s0: 0|0|0, s2, s3: 2|0|a
+
+ testDB_s1.user.drop();
+ testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+ testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
+
+ // shard0: 0|0|b,
+ // shard1: 2|1|b, [-inf, 0), [0, inf)
+
+ testDB_s1.user.insert({x: 1});
+ testDB_s1.user.insert({x: -11});
+ assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: st.shard0.shardName}));
+
+ st.configRS.awaitLastOpCommitted();
+
+ // Official config:
+ // shard0: 3|0|b, [-inf, 0)
+ // shard1: 3|1|b, [0, inf)
+ //
+ // Shard metadata:
+ // shard0: 0|0|b
+ // shard1: 3|1|b
+ //
+ // mongos2: 2|0|a
-// Set shard metadata to 2|0|b
-assert.neq(null, testDB_s2.user.findOne({x: -11}));
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 3);
-checkShardMajorVersion(st.rs0.getPrimary(), 3);
-checkShardMajorVersion(st.rs1.getPrimary(), 3);
+ // mongos2 still thinks that { x: 1 } belongs to st.shard0.shardName, but should be able to
+ // refresh its metadata correctly.
+ assert.neq(null, testDB_s2.user.findOne({x: 1}));
-// Official config:
-// shard0: 3|0|b, [-inf, 0)
-// shard1: 3|1|b, [0, inf)
-//
-// Shard metadata:
-// shard0: 3|0|b
-// shard1: 3|1|b
-//
-// mongos3: 2|0|a
-
-///////////////////////////////////////////////////////
-// Test mongos thinks unsharded when it's actually sharded
-// mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
+ checkShardMajorVersion(st.rs0.getPrimary(), 3);
+ checkShardMajorVersion(st.rs1.getPrimary(), 3);
-// Set mongos0 to version 0|0|0
-testDB_s0.user.drop();
+ // Set shard metadata to 2|0|b
+ assert.neq(null, testDB_s2.user.findOne({x: -11}));
-checkShardMajorVersion(st.rs0.getPrimary(), 0);
-checkShardMajorVersion(st.rs1.getPrimary(), 0);
+ checkShardMajorVersion(st.rs0.getPrimary(), 3);
+ checkShardMajorVersion(st.rs1.getPrimary(), 3);
-assert.eq(null, testDB_s0.user.findOne({x: 1}));
+ // Official config:
+ // shard0: 3|0|b, [-inf, 0)
+ // shard1: 3|1|b, [0, inf)
+ //
+ // Shard metadata:
+ // shard0: 3|0|b
+ // shard1: 3|1|b
+ //
+ // mongos3: 2|0|a
+
+ ///////////////////////////////////////////////////////
+ // Test mongos thinks unsharded when it's actually sharded
+ // mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
-// Needs to also set mongos1 to version 0|0|0, otherwise it'll complain that collection is
-// already sharded.
-assert.eq(null, testDB_s1.user.findOne({x: 1}));
-assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
-testDB_s1.user.insert({x: 1});
+ // Set mongos0 to version 0|0|0
+ testDB_s0.user.drop();
-assert.commandWorked(
- testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
-st.configRS.awaitLastOpCommitted();
+ assert.eq(null, testDB_s0.user.findOne({x: 1}));
-// Official config:
-// shard0: 2|0|c, [-inf, inf)
-// shard1: 0|0|c
-//
-// Shard metadata:
-// shard0: 0|0|c
-// shard1: 0|0|c
-//
-// mongos0: 0|0|0
+ // We also need to set mongos1 to version 0|0|0; otherwise it'll complain that the collection
+ // is already sharded.
+ assert.eq(null, testDB_s1.user.findOne({x: 1}));
+ assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ testDB_s1.user.insert({x: 1});
-checkShardMajorVersion(st.rs0.getPrimary(), 0);
-checkShardMajorVersion(st.rs1.getPrimary(), 0);
+ assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
-// 1st mongos thinks that collection is unshareded and will attempt to query primary shard.
-assert.neq(null, testDB_s0.user.findOne({x: 1}));
+ st.configRS.awaitLastOpCommitted();
-checkShardMajorVersion(st.rs0.getPrimary(), 2);
-checkShardMajorVersion(st.rs1.getPrimary(), 0);
+ // Official config:
+ // shard0: 2|0|c, [-inf, inf)
+ // shard1: 0|0|c
+ //
+ // Shard metadata:
+ // shard0: 0|0|c
+ // shard1: 0|0|c
+ //
+ // mongos0: 0|0|0
-st.stop();
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // 1st mongos thinks that the collection is unsharded and will attempt to query the primary shard.
+ assert.neq(null, testDB_s0.user.findOne({x: 1}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 2);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ st.stop();
+})();
})();
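A note on the m|n|x annotations used throughout this test: they read major|minor|epoch, where 0|0|0 means the node has no sharded metadata for the collection, and the letters a, b, c stand for the distinct epochs produced by each successive shardCollection call. The checkShardMajorVersion helper only inspects the major component; a hypothetical extension that also checks the minor component (an illustration only, not part of this patch) could look like:

    var checkShardVersion = function(conn, expectedMajor, expectedMinor) {
        // The 'global' field returned by getShardVersion is a Timestamp whose time part is
        // the major version and whose increment part is the minor version.
        var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
        assert.eq(expectedMajor, shardVersionInfo.global.getTime());
        assert.eq(expectedMinor, shardVersionInfo.global.getInc());
    };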
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 730f0ca33a9..9878bb9fbe3 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -293,6 +293,7 @@ env.Library(
'$BUILD_DIR/mongo/s/client/sharding_client',
'$BUILD_DIR/mongo/s/coreshard',
'$BUILD_DIR/mongo/s/sharding_legacy_api',
+ 'sharding_runtime_d_params',
'balancer',
'type_shard_identity',
],
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 7b325b7213e..83d3eafb10f 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/sharding_logging.h"
+#include "mongo/db/s/sharding_runtime_d_params_gen.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -355,9 +356,16 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
std::vector<ChunkType> newChunks;
ChunkVersion currentMaxVersion = collVersion;
- // Increment the major version only if the shard that owns the chunk being split has version ==
- // collection version. See SERVER-41480 for details.
- if (shardVersion == collVersion) {
+
+ // If the incrementChunkMajorVersionOnChunkSplits flag is set, increment
+ // the major version only if the shard that owns the chunk being split has
+ // version == collection version. See SERVER-41480 for details.
+ //
+ // This flag is only useful if there are some 4.0 routers still in the
+ // cluster, so we only use it if FCV is not fully upgraded.
+ const auto currentFCV = serverGlobalParams.featureCompatibility.getVersion();
+ if (currentFCV != ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42 &&
+ incrementChunkMajorVersionOnChunkSplits.load() && shardVersion == collVersion) {
currentMaxVersion.incMajor();
}
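To summarize the condition introduced above: on a split, the collection's major version is bumped only when all three of the following hold: the FCV is not fully upgraded to 4.2, incrementChunkMajorVersionOnChunkSplits is true, and the shard that owns the chunk being split is at the collection version (the SERVER-41480 condition). In every other case the split increments only the minor versions of the resulting chunks, which is the behavior the default-flag tests above exercise.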
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index a2f0a293482..ff62eabb0ca 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -42,6 +42,34 @@ const NamespaceString kNamespace("TestDB", "TestColl");
using SplitChunkTest = ConfigServerTestFixture;
+/**
+ * A fixture which sets the incrementChunkMajorVersionOnChunkSplits server parameter to true.
+ */
+class SplitChunkWithMajorVersionIncrementTest : public ConfigServerTestFixture {
+public:
+ void setUp() override {
+ ConfigServerTestFixture::setUp();
+ // Ignore the return status.
+ std::ignore = ServerParameterSet::getGlobal()
+ ->getMap()
+ .find("incrementChunkMajorVersionOnChunkSplits")
+ ->second->setFromString("true");
+ serverGlobalParams.featureCompatibility.setVersion(
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo40);
+ }
+
+ void tearDown() override {
+ serverGlobalParams.featureCompatibility.setVersion(
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo42);
+ // Ignore the return status.
+ std::ignore = ServerParameterSet::getGlobal()
+ ->getMap()
+ .find("incrementChunkMajorVersionOnChunkSplits")
+ ->second->setFromString("false");
+ ConfigServerTestFixture::tearDown();
+ }
+};
+
TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ChunkType chunk;
chunk.setNS(kNamespace);
@@ -77,6 +105,202 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
auto chunkDoc = chunkDocStatus.getValue();
ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+ // Check for increment on first chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
+
+ // Make sure the history is there
+ ASSERT_EQ(2UL, chunkDoc.getHistory().size());
+
+ // Second chunkDoc should have range [chunkSplitPoint, chunkMax]
+ auto otherChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint);
+ ASSERT_OK(otherChunkDocStatus.getStatus());
+
+ auto otherChunkDoc = otherChunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax());
+
+ // Check for increment on second chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion());
+
+ // Make sure the history is there
+ ASSERT_EQ(2UL, otherChunkDoc.getHistory().size());
+
+ // Both chunks should have the same history
+ ASSERT(chunkDoc.getHistory() == otherChunkDoc.getHistory());
+}
+
+TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
+ ChunkType chunk;
+ chunk.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+ chunk.setHistory({ChunkHistory(Timestamp(100, 0), ShardId("shard0000")),
+ ChunkHistory(Timestamp(90, 0), ShardId("shardY"))});
+
+ auto chunkSplitPoint = BSON("a" << 5);
+ auto chunkSplitPoint2 = BSON("a" << 7);
+ std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2};
+
+ setupChunks({chunk});
+
+ ASSERT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ kNamespace,
+ origVersion.epoch(),
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000"));
+
+ // First chunkDoc should have range [chunkMin, chunkSplitPoint]
+ auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
+ ASSERT_OK(chunkDocStatus.getStatus());
+
+ auto chunkDoc = chunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+
+ // Check for increment on first chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
+
+ // Make sure the history is there
+ ASSERT_EQ(2UL, chunkDoc.getHistory().size());
+
+ // Second chunkDoc should have range [chunkSplitPoint, chunkSplitPoint2]
+ auto midChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint);
+ ASSERT_OK(midChunkDocStatus.getStatus());
+
+ auto midChunkDoc = midChunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint2, midChunkDoc.getMax());
+
+ // Check for increment on second chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), midChunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 2, midChunkDoc.getVersion().minorVersion());
+
+ // Make sure the history is there
+ ASSERT_EQ(2UL, midChunkDoc.getHistory().size());
+
+ // Third chunkDoc should have range [chunkSplitPoint2, chunkMax]
+ auto lastChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint2);
+ ASSERT_OK(lastChunkDocStatus.getStatus());
+
+ auto lastChunkDoc = lastChunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkMax, lastChunkDoc.getMax());
+
+ // Check for increment on third chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), lastChunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 3, lastChunkDoc.getVersion().minorVersion());
+
+ // Make sure the history is there
+ ASSERT_EQ(2UL, lastChunkDoc.getHistory().size());
+
+ // All three chunks should have the same history
+ ASSERT(chunkDoc.getHistory() == midChunkDoc.getHistory());
+ ASSERT(midChunkDoc.getHistory() == lastChunkDoc.getHistory());
+}
+
+TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
+ ChunkType chunk, chunk2;
+ chunk.setNS(kNamespace);
+ chunk2.setNS(kNamespace);
+ auto collEpoch = OID::gen();
+
+ // set up first chunk
+ auto origVersion = ChunkVersion(1, 2, collEpoch);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+
+ std::vector<BSONObj> splitPoints;
+ auto chunkSplitPoint = BSON("a" << 5);
+ splitPoints.push_back(chunkSplitPoint);
+
+ // set up second chunk (chunk2)
+ auto competingVersion = ChunkVersion(2, 1, collEpoch);
+ chunk2.setVersion(competingVersion);
+ chunk2.setShard(ShardId("shard0000"));
+ chunk2.setMin(BSON("a" << 10));
+ chunk2.setMax(BSON("a" << 20));
+
+ setupChunks({chunk, chunk2});
+
+ ASSERT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ kNamespace,
+ collEpoch,
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000"));
+
+ // First chunkDoc should have range [chunkMin, chunkSplitPoint]
+ auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
+ ASSERT_OK(chunkDocStatus.getStatus());
+
+ auto chunkDoc = chunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+
+ // Check for increment based on the competing chunk version
+ ASSERT_EQ(competingVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(competingVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
+
+ // Second chunkDoc should have range [chunkSplitPoint, chunkMax]
+ auto otherChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint);
+ ASSERT_OK(otherChunkDocStatus.getStatus());
+
+ auto otherChunkDoc = otherChunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax());
+
+ // Check for increment based on the competing chunk version
+ ASSERT_EQ(competingVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(competingVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion());
+}
+
+TEST_F(SplitChunkWithMajorVersionIncrementTest, SplitExistingChunkCorrectlyShouldSucceed) {
+ ChunkType chunk;
+ chunk.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+ chunk.setHistory({ChunkHistory(Timestamp(100, 0), ShardId("shard0000")),
+ ChunkHistory(Timestamp(90, 0), ShardId("shardY"))});
+
+ auto chunkSplitPoint = BSON("a" << 5);
+ std::vector<BSONObj> splitPoints{chunkSplitPoint};
+
+ setupChunks({chunk});
+
+ ASSERT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ kNamespace,
+ origVersion.epoch(),
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000"));
+
+ // First chunkDoc should have range [chunkMin, chunkSplitPoint]
+ auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
+ ASSERT_OK(chunkDocStatus.getStatus());
+
+ auto chunkDoc = chunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+
// Check for increment on first chunkDoc's major version.
ASSERT_EQ(origVersion.majorVersion() + 1, chunkDoc.getVersion().majorVersion());
ASSERT_EQ(1u, chunkDoc.getVersion().minorVersion());
@@ -102,7 +326,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ASSERT(chunkDoc.getHistory() == otherChunkDoc.getHistory());
}
-TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
+TEST_F(SplitChunkWithMajorVersionIncrementTest, MultipleSplitsOnExistingChunkShouldSucceed) {
ChunkType chunk;
chunk.setNS(kNamespace);
@@ -178,7 +402,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
ASSERT(midChunkDoc.getHistory() == lastChunkDoc.getHistory());
}
-TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
+TEST_F(SplitChunkWithMajorVersionIncrementTest, NewSplitShouldClaimHighestVersion) {
ChunkType chunk, chunk2;
chunk.setNS(kNamespace);
chunk2.setNS(kNamespace);
@@ -244,7 +468,8 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
ASSERT_EQ(2u, otherChunkDoc.getVersion().minorVersion());
}
-TEST_F(SplitChunkTest, SplitsOnShardWithLowerShardVersionDoesNotIncreaseCollectionVersion) {
+TEST_F(SplitChunkWithMajorVersionIncrementTest,
+ SplitsOnShardWithLowerShardVersionDoesNotIncreaseCollectionVersion) {
ChunkType chunk, chunk2;
chunk.setNS(kNamespace);
chunk2.setNS(kNamespace);
@@ -296,7 +521,8 @@ TEST_F(SplitChunkTest, SplitsOnShardWithLowerShardVersionDoesNotIncreaseCollecti
ASSERT_EQ(competingVersion.minorVersion() + 2u, otherChunkDoc.getVersion().minorVersion());
}
-TEST_F(SplitChunkTest, SplitsOnShardWithHighestShardVersionIncreasesCollectionVersion) {
+TEST_F(SplitChunkWithMajorVersionIncrementTest,
+ SplitsOnShardWithHighestShardVersionIncreasesCollectionVersion) {
ChunkType chunk, chunk2;
chunk.setNS(kNamespace);
chunk2.setNS(kNamespace);
diff --git a/src/mongo/db/s/sharding_runtime_d_params.idl b/src/mongo/db/s/sharding_runtime_d_params.idl
index bc0a5d04d24..ee943867ba9 100644
--- a/src/mongo/db/s/sharding_runtime_d_params.idl
+++ b/src/mongo/db/s/sharding_runtime_d_params.idl
@@ -115,3 +115,11 @@ server_parameters:
cpp_varname: minNumChunksForSessionsCollection
default: 1024
validator: { gte: 1, lte: 1000000 }
+
+ incrementChunkMajorVersionOnChunkSplits:
+ description: >-
+ If true, chunk splits increment the collection's major version when the shard that owns
+ the chunk being split is at the collection version (see SERVER-41480 for details). This
+ only takes effect while the FCV is not fully upgraded to 4.2.
+ set_at: [startup, runtime]
+ cpp_vartype: AtomicWord<bool>
+ cpp_varname: incrementChunkMajorVersionOnChunkSplits
+ default: false
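The parameter above is declared settable at both startup and runtime. A minimal sketch of toggling it on a config server, assuming a stock mongod and mongo shell (not part of this patch):

    // At runtime, against the config server replica set primary:
    assert.commandWorked(db.adminCommand(
        {setParameter: 1, incrementChunkMajorVersionOnChunkSplits: true}));

    // At startup it can instead be passed on the command line, e.g.:
    //   mongod --configsvr --setParameter incrementChunkMajorVersionOnChunkSplits=true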