author    Matthew Saltz <matthew.saltz@mongodb.com>  2020-07-02 22:52:06 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-07-14 16:03:37 +0000
commit    b2d22916f3f28b1534ce3613685713ca027827e7 (patch)
tree      d2fb060d40c168e912afe5e122540a55ac10ff64
parent    658fccc18c525d0d9ad7aaa32a08b692b19bf0dd (diff)
SERVER-49233 Introduce a flag to toggle the logic for bumping collection's major version during split
-rw-r--r--  jstests/sharding/major_version_check.js                              | 148
-rw-r--r--  jstests/sharding/migration_failure.js                                | 196
-rw-r--r--  jstests/sharding/zero_shard_version.js                               | 492
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp  |   7
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp  | 226
5 files changed, 783 insertions(+), 286 deletions(-)
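
The new config server parameter defaults to false, so chunk splits keep bumping only the
minor version unless the flag is turned on. A minimal sketch (not part of this commit) of
opting in from a jstest, mirroring the ShardingTest options used in the tests below:

    // Start the config servers with the flag enabled so that chunk splits
    // bump the collection's major version.
    var st = new ShardingTest({
        shards: 1,
        mongos: 1,
        other: {configOptions: {setParameter: {incrementChunkMajorVersionOnChunkSplits: true}}}
    });
    // ... exercise splits and assert on versions here ...
    st.stop();
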
diff --git a/jstests/sharding/major_version_check.js b/jstests/sharding/major_version_check.js
index 546d9eb0c33..2ae32fe6cae 100644
--- a/jstests/sharding/major_version_check.js
+++ b/jstests/sharding/major_version_check.js
@@ -4,65 +4,125 @@
(function() {
'use strict';
- var st = new ShardingTest({shards: 1, mongos: 2});
+ // Test with default value of incrementChunkMajorVersionOnChunkSplits, which is
+ // false.
+ (() => {
+ var st = new ShardingTest({shards: 1, mongos: 2});
- var mongos = st.s0;
- var staleMongos = st.s1;
- var admin = mongos.getDB("admin");
- var config = mongos.getDB("config");
- var coll = mongos.getCollection("foo.bar");
+ var mongos = st.s0;
+ var staleMongos = st.s1;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var coll = mongos.getCollection("foo.bar");
- // Shard collection
- assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
- assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+ // Shard collection
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- // Make sure our stale mongos is up-to-date with no splits
- staleMongos.getCollection(coll + "").findOne();
+ // Make sure our stale mongos is up-to-date with no splits
+ staleMongos.getCollection(coll + "").findOne();
- // Run one split
- assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+ // Run one split
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
- // Make sure our stale mongos is not up-to-date with the split
- printjson(admin.runCommand({getShardVersion: coll + ""}));
- printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+ // Make sure our stale mongos is not up-to-date with the split
+ printjson(admin.runCommand({getShardVersion: coll + ""}));
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
- // Compare strings b/c timestamp comparison is a bit weird
- assert.eq(Timestamp(2, 2), admin.runCommand({getShardVersion: coll + ""}).version);
- assert.eq(Timestamp(1, 0),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+ // Compare strings b/c timestamp comparison is a bit weird
+ assert.eq(Timestamp(1, 2), admin.runCommand({getShardVersion: coll + ""}).version);
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
- // See if our stale mongos is required to catch up to run a findOne on an existing connection
- staleMongos.getCollection(coll + "").findOne();
+ // See if our stale mongos is required to catch up to run a findOne on an existing
+ // connection
+ staleMongos.getCollection(coll + "").findOne();
- printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
- assert.eq(Timestamp(1, 0),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
- // See if our stale mongos is required to catch up to run a findOne on a new connection
- staleMongos = new Mongo(staleMongos.host);
- staleMongos.getCollection(coll + "").findOne();
+ // See if our stale mongos is required to catch up to run a findOne on a new connection
+ staleMongos = new Mongo(staleMongos.host);
+ staleMongos.getCollection(coll + "").findOne();
- printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
- assert.eq(Timestamp(1, 0),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
- // Run another split on the original chunk, which does not exist anymore (but the stale mongos
- // thinks it exists). This should fail and cause a refresh on the shard, updating its shard
- // version.
- assert.commandFailed(staleMongos.getDB("admin").runCommand(
- {split: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+ st.stop();
+ })();
- // This findOne will cause a refresh on the router since the shard version has now been
- // increased.
- staleMongos.getCollection(coll + "").findOne();
+ // Test with incrementChunkMajorVersionOnChunkSplits set to true.
+ (() => {
+ var st = new ShardingTest({
+ shards: 1,
+ mongos: 2,
+ other:
+ {configOptions: {setParameter: {incrementChunkMajorVersionOnChunkSplits: true}}}
+ });
- printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+ var mongos = st.s0;
+ var staleMongos = st.s1;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var coll = mongos.getCollection("foo.bar");
- // The previously stale mongos should now be up-to-date.
- assert.eq(Timestamp(2, 2),
- staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+ // Shard collection
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
- st.stop();
+ // Make sure our stale mongos is up-to-date with no splits
+ staleMongos.getCollection(coll + "").findOne();
+
+ // Run one split
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+
+ // Make sure our stale mongos is not up-to-date with the split
+ printjson(admin.runCommand({getShardVersion: coll + ""}));
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+
+ // Compare strings b/c timestamp comparison is a bit weird
+ assert.eq(Timestamp(2, 2), admin.runCommand({getShardVersion: coll + ""}).version);
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+
+ // See if our stale mongos is required to catch up to run a findOne on an existing
+ // connection
+ staleMongos.getCollection(coll + "").findOne();
+
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+
+ // See if our stale mongos is required to catch up to run a findOne on a new connection
+ staleMongos = new Mongo(staleMongos.host);
+ staleMongos.getCollection(coll + "").findOne();
+
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+
+ // Run another split on the original chunk, which does not exist anymore (but the stale
+ // mongos thinks it exists). This should fail and cause a refresh on the shard, updating
+ // its shard version.
+ assert.commandFailed(staleMongos.getDB("admin").runCommand(
+ {split: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+
+ // This findOne will cause a refresh on the router since the shard version has now been
+ // increased.
+ staleMongos.getCollection(coll + "").findOne();
+
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+
+ // The previously stale mongos should now be up-to-date.
+ assert.eq(Timestamp(2, 2),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+
+ st.stop();
+ })();
})();
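
For context, getShardVersion reports the collection version as a Timestamp whose 't'
component is the major version and 'i' the minor version; that is why the default-behavior
test above now expects Timestamp(1, 2) after one split, while the flag-enabled test still
expects Timestamp(2, 2). A short sketch of decomposing the result in the shell, reusing the
'admin' and 'coll' names from the test:

    // The shell's Timestamp type exposes the major part as 't' and the minor
    // part as 'i'.
    var res = admin.runCommand({getShardVersion: coll + ""});
    print("major: " + res.version.t + ", minor: " + res.version.i);
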
diff --git a/jstests/sharding/migration_failure.js b/jstests/sharding/migration_failure.js
index cd346a02e18..1967f44fb12 100644
--- a/jstests/sharding/migration_failure.js
+++ b/jstests/sharding/migration_failure.js
@@ -6,93 +6,113 @@
(function() {
'use strict';
+ function runTest(shouldIncrementChunkMajorVersionOnChunkSplits) {
+ var st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ other: {
+ configOptions: {
+ setParameter: {
+ incrementChunkMajorVersionOnChunkSplits:
+ shouldIncrementChunkMajorVersionOnChunkSplits
+ }
+ }
+ }
+ });
+
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var coll = mongos.getCollection("foo.bar");
+
+ assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+ printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
+ assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
+
+ st.printShardingStatus();
+
+ jsTest.log("Testing failed migrations...");
+
+ var oldVersion = null;
+ var newVersion = null;
+
+ // failMigrationCommit -- this creates an error that aborts the migration before the commit
+ // migration command is sent.
+ assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failMigrationCommit', mode: 'alwaysOn'}));
+
+ if (shouldIncrementChunkMajorVersionOnChunkSplits) {
+ // The split command above bumps the shard version, and this is obtained by the router
+ // via a refresh at the end of the command, but the shard does not know about it yet.
+ // This find will cause the shard to refresh so that this next check for 'oldVersion'
+ // sees the most recent version prior to the migration.
+ coll.findOne();
+ }
+
+ oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+ assert.commandFailed(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
+
+ newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+ assert.eq(oldVersion.t,
+ newVersion.t,
+ "The shard version major value should not change after a failed migration");
+ // Split does not cause a shard routing table refresh, but the moveChunk attempt will.
+ assert.eq(
+ 2,
+ newVersion.i,
+ "The shard routing table should refresh on a failed migration and show the split");
+
+ assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failMigrationCommit', mode: 'off'}));
+
+ // migrationCommitNetworkError -- mimic migration commit command returning a network error,
+ // whereupon the config server is queried to determine that this commit was successful.
+ assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'migrationCommitNetworkError', mode: 'alwaysOn'}));
+
+ // Run a migration where there will still be chunks in the collection remaining on the shard
+ // afterwards. This will cause the collection's shardVersion to be bumped higher.
+ oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 1}, to: st.shard1.shardName}));
+
+ newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+ assert.lt(oldVersion.t,
+ newVersion.t,
+ "The major value in the shard version should have increased");
+ assert.eq(1, newVersion.i, "The minor value in the shard version should be 1");
+
+ // Run a migration to move off the shard's last chunk in the collection. The collection's
+ // shardVersion will be reset.
+ oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
+
+ newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+ assert.gt(oldVersion.t,
+ newVersion.t,
+ "The version prior to the migration should be greater than the reset value");
+
+ assert.eq(0,
+ newVersion.t,
+ "The shard version should have reset, but the major value is not zero");
+ assert.eq(0,
+ newVersion.i,
+ "The shard version should have reset, but the minor value is not zero");
+
+ assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'migrationCommitNetworkError', mode: 'off'}));
+
+ st.stop();
+ }
- var st = new ShardingTest({shards: 2, mongos: 1});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
-
- assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
- printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
- assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
- assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
-
- st.printShardingStatus();
-
- jsTest.log("Testing failed migrations...");
-
- var oldVersion = null;
- var newVersion = null;
-
- // failMigrationCommit -- this creates an error that aborts the migration before the commit
- // migration command is sent.
- assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'failMigrationCommit', mode: 'alwaysOn'}));
-
- // The split command above bumps the shard version, and this is obtained by the router via a
- // refresh at the end of the command, but the shard does not know about it yet. This find will
- // cause the shard to refresh so that this next check for 'oldVersion' sees the most recent
- // version prior to the migration.
- coll.findOne();
-
- oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
- assert.commandFailed(
- admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
-
- newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
- assert.eq(oldVersion.t,
- newVersion.t,
- "The shard version major value should not change after a failed migration");
- // Split does not cause a shard routing table refresh, but the moveChunk attempt will.
- assert.eq(2,
- newVersion.i,
- "The shard routing table should refresh on a failed migration and show the split");
-
- assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'failMigrationCommit', mode: 'off'}));
-
- // migrationCommitNetworkError -- mimic migration commit command returning a network error,
- // whereupon the config server is queried to determine that this commit was successful.
- assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'migrationCommitNetworkError', mode: 'alwaysOn'}));
-
- // Run a migration where there will still be chunks in the collection remaining on the shard
- // afterwards. This will cause the collection's shardVersion to be bumped higher.
- oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: 1}, to: st.shard1.shardName}));
-
- newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
- assert.lt(
- oldVersion.t, newVersion.t, "The major value in the shard version should have increased");
- assert.eq(1, newVersion.i, "The minor value in the shard version should be 1");
-
- // Run a migration to move off the shard's last chunk in the collection. The collection's
- // shardVersion will be reset.
- oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
- assert.commandWorked(
- admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
-
- newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
- assert.gt(oldVersion.t,
- newVersion.t,
- "The version prior to the migration should be greater than the reset value");
-
- assert.eq(
- 0, newVersion.t, "The shard version should have reset, but the major value is not zero");
- assert.eq(
- 0, newVersion.i, "The shard version should have reset, but the minor value is not zero");
-
- assert.commandWorked(st.shard0.getDB("admin").runCommand(
- {configureFailPoint: 'migrationCommitNetworkError', mode: 'off'}));
-
- st.stop();
-
+ runTest(false);
+ runTest(true);
})();
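
The shard-side assertions above read the version the shard itself has cached: when
getShardVersion is run directly against a shard, the cached version is returned under
'global'. A sketch of the pattern, assuming the same 'st' and 'coll' names as in runTest:

    // 'global' carries the shard's cached version; t/i are the major/minor
    // components compared by the assertions above.
    var v = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
    jsTest.log("shard0 version: " + v.t + "|" + v.i);
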
diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js
index b658ec66b43..a504b0d1609 100644
--- a/jstests/sharding/zero_shard_version.js
+++ b/jstests/sharding/zero_shard_version.js
@@ -5,179 +5,369 @@
(function() {
'use strict';
- var st = new ShardingTest({shards: 2, mongos: 4});
+ // Test with default value of incrementChunkMajorVersionOnChunkSplits, which is
+ // false.
+ (() => {
+ var st = new ShardingTest({shards: 2, mongos: 4});
+
+ var testDB_s0 = st.s.getDB('test');
+ assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', st.shard1.shardName);
+ assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+
+ var checkShardMajorVersion = function(conn, expectedVersion) {
+ var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
+ assert.eq(expectedVersion, shardVersionInfo.global.getTime());
+ };
- var testDB_s0 = st.s.getDB('test');
- assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', st.shard1.shardName);
- assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ ///////////////////////////////////////////////////////
+ // Test shard with empty chunk
- var checkShardMajorVersion = function(conn, expectedVersion) {
- var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
- assert.eq(expectedVersion, shardVersionInfo.global.getTime());
- };
+ // shard0: 0|0|a
+ // shard1: 1|0|a, [-inf, inf)
+ // mongos0: 1|0|a
- ///////////////////////////////////////////////////////
- // Test shard with empty chunk
+ var testDB_s1 = st.s1.getDB('test');
+ assert.writeOK(testDB_s1.user.insert({x: 1}));
+ assert.commandWorked(testDB_s1.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
+
+ st.configRS.awaitLastOpCommitted();
+
+ // Official config:
+ // shard0: 2|0|a, [-inf, inf)
+ // shard1: 0|0|a
+ //
+ // Shard metadata:
+ // shard0: 0|0|a
+ // shard1: 0|0|a
+ // mongos0: 1|0|a
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // mongos0 still thinks that { x: 1 } belongs to st.shard1.shardName, but should be able to
+ // refresh its metadata correctly.
+ assert.neq(null, testDB_s0.user.findOne({x: 1}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 2);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // Set mongos2 & mongos3 to version 2|0|a
+ var testDB_s2 = st.s2.getDB('test');
+ assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+ var testDB_s3 = st.s3.getDB('test');
+ assert.neq(null, testDB_s3.user.findOne({x: 1}));
+
+ ///////////////////////////////////////////////////////
+ // Test unsharded collection
+ // mongos versions: s0, s2, s3: 2|0|a
- // shard0: 0|0|a
- // shard1: 1|0|a, [-inf, inf)
- // mongos0: 1|0|a
+ testDB_s1.user.drop();
+ assert.writeOK(testDB_s1.user.insert({x: 10}));
+
+ // shard0: 0|0|0
+ // shard1: 0|0|0
+ // mongos0: 2|0|a
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // mongos0 still thinks { x: 10 } belongs to st.shard0.shardName, but since coll is dropped,
+ // query should be routed to primary shard.
+ assert.neq(null, testDB_s0.user.findOne({x: 10}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ ///////////////////////////////////////////////////////
+ // Test 2 shards with 1 chunk
+ // mongos versions: s0: 0|0|0, s2, s3: 2|0|a
+
+ testDB_s1.user.drop();
+ testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+ testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
+
+ // shard0: 0|0|b,
+ // shard1: 1|1|b, [-inf, 0), [0, inf)
+
+ testDB_s1.user.insert({x: 1});
+ testDB_s1.user.insert({x: -11});
+ assert.commandWorked(testDB_s1.adminCommand(
+ {moveChunk: 'test.user', find: {x: -1}, to: st.shard0.shardName}));
+
+ st.configRS.awaitLastOpCommitted();
+
+ // Official config:
+ // shard0: 2|0|b, [-inf, 0)
+ // shard1: 2|1|b, [0, inf)
+ //
+ // Shard metadata:
+ // shard0: 0|0|b
+ // shard1: 2|1|b
+ //
+ // mongos2: 2|0|a
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 2);
+
+ // mongos2 still thinks that { x: 1 } belongs to st.shard0.shardName, but should be able to
+ // refresh its metadata correctly.
+ assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 2);
+ checkShardMajorVersion(st.rs1.getPrimary(), 2);
- var testDB_s1 = st.s1.getDB('test');
- assert.writeOK(testDB_s1.user.insert({x: 1}));
- assert.commandWorked(
- testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
-
- st.configRS.awaitLastOpCommitted();
-
- // Official config:
- // shard0: 2|0|a, [-inf, inf)
- // shard1: 0|0|a
- //
- // Shard metadata:
- // shard0: 0|0|a
- // shard1: 0|0|a
- // mongos0: 1|0|a
+ // Set shard metadata to 2|0|b
+ assert.neq(null, testDB_s2.user.findOne({x: -11}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 2);
+ checkShardMajorVersion(st.rs1.getPrimary(), 2);
+
+ // Official config:
+ // shard0: 2|0|b, [-inf, 0)
+ // shard1: 2|1|b, [0, inf)
+ //
+ // Shard metadata:
+ // shard0: 2|0|b
+ // shard1: 2|1|b
+ //
+ // mongos3: 2|0|a
+
+ // 4th mongos still thinks that { x: 1 } belongs to st.shard0.shardName, but should be
+ // able to refresh its metadata correctly.
+ assert.neq(null, testDB_s3.user.findOne({x: 1}));
+
+ ///////////////////////////////////////////////////////
+ // Test mongos thinks unsharded when it's actually sharded
+ // mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
+
+ // Set mongos0 to version 0|0|0
+ testDB_s0.user.drop();
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
+ assert.eq(null, testDB_s0.user.findOne({x: 1}));
- // mongos0 still thinks that { x: 1 } belongs to st.shard1.shardName, but should be able to
- // refresh its metadata correctly.
- assert.neq(null, testDB_s0.user.findOne({x: 1}));
+ // We also need to set mongos1 to version 0|0|0, otherwise it'll complain that the
+ // collection is already sharded.
+ assert.eq(null, testDB_s1.user.findOne({x: 1}));
+ assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ testDB_s1.user.insert({x: 1});
+
+ assert.commandWorked(testDB_s1.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
+
+ st.configRS.awaitLastOpCommitted();
- checkShardMajorVersion(st.rs0.getPrimary(), 2);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
+ // Official config:
+ // shard0: 2|0|c, [-inf, inf)
+ // shard1: 0|0|c
+ //
+ // Shard metadata:
+ // shard0: 0|0|c
+ // shard1: 0|0|c
+ //
+ // mongos0: 0|0|0
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // 1st mongos thinks that collection is unsharded and will attempt to query primary shard.
+ assert.neq(null, testDB_s0.user.findOne({x: 1}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 2);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ st.stop();
- // Set mongos2 & mongos3 to version 2|0|a
- var testDB_s2 = st.s2.getDB('test');
- assert.neq(null, testDB_s2.user.findOne({x: 1}));
+ })();
+
+ // Test with incrementChunkMajorVersionOnChunkSplits set to true.
+ (() => {
+ var st = new ShardingTest({
+ shards: 2,
+ mongos: 4,
+ other:
+ {configOptions: {setParameter: {incrementChunkMajorVersionOnChunkSplits: true}}}
+ });
+
+ var testDB_s0 = st.s.getDB('test');
+ assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', st.shard1.shardName);
+ assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+
+ var checkShardMajorVersion = function(conn, expectedVersion) {
+ var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
+ assert.eq(expectedVersion, shardVersionInfo.global.getTime());
+ };
+
+ ///////////////////////////////////////////////////////
+ // Test shard with empty chunk
- var testDB_s3 = st.s3.getDB('test');
- assert.neq(null, testDB_s3.user.findOne({x: 1}));
+ // shard0: 0|0|a
+ // shard1: 1|0|a, [-inf, inf)
+ // mongos0: 1|0|a
- ///////////////////////////////////////////////////////
- // Test unsharded collection
- // mongos versions: s0, s2, s3: 2|0|a
+ var testDB_s1 = st.s1.getDB('test');
+ assert.writeOK(testDB_s1.user.insert({x: 1}));
+ assert.commandWorked(testDB_s1.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
+
+ st.configRS.awaitLastOpCommitted();
+
+ // Official config:
+ // shard0: 2|0|a, [-inf, inf)
+ // shard1: 0|0|a
+ //
+ // Shard metadata:
+ // shard0: 0|0|a
+ // shard1: 0|0|a
+ // mongos0: 1|0|a
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // mongos0 still thinks that { x: 1 } belongs to st.shard1.shardName, but should be able to
+ // refresh its metadata correctly.
+ assert.neq(null, testDB_s0.user.findOne({x: 1}));
- testDB_s1.user.drop();
- assert.writeOK(testDB_s1.user.insert({x: 10}));
+ checkShardMajorVersion(st.rs0.getPrimary(), 2);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
- // shard0: 0|0|0
- // shard1: 0|0|0
- // mongos0: 2|0|a
-
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
-
- // mongos0 still thinks { x: 10 } belongs to st.shard0.shardName, but since coll is dropped,
- // query should be routed to primary shard.
- assert.neq(null, testDB_s0.user.findOne({x: 10}));
+ // Set mongos2 & mongos3 to version 2|0|a
+ var testDB_s2 = st.s2.getDB('test');
+ assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+ var testDB_s3 = st.s3.getDB('test');
+ assert.neq(null, testDB_s3.user.findOne({x: 1}));
+
+ ///////////////////////////////////////////////////////
+ // Test unsharded collection
+ // mongos versions: s0, s2, s3: 2|0|a
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
+ testDB_s1.user.drop();
+ assert.writeOK(testDB_s1.user.insert({x: 10}));
+
+ // shard0: 0|0|0
+ // shard1: 0|0|0
+ // mongos0: 2|0|a
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // mongos0 still thinks { x: 10 } belongs to st.shard0.shardName, but since coll is dropped,
+ // query should be routed to primary shard.
+ assert.neq(null, testDB_s0.user.findOne({x: 10}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ ///////////////////////////////////////////////////////
+ // Test 2 shards with 1 chunk
+ // mongos versions: s0: 0|0|0, s2, s3: 2|0|a
+
+ testDB_s1.user.drop();
+ testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+ testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
+
+ // shard0: 0|0|b,
+ // shard1: 2|1|b, [-inf, 0), [0, inf)
+
+ testDB_s1.user.insert({x: 1});
+ testDB_s1.user.insert({x: -11});
+ assert.commandWorked(testDB_s1.adminCommand(
+ {moveChunk: 'test.user', find: {x: -1}, to: st.shard0.shardName}));
+
+ st.configRS.awaitLastOpCommitted();
+
+ // Official config:
+ // shard0: 3|0|b, [-inf, 0)
+ // shard1: 3|1|b, [0, inf)
+ //
+ // Shard metadata:
+ // shard0: 0|0|b
+ // shard1: 3|1|b
+ //
+ // mongos2: 2|0|a
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 3);
+
+ // mongos2 still thinks that { x: 1 } belongs to st.shard0.shardName, but should be able to
+ // refresh its metadata correctly.
+ assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 3);
+ checkShardMajorVersion(st.rs1.getPrimary(), 3);
- ///////////////////////////////////////////////////////
- // Test 2 shards with 1 chunk
- // mongos versions: s0: 0|0|0, s2, s3: 2|0|a
+ // Set shard metadata to 2|0|b
+ assert.neq(null, testDB_s2.user.findOne({x: -11}));
- testDB_s1.user.drop();
- testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
+ checkShardMajorVersion(st.rs0.getPrimary(), 3);
+ checkShardMajorVersion(st.rs1.getPrimary(), 3);
+
+ // Official config:
+ // shard0: 3|0|b, [-inf, 0)
+ // shard1: 3|1|b, [0, inf)
+ //
+ // Shard metadata:
+ // shard0: 3|0|b
+ // shard1: 3|1|b
+ //
+ // mongos3: 2|0|a
+
+ // 4th mongos still thinks that { x: 1 } belongs to st.shard0.shardName, but should be
+ // able to refresh its metadata correctly.
+ assert.neq(null, testDB_s3.user.findOne({x: 1}));
- // shard0: 0|0|b,
- // shard1: 2|1|b, [-inf, 0), [0, inf)
+ ///////////////////////////////////////////////////////
+ // Test mongos thinks unsharded when it's actually sharded
+ // mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
- testDB_s1.user.insert({x: 1});
- testDB_s1.user.insert({x: -11});
- assert.commandWorked(
- testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: st.shard0.shardName}));
+ // Set mongos0 to version 0|0|0
+ testDB_s0.user.drop();
- st.configRS.awaitLastOpCommitted();
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
- // Official config:
- // shard0: 3|0|b, [-inf, 0)
- // shard1: 3|1|b, [0, inf)
- //
- // Shard metadata:
- // shard0: 0|0|b
- // shard1: 3|1|b
- //
- // mongos2: 2|0|a
+ assert.eq(null, testDB_s0.user.findOne({x: 1}));
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 3);
+ // We also need to set mongos1 to version 0|0|0, otherwise it'll complain that the
+ // collection is already sharded.
+ assert.eq(null, testDB_s1.user.findOne({x: 1}));
+ assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ testDB_s1.user.insert({x: 1});
- // mongos2 still thinks that { x: 1 } belongs to st.shard0.shardName, but should be able to
- // refresh its metadata correctly.
- assert.neq(null, testDB_s2.user.findOne({x: 1}));
+ assert.commandWorked(testDB_s1.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
+
+ st.configRS.awaitLastOpCommitted();
- checkShardMajorVersion(st.rs0.getPrimary(), 3);
- checkShardMajorVersion(st.rs1.getPrimary(), 3);
-
- // Set shard metadata to 2|0|b
- assert.neq(null, testDB_s2.user.findOne({x: -11}));
-
- checkShardMajorVersion(st.rs0.getPrimary(), 3);
- checkShardMajorVersion(st.rs1.getPrimary(), 3);
-
- // Official config:
- // shard0: 3|0|b, [-inf, 0)
- // shard1: 3|1|b, [0, inf)
- //
- // Shard metadata:
- // shard0: 3|0|b
- // shard1: 3|1|b
- //
- // mongos3: 2|0|a
-
- // 4th mongos still thinks that { x: 1 } belongs to st.shard0.shardName, but should be able to
- // refresh its metadata correctly.
- assert.neq(null, testDB_s3.user.findOne({x: 1}));
-
- ///////////////////////////////////////////////////////
- // Test mongos thinks unsharded when it's actually sharded
- // mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
-
- // Set mongos0 to version 0|0|0
- testDB_s0.user.drop();
-
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
-
- assert.eq(null, testDB_s0.user.findOne({x: 1}));
-
- // We also need to set mongos1 to version 0|0|0, otherwise it'll complain that the
- // collection is already sharded.
- assert.eq(null, testDB_s1.user.findOne({x: 1}));
- assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
- testDB_s1.user.insert({x: 1});
-
- assert.commandWorked(
- testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName}));
-
- st.configRS.awaitLastOpCommitted();
-
- // Official config:
- // shard0: 2|0|c, [-inf, inf)
- // shard1: 0|0|c
- //
- // Shard metadata:
- // shard0: 0|0|c
- // shard1: 0|0|c
- //
- // mongos0: 0|0|0
-
- checkShardMajorVersion(st.rs0.getPrimary(), 0);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
-
- // 1st mongos thinks that collection is unsharded and will attempt to query primary shard.
- assert.neq(null, testDB_s0.user.findOne({x: 1}));
-
- checkShardMajorVersion(st.rs0.getPrimary(), 2);
- checkShardMajorVersion(st.rs1.getPrimary(), 0);
-
- st.stop();
+ // Official config:
+ // shard0: 2|0|c, [-inf, inf)
+ // shard1: 0|0|c
+ //
+ // Shard metadata:
+ // shard0: 0|0|c
+ // shard1: 0|0|c
+ //
+ // mongos0: 0|0|0
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 0);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ // 1st mongos thinks that collection is unsharded and will attempt to query primary shard.
+ assert.neq(null, testDB_s0.user.findOne({x: 1}));
+
+ checkShardMajorVersion(st.rs0.getPrimary(), 2);
+ checkShardMajorVersion(st.rs1.getPrimary(), 0);
+
+ st.stop();
+ })();
})();
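
The comments in this test use the notation major|minor|epoch (for example, 2|0|a). The
checkShardMajorVersion helper only inspects the major component via Timestamp.getTime(); a
hedged sketch of a stricter helper that also checks the minor component, using the 't'/'i'
Timestamp fields seen in migration_failure.js above:

    var checkShardFullVersion = function(conn, expectedMajor, expectedMinor) {
        var info = conn.adminCommand({getShardVersion: 'test.user'});
        assert.eq(expectedMajor, info.global.t);  // major component
        assert.eq(expectedMinor, info.global.i);  // minor component
    };
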
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 3133094adb4..033a96c6a25 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/server_parameters.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -55,6 +56,9 @@
#include "mongo/util/mongoutils/str.h"
namespace mongo {
+
+MONGO_EXPORT_SERVER_PARAMETER(incrementChunkMajorVersionOnChunkSplits, bool, false);
+
namespace {
MONGO_FAIL_POINT_DEFINE(migrationCommitVersionError);
@@ -368,10 +372,11 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
ChunkVersion currentMaxVersion = collVersion;
// Increment the major version only if the shard that owns the chunk being split has version ==
// collection version. See SERVER-41480 for details.
- if (shardVersion == collVersion) {
+ if (incrementChunkMajorVersionOnChunkSplits.load() && shardVersion == collVersion) {
currentMaxVersion.incMajor();
}
+
auto startKey = range.getMin();
auto newChunkBounds(splitPoints);
newChunkBounds.push_back(range.getMax());
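
Because the parameter is registered with MONGO_EXPORT_SERVER_PARAMETER and read with an
atomic load() on every split commit, it should also be changeable on a live config server
(the unit-test fixture below relies on the same runtime setFromString path). A hedged shell
sketch, assuming 'st' is a running ShardingTest:

    // Flip the flag at runtime on the config server primary instead of at
    // startup; the next commitChunkSplit will observe the new value.
    assert.commandWorked(st.configRS.getPrimary().adminCommand(
        {setParameter: 1, incrementChunkMajorVersionOnChunkSplits: true}));
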
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index 0140245652c..6566f7c258a 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -33,6 +33,7 @@
#include "mongo/client/read_preference.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/server_parameters.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/config_server_test_fixture.h"
@@ -40,6 +41,29 @@ namespace mongo {
namespace {
using SplitChunkTest = ConfigServerTestFixture;
+/**
+ * A fixture which sets the incrementChunkMajorVersionOnChunkSplits server parameter to true.
+ */
+class SplitChunkWithMajorVersionIncrementTest : public ConfigServerTestFixture {
+public:
+ void setUp() override {
+ ConfigServerTestFixture::setUp();
+ // Ignore the return status.
+ std::ignore = ServerParameterSet::getGlobal()
+ ->getMap()
+ .find("incrementChunkMajorVersionOnChunkSplits")
+ ->second->setFromString("true");
+ }
+
+ void tearDown() override {
+ // Ignore the return status.
+ std::ignore = ServerParameterSet::getGlobal()
+ ->getMap()
+ .find("incrementChunkMajorVersionOnChunkSplits")
+ ->second->setFromString("false");
+ ConfigServerTestFixture::tearDown();
+ }
+};
const NamespaceString kNamespace("TestDB", "TestColl");
@@ -78,6 +102,66 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
auto chunkDoc = chunkDocStatus.getValue();
ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+ // Check for increment on first chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
+
+ // Make sure the history is there
+ ASSERT_EQ(2UL, chunkDoc.getHistory().size());
+
+ // Second chunkDoc should have range [chunkSplitPoint, chunkMax]
+ auto otherChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint);
+ ASSERT_OK(otherChunkDocStatus.getStatus());
+
+ auto otherChunkDoc = otherChunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax());
+
+ // Check for increment on second chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion());
+
+ // Make sure the history is there
+ ASSERT_EQ(2UL, otherChunkDoc.getHistory().size());
+
+ // Both chunks should have the same history
+ ASSERT(chunkDoc.getHistory() == otherChunkDoc.getHistory());
+}
+
+TEST_F(SplitChunkWithMajorVersionIncrementTest, SplitExistingChunkCorrectlyShouldSucceed) {
+ ChunkType chunk;
+ chunk.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+ chunk.setHistory({ChunkHistory(Timestamp(100, 0), ShardId("shard0000")),
+ ChunkHistory(Timestamp(90, 0), ShardId("shardY"))});
+
+ auto chunkSplitPoint = BSON("a" << 5);
+ std::vector<BSONObj> splitPoints{chunkSplitPoint};
+
+ setupChunks({chunk});
+
+ ASSERT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ kNamespace,
+ origVersion.epoch(),
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000"));
+
+ // First chunkDoc should have range [chunkMin, chunkSplitPoint]
+ auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
+ ASSERT_OK(chunkDocStatus.getStatus());
+
+ auto chunkDoc = chunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+
// Check for increment on first chunkDoc's major version.
ASSERT_EQ(origVersion.majorVersion() + 1, chunkDoc.getVersion().majorVersion());
ASSERT_EQ(1u, chunkDoc.getVersion().minorVersion());
@@ -139,6 +223,82 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
auto chunkDoc = chunkDocStatus.getValue();
ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+ // Check for increment on first chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
+
+ // Make sure the history is there
+ ASSERT_EQ(2UL, chunkDoc.getHistory().size());
+
+ // Second chunkDoc should have range [chunkSplitPoint, chunkSplitPoint2]
+ auto midChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint);
+ ASSERT_OK(midChunkDocStatus.getStatus());
+
+ auto midChunkDoc = midChunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint2, midChunkDoc.getMax());
+
+ // Check for increment on second chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), midChunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 2, midChunkDoc.getVersion().minorVersion());
+
+ // Make sure the history is there
+ ASSERT_EQ(2UL, midChunkDoc.getHistory().size());
+
+ // Third chunkDoc should have range [chunkSplitPoint2, chunkMax]
+ auto lastChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint2);
+ ASSERT_OK(lastChunkDocStatus.getStatus());
+
+ auto lastChunkDoc = lastChunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkMax, lastChunkDoc.getMax());
+
+ // Check for increment on third chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), lastChunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 3, lastChunkDoc.getVersion().minorVersion());
+
+ // Make sure the history is there
+ ASSERT_EQ(2UL, lastChunkDoc.getHistory().size());
+
+ // Both chunks should have the same history
+ ASSERT(chunkDoc.getHistory() == midChunkDoc.getHistory());
+ ASSERT(midChunkDoc.getHistory() == lastChunkDoc.getHistory());
+}
+
+TEST_F(SplitChunkWithMajorVersionIncrementTest, MultipleSplitsOnExistingChunkShouldSucceed) {
+ ChunkType chunk;
+ chunk.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+ chunk.setHistory({ChunkHistory(Timestamp(100, 0), ShardId("shard0000")),
+ ChunkHistory(Timestamp(90, 0), ShardId("shardY"))});
+
+ auto chunkSplitPoint = BSON("a" << 5);
+ auto chunkSplitPoint2 = BSON("a" << 7);
+ std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2};
+
+ setupChunks({chunk});
+
+ ASSERT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ kNamespace,
+ origVersion.epoch(),
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000"));
+
+ // First chunkDoc should have range [chunkMin, chunkSplitPoint]
+ auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
+ ASSERT_OK(chunkDocStatus.getStatus());
+
+ auto chunkDoc = chunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+
// Check for increment on first chunkDoc's major version.
ASSERT_EQ(origVersion.majorVersion() + 1, chunkDoc.getVersion().majorVersion());
ASSERT_EQ(1u, chunkDoc.getVersion().minorVersion());
@@ -223,6 +383,66 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
auto chunkDoc = chunkDocStatus.getValue();
ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+ // Check for increment based on the competing chunk version
+ ASSERT_EQ(competingVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(competingVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
+
+ // Second chunkDoc should have range [chunkSplitPoint, chunkMax]
+ auto otherChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint);
+ ASSERT_OK(otherChunkDocStatus.getStatus());
+
+ auto otherChunkDoc = otherChunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax());
+
+ // Check for increment based on the competing chunk version
+ ASSERT_EQ(competingVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(competingVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion());
+}
+
+TEST_F(SplitChunkWithMajorVersionIncrementTest, NewSplitShouldClaimHighestVersion) {
+ ChunkType chunk, chunk2;
+ chunk.setNS(kNamespace);
+ chunk2.setNS(kNamespace);
+ auto collEpoch = OID::gen();
+
+ // set up first chunk
+ auto origVersion = ChunkVersion(1, 2, collEpoch);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+
+ std::vector<BSONObj> splitPoints;
+ auto chunkSplitPoint = BSON("a" << 5);
+ splitPoints.push_back(chunkSplitPoint);
+
+ // set up second chunk (chunk2)
+ auto competingVersion = ChunkVersion(2, 1, collEpoch);
+ chunk2.setVersion(competingVersion);
+ chunk2.setShard(ShardId("shard0000"));
+ chunk2.setMin(BSON("a" << 10));
+ chunk2.setMax(BSON("a" << 20));
+
+ setupChunks({chunk, chunk2});
+
+ ASSERT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ kNamespace,
+ collEpoch,
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000"));
+
+ // First chunkDoc should have range [chunkMin, chunkSplitPoint]
+ auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
+ ASSERT_OK(chunkDocStatus.getStatus());
+
+ auto chunkDoc = chunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+
// Check for major version increment based on the competing chunk version.
ASSERT_EQ(competingVersion.majorVersion() + 1, chunkDoc.getVersion().majorVersion());
// The minor version gets reset to 0 when the major version is incremented, and chunk splits
@@ -245,7 +465,8 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
ASSERT_EQ(2u, otherChunkDoc.getVersion().minorVersion());
}
-TEST_F(SplitChunkTest, SplitsOnShardWithLowerShardVersionDoesNotIncreaseCollectionVersion) {
+TEST_F(SplitChunkWithMajorVersionIncrementTest,
+ SplitsOnShardWithLowerShardVersionDoesNotIncreaseCollectionVersion) {
ChunkType chunk, chunk2;
chunk.setNS(kNamespace);
chunk2.setNS(kNamespace);
@@ -297,7 +518,8 @@ TEST_F(SplitChunkTest, SplitsOnShardWithLowerShardVersionDoesNotIncreaseCollecti
ASSERT_EQ(competingVersion.minorVersion() + 2u, otherChunkDoc.getVersion().minorVersion());
}
-TEST_F(SplitChunkTest, SplitsOnShardWithHighestShardVersionIncreasesCollectionVersion) {
+TEST_F(SplitChunkWithMajorVersionIncrementTest,
+ SplitsOnShardWithHighestShardVersionIncreasesCollectionVersion) {
ChunkType chunk, chunk2;
chunk.setNS(kNamespace);
chunk2.setNS(kNamespace);