author      Matthew Saltz <matthew.saltz@mongodb.com>         2020-07-02 22:52:06 +0000
committer   Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-07-08 20:37:36 +0000
commit      f0253e7ba95430c7e100b4ab5f76dd786f9f9a3d (patch)
tree        6bdd91ce5cd0c068623f07b88856d89f8daaf54b
parent      9d0e2b304e59c29e899c3c225f67bc822d64f153 (diff)
download    mongo-f0253e7ba95430c7e100b4ab5f76dd786f9f9a3d.tar.gz
SERVER-49233 Introduce a flag to toggle the logic for bumping collection's major version during split
-rw-r--r--   jstests/sharding/major_version_check.js                            147
-rw-r--r--   jstests/sharding/migration_failure.js                              196
-rw-r--r--   jstests/sharding/zero_shard_version.js                             462
-rw-r--r--   src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp    6
-rw-r--r--   src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp          203
5 files changed, 742 insertions(+), 272 deletions(-)
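The change in a nutshell: commitChunkSplit on the config server now consults a new
incrementChunkMajorVersionOnChunkSplits server parameter (default false) before bumping the
collection's major version on a chunk split; with the flag off, a split only increments the
minor version. The sketch below distills the observable difference asserted by the tests in
this patch. It is illustrative only: it assumes the mongo shell's ShardingTest harness used by
jstests, and the helper function name is mine, not part of the patch.

    // Sketch: observable effect of the flag, distilled from major_version_check.js below.
    (function() {
        'use strict';

        // Returns the shard version a mongos reports after a single chunk split,
        // with the config server started with the given flag value.
        function shardVersionAfterOneSplit(flagValue) {
            var st = new ShardingTest({
                shards: 1,
                mongos: 1,
                other: {
                    configOptions:
                        {setParameter: {incrementChunkMajorVersionOnChunkSplits: flagValue}}
                }
            });
            var admin = st.s0.getDB("admin");
            assert.commandWorked(admin.runCommand({enableSharding: "foo"}));
            assert.commandWorked(admin.runCommand({shardCollection: "foo.bar", key: {_id: 1}}));
            assert.commandWorked(admin.runCommand({split: "foo.bar", middle: {_id: 0}}));
            var version = admin.runCommand({getShardVersion: "foo.bar"}).version;
            st.stop();
            return version;
        }

        // Flag off (the new default): the split bumps only the minor version -> 1|2.
        assert.eq(Timestamp(1, 2), shardVersionAfterOneSplit(false));
        // Flag on (the previous behavior): the split bumps the major version -> 2|2.
        assert.eq(Timestamp(2, 2), shardVersionAfterOneSplit(true));
    })();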
diff --git a/jstests/sharding/major_version_check.js b/jstests/sharding/major_version_check.js
index 546d9eb0c33..56440b2bfe6 100644
--- a/jstests/sharding/major_version_check.js
+++ b/jstests/sharding/major_version_check.js
@@ -3,66 +3,125 @@
 //
 (function() {
     'use strict';
+    // Test with default value of incrementChunkMajorVersionOnChunkSplits, which is
+    // false.
+    (() => {
+        var st = new ShardingTest({shards: 1, mongos: 2});
-    var st = new ShardingTest({shards: 1, mongos: 2});
+        var mongos = st.s0;
+        var staleMongos = st.s1;
+        var admin = mongos.getDB("admin");
+        var config = mongos.getDB("config");
+        var coll = mongos.getCollection("foo.bar");
-    var mongos = st.s0;
-    var staleMongos = st.s1;
-    var admin = mongos.getDB("admin");
-    var config = mongos.getDB("config");
-    var coll = mongos.getCollection("foo.bar");
+        // Shard collection
+        assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+        assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-    // Shard collection
-    assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
-    assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+        // Make sure our stale mongos is up-to-date with no splits
+        staleMongos.getCollection(coll + "").findOne();
-    // Make sure our stale mongos is up-to-date with no splits
-    staleMongos.getCollection(coll + "").findOne();
+        // Run one split
+        assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
-    // Run one split
-    assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+        // Make sure our stale mongos is not up-to-date with the split
+        printjson(admin.runCommand({getShardVersion: coll + ""}));
+        printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-    // Make sure our stale mongos is not up-to-date with the split
-    printjson(admin.runCommand({getShardVersion: coll + ""}));
-    printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+        // Compare strings b/c timestamp comparison is a bit weird
+        assert.eq(Timestamp(1, 2), admin.runCommand({getShardVersion: coll + ""}).version);
+        assert.eq(Timestamp(1, 0),
+                  staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-    // Compare strings b/c timestamp comparison is a bit weird
-    assert.eq(Timestamp(2, 2), admin.runCommand({getShardVersion: coll + ""}).version);
-    assert.eq(Timestamp(1, 0),
-              staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+        // See if our stale mongos is required to catch up to run a findOne on an existing
+        // connection
+        staleMongos.getCollection(coll + "").findOne();
-    // See if our stale mongos is required to catch up to run a findOne on an existing connection
-    staleMongos.getCollection(coll + "").findOne();
+        printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-    printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+        assert.eq(Timestamp(1, 0),
+                  staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-    assert.eq(Timestamp(1, 0),
-              staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+        // See if our stale mongos is required to catch up to run a findOne on a new connection
+        staleMongos = new Mongo(staleMongos.host);
+        staleMongos.getCollection(coll + "").findOne();
-    // See if our stale mongos is required to catch up to run a findOne on a new connection
-    staleMongos = new Mongo(staleMongos.host);
-    staleMongos.getCollection(coll + "").findOne();
+        printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-    printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+        assert.eq(Timestamp(1, 0),
+                  staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-    assert.eq(Timestamp(1, 0),
-              staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+        st.stop();
+    })();
-    // Run another split on the original chunk, which does not exist anymore (but the stale mongos
-    // thinks it exists). This should fail and cause a refresh on the shard, updating its shard
-    // version.
-    assert.commandFailed(staleMongos.getDB("admin").runCommand(
-        {split: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+    // Test with incrementChunkMajorVersionOnChunkSplits set to true.
+    (() => {
+        var st = new ShardingTest({
+            shards: 1,
+            mongos: 2,
+            other:
+                {configOptions: {setParameter: {incrementChunkMajorVersionOnChunkSplits: true}}}
+        });
-    // This findOne will cause a refresh on the router since the shard version has now been
-    // increased.
-    staleMongos.getCollection(coll + "").findOne();
+        var mongos = st.s0;
+        var staleMongos = st.s1;
+        var admin = mongos.getDB("admin");
+        var config = mongos.getDB("config");
+        var coll = mongos.getCollection("foo.bar");
-    printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+        // Shard collection
+        assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+        assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-    // The previously stale mongos should now be up-to-date.
-    assert.eq(Timestamp(2, 2),
-              staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+        // Make sure our stale mongos is up-to-date with no splits
+        staleMongos.getCollection(coll + "").findOne();
-    st.stop();
+        // Run one split
+        assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+
+        // Make sure our stale mongos is not up-to-date with the split
+        printjson(admin.runCommand({getShardVersion: coll + ""}));
+        printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+
+        // Compare strings b/c timestamp comparison is a bit weird
+        assert.eq(Timestamp(2, 2), admin.runCommand({getShardVersion: coll + ""}).version);
+        assert.eq(Timestamp(1, 0),
+                  staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+
+        // See if our stale mongos is required to catch up to run a findOne on an existing
+        // connection
+        staleMongos.getCollection(coll + "").findOne();
+
+        printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+
+        assert.eq(Timestamp(1, 0),
+                  staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+
+        // See if our stale mongos is required to catch up to run a findOne on a new connection
+        staleMongos = new Mongo(staleMongos.host);
+        staleMongos.getCollection(coll + "").findOne();
+
+        printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+
+        assert.eq(Timestamp(1, 0),
+                  staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+
+        // Run another split on the original chunk, which does not exist anymore (but the stale
+        // mongos thinks it exists). This should fail and cause a refresh on
+        // the shard, updating its shard version.
+        assert.commandFailed(staleMongos.getDB("admin").runCommand(
+            {split: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+
+        // This findOne will cause a refresh on the router since the shard version has now been
+        // increased.
+        staleMongos.getCollection(coll + "").findOne();
+
+        printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
+
+        // The previously stale mongos should now be up-to-date.
+        assert.eq(Timestamp(2, 2),
+                  staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+
+        st.stop();
+    })();
 })();
diff --git a/jstests/sharding/migration_failure.js b/jstests/sharding/migration_failure.js
index cd346a02e18..81dbf43a9ef 100644
--- a/jstests/sharding/migration_failure.js
+++ b/jstests/sharding/migration_failure.js
@@ -7,92 +7,112 @@
 (function() {
     'use strict';
-    var st = new ShardingTest({shards: 2, mongos: 1});
-
-    var mongos = st.s0;
-    var admin = mongos.getDB("admin");
-    var coll = mongos.getCollection("foo.bar");
-
-    assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
-    printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
-    assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
-    assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
-
-    st.printShardingStatus();
-
-    jsTest.log("Testing failed migrations...");
-
-    var oldVersion = null;
-    var newVersion = null;
-
-    // failMigrationCommit -- this creates an error that aborts the migration before the commit
-    // migration command is sent.
-    assert.commandWorked(st.shard0.getDB("admin").runCommand(
-        {configureFailPoint: 'failMigrationCommit', mode: 'alwaysOn'}));
-
-    // The split command above bumps the shard version, and this is obtained by the router via a
-    // refresh at the end of the command, but the shard does not know about it yet. This find will
-    // cause the shard to refresh so that this next check for 'oldVersion' sees the most recent
-    // version prior to the migration.
-    coll.findOne();
-
-    oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
-    assert.commandFailed(
-        admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
-
-    newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
-    assert.eq(oldVersion.t,
-              newVersion.t,
-              "The shard version major value should not change after a failed migration");
-    // Split does not cause a shard routing table refresh, but the moveChunk attempt will.
-    assert.eq(2,
-              newVersion.i,
-              "The shard routing table should refresh on a failed migration and show the split");
-
-    assert.commandWorked(st.shard0.getDB("admin").runCommand(
-        {configureFailPoint: 'failMigrationCommit', mode: 'off'}));
-
-    // migrationCommitNetworkError -- mimic migration commit command returning a network error,
-    // whereupon the config server is queried to determine that this commit was successful.
-    assert.commandWorked(st.shard0.getDB("admin").runCommand(
-        {configureFailPoint: 'migrationCommitNetworkError', mode: 'alwaysOn'}));
-
-    // Run a migration where there will still be chunks in the collection remaining on the shard
-    // afterwards. This will cause the collection's shardVersion to be bumped higher.
-    oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
-    assert.commandWorked(
-        admin.runCommand({moveChunk: coll + "", find: {_id: 1}, to: st.shard1.shardName}));
-
-    newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
-    assert.lt(
-        oldVersion.t, newVersion.t, "The major value in the shard version should have increased");
-    assert.eq(1, newVersion.i, "The minor value in the shard version should be 1");
-
-    // Run a migration to move off the shard's last chunk in the collection. The collection's
-    // shardVersion will be reset.
-    oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
-    assert.commandWorked(
-        admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
-
-    newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
-
-    assert.gt(oldVersion.t,
-              newVersion.t,
-              "The version prior to the migration should be greater than the reset value");
-
-    assert.eq(
-        0, newVersion.t, "The shard version should have reset, but the major value is not zero");
-    assert.eq(
-        0, newVersion.i, "The shard version should have reset, but the minor value is not zero");
-
-    assert.commandWorked(st.shard0.getDB("admin").runCommand(
-        {configureFailPoint: 'migrationCommitNetworkError', mode: 'off'}));
-
-    st.stop();
-
+    function runTest(shouldIncrementChunkMajorVersionOnChunkSplits) {
+        var st = new ShardingTest({
+            shards: 2,
+            mongos: 1,
+            other: {
+                configOptions: {
+                    setParameter: {
+                        incrementChunkMajorVersionOnChunkSplits:
+                            shouldIncrementChunkMajorVersionOnChunkSplits
+                    }
+                }
+            }
+        });
+
+        var mongos = st.s0;
+        var admin = mongos.getDB("admin");
+        var coll = mongos.getCollection("foo.bar");
+
+        assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+        printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
+        assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+        assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
+
+        st.printShardingStatus();
+
+        jsTest.log("Testing failed migrations...");
+
+        var oldVersion = null;
+        var newVersion = null;
+
+        // failMigrationCommit -- this creates an error that aborts the migration before the commit
+        // migration command is sent.
+        assert.commandWorked(st.shard0.getDB("admin").runCommand(
+            {configureFailPoint: 'failMigrationCommit', mode: 'alwaysOn'}));
+        if (shouldIncrementChunkMajorVersionOnChunkSplits) {
+            // The split command above bumps the shard version, and this is obtained by the router
+            // via a refresh at the end of the command, but the shard does not know about it yet.
+            // This find will cause the shard to refresh so that this next check for 'oldVersion'
+            // sees the most recent version prior to the migration.
+            coll.findOne();
+        }
+
+        oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+        assert.commandFailed(
+            admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: st.shard1.shardName}));
+
+        newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+        assert.eq(oldVersion.t,
+                  newVersion.t,
+                  "The shard version major value should not change after a failed migration");
+        // Split does not cause a shard routing table refresh, but the moveChunk attempt will.
+        assert.eq(
+            2,
+            newVersion.i,
+            "The shard routing table should refresh on a failed migration and show the split");
+
+        assert.commandWorked(st.shard0.getDB("admin").runCommand(
+            {configureFailPoint: 'failMigrationCommit', mode: 'off'}));
+
+        // migrationCommitNetworkError -- mimic migration commit command returning a network error,
+        // whereupon the config server is queried to determine that this commit was successful.
+        assert.commandWorked(st.shard0.getDB("admin").runCommand(
+            {configureFailPoint: 'migrationCommitNetworkError', mode: 'alwaysOn'}));
+
+        // Run a migration where there will still be chunks in the collection remaining on the
+        // shard afterwards. This will cause the collection's shardVersion to be bumped higher.
+        oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+        assert.commandWorked(
+            admin.runCommand({moveChunk: coll + "", find: {_id: 1}, to: st.shard1.shardName}));
+
+        newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+        assert.lt(oldVersion.t,
+                  newVersion.t,
+                  "The major value in the shard version should have increased");
+        assert.eq(1, newVersion.i, "The minor value in the shard version should be 1");
+
+        // Run a migration to move off the shard's last chunk in the collection. The collection's
+        // shardVersion will be reset.
+        oldVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+        assert.commandWorked(
+            admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: st.shard1.shardName}));
+
+        newVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()}).global;
+
+        assert.gt(oldVersion.t,
+                  newVersion.t,
+                  "The version prior to the migration should be greater than the reset value");
+
+        assert.eq(0,
+                  newVersion.t,
+                  "The shard version should have reset, but the major value is not zero");
+        assert.eq(0,
+                  newVersion.i,
+                  "The shard version should have reset, but the minor value is not zero");
+
+        assert.commandWorked(st.shard0.getDB("admin").runCommand(
+            {configureFailPoint: 'migrationCommitNetworkError', mode: 'off'}));
+
+        st.stop();
+    }
+
+    runTest(false);
+    runTest(true);
 })();
diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js
index cf2ea2c2d71..1a92ab4d9df 100644
--- a/jstests/sharding/zero_shard_version.js
+++ b/jstests/sharding/zero_shard_version.js
@@ -5,178 +5,366 @@
 (function() {
     'use strict';
-    var st = new ShardingTest({shards: 2, mongos: 4});
+    // Test with default value of incrementChunkMajorVersionOnChunkSplits, which is
+    // false.
+    (() => {
+        var st = new ShardingTest({shards: 2, mongos: 4});
+
+        var testDB_s0 = st.s.getDB('test');
+        assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
+        st.ensurePrimaryShard('test', 'shard0001');
+        assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+
+        var checkShardMajorVersion = function(conn, expectedVersion) {
+            var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
+            assert.eq(expectedVersion, shardVersionInfo.global.getTime());
+        };
-    var testDB_s0 = st.s.getDB('test');
-    assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
-    st.ensurePrimaryShard('test', 'shard0001');
-    assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+        ///////////////////////////////////////////////////////
+        // Test shard with empty chunk
-    var checkShardMajorVersion = function(conn, expectedVersion) {
-        var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
-        assert.eq(expectedVersion, shardVersionInfo.global.getTime());
-    };
+        // shard0: 0|0|a
+        // shard1: 1|0|a, [-inf, inf)
+        // mongos0: 1|0|a
-    ///////////////////////////////////////////////////////
-    // Test shard with empty chunk
+        var testDB_s1 = st.s1.getDB('test');
+        assert.writeOK(testDB_s1.user.insert({x: 1}));
+        assert.commandWorked(
+            testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'}));
+
+        st.configRS.awaitLastOpCommitted();
+
+        // Official config:
+        // shard0: 2|0|a, [-inf, inf)
+        // shard1: 0|0|a
+        //
+        // Shard metadata:
+        // shard0: 0|0|a
+        // shard1: 0|0|a
+        // mongos0: 1|0|a
+
+        checkShardMajorVersion(st.d0, 0);
+        checkShardMajorVersion(st.d1, 0);
+
+        // mongos0 still thinks that { x: 1 } belongs to shard0001, but should be able to
+        // refresh its metadata correctly.
+        assert.neq(null, testDB_s0.user.findOne({x: 1}));
+
+        checkShardMajorVersion(st.d0, 2);
+        checkShardMajorVersion(st.d1, 0);
+
+        // Set mongos2 & mongos3 to version 2|0|a
+        var testDB_s2 = st.s2.getDB('test');
+        assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+        var testDB_s3 = st.s3.getDB('test');
+        assert.neq(null, testDB_s3.user.findOne({x: 1}));
-    // shard0: 0|0|a
-    // shard1: 1|0|a, [-inf, inf)
-    // mongos0: 1|0|a
+        ///////////////////////////////////////////////////////
+        // Test unsharded collection
+        // mongos versions: s0, s2, s3: 2|0|a
-    var testDB_s1 = st.s1.getDB('test');
-    assert.writeOK(testDB_s1.user.insert({x: 1}));
-    assert.commandWorked(
-        testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'}));
-
-    st.configRS.awaitLastOpCommitted();
-
-    // Official config:
-    // shard0: 2|0|a, [-inf, inf)
-    // shard1: 0|0|a
-    //
-    // Shard metadata:
-    // shard0: 0|0|a
-    // shard1: 0|0|a
-    // mongos0: 1|0|a
+        testDB_s1.user.drop();
+        assert.writeOK(testDB_s1.user.insert({x: 10}));
+
+        // shard0: 0|0|0
+        // shard1: 0|0|0
+        // mongos0: 2|0|a
+
+        checkShardMajorVersion(st.d0, 0);
+        checkShardMajorVersion(st.d1, 0);
+
+        // mongos0 still thinks { x: 10 } belongs to shard0000, but since coll is dropped,
+        // query should be routed to primary shard.
+        assert.neq(null, testDB_s0.user.findOne({x: 10}));
+
+        checkShardMajorVersion(st.d0, 0);
+        checkShardMajorVersion(st.d1, 0);
+
+        ///////////////////////////////////////////////////////
+        // Test 2 shards with 1 chunk
+        // mongos versions: s0: 0|0|0, s2, s3: 2|0|a
-    checkShardMajorVersion(st.d0, 0);
-    checkShardMajorVersion(st.d1, 0);
+        testDB_s1.user.drop();
+        testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+        testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
+
+        // shard0: 0|0|b,
+        // shard1: 1|1|b, [-inf, 0), [0, inf)
+
+        testDB_s1.user.insert({x: 1});
+        testDB_s1.user.insert({x: -11});
+        assert.commandWorked(
+            testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: 'shard0000'}));
+
+        st.configRS.awaitLastOpCommitted();
+
+        // Official config:
+        // shard0: 2|0|b, [-inf, 0)
+        // shard1: 2|1|b, [0, inf)
+        //
+        // Shard metadata:
+        // shard0: 0|0|b
+        // shard1: 2|1|b
+        //
+        // mongos2: 2|0|a
+
+        checkShardMajorVersion(st.d0, 0);
+        checkShardMajorVersion(st.d1, 2);
+
+        // mongos2 still thinks that { x: 1 } belongs to shard0000, but should be able to
+        // refresh its metadata correctly.
+        assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+        checkShardMajorVersion(st.d0, 2);
+        checkShardMajorVersion(st.d1, 2);
-    // mongos0 still thinks that { x: 1 } belongs to shard0001, but should be able to
-    // refresh its metadata correctly.
-    assert.neq(null, testDB_s0.user.findOne({x: 1}));
+        // Set shard metadata to 2|0|b
+        assert.neq(null, testDB_s2.user.findOne({x: -11}));
+
+        checkShardMajorVersion(st.d0, 2);
+        checkShardMajorVersion(st.d1, 2);
+
+        // Official config:
+        // shard0: 2|0|b, [-inf, 0)
+        // shard1: 2|1|b, [0, inf)
+        //
+        // Shard metadata:
+        // shard0: 2|0|b
+        // shard1: 2|1|b
+        //
+        // mongos3: 2|0|a
+
+        // 4th mongos still thinks that { x: 1 } belongs to shard0000, but should be able to
+        // refresh its metadata correctly.
+        assert.neq(null, testDB_s3.user.findOne({x: 1}));
-    checkShardMajorVersion(st.d0, 2);
-    checkShardMajorVersion(st.d1, 0);
+        ///////////////////////////////////////////////////////
+        // Test mongos thinks unsharded when it's actually sharded
+        // mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
-    // Set mongos2 & mongos3 to version 2|0|a
-    var testDB_s2 = st.s2.getDB('test');
-    assert.neq(null, testDB_s2.user.findOne({x: 1}));
+        // Set mongos0 to version 0|0|0
+        testDB_s0.user.drop();
-    var testDB_s3 = st.s3.getDB('test');
-    assert.neq(null, testDB_s3.user.findOne({x: 1}));
+        checkShardMajorVersion(st.d0, 0);
+        checkShardMajorVersion(st.d1, 0);
-    ///////////////////////////////////////////////////////
-    // Test unsharded collection
-    // mongos versions: s0, s2, s3: 2|0|a
+        assert.eq(null, testDB_s0.user.findOne({x: 1}));
-    testDB_s1.user.drop();
-    assert.writeOK(testDB_s1.user.insert({x: 10}));
+        // Needs to also set mongos1 to version 0|0|0, otherwise it'll complain that collection is
+        // already sharded.
+        assert.eq(null, testDB_s1.user.findOne({x: 1}));
+        assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+        testDB_s1.user.insert({x: 1});
-    // shard0: 0|0|0
-    // shard1: 0|0|0
-    // mongos0: 2|0|a
-
-    checkShardMajorVersion(st.d0, 0);
-    checkShardMajorVersion(st.d1, 0);
-
-    // mongos0 still thinks { x: 10 } belongs to shard0000, but since coll is dropped,
-    // query should be routed to primary shard.
-    assert.neq(null, testDB_s0.user.findOne({x: 10}));
+        st.configRS.awaitLastOpCommitted();
-    checkShardMajorVersion(st.d0, 0);
-    checkShardMajorVersion(st.d1, 0);
+        // Official config:
+        // shard0: 2|0|c, [-inf, inf)
+        // shard1: 0|0|c
+        //
+        // Shard metadata:
+        // shard0: 0|0|c
+        // shard1: 0|0|c
+        //
+        // mongos0: 0|0|0
+
+        checkShardMajorVersion(st.d0, 0);
+        checkShardMajorVersion(st.d1, 0);
+
+        // 1st mongos thinks that collection is unsharded and will attempt to query primary shard.
+        assert.neq(null, testDB_s0.user.findOne({x: 1}));
-    ///////////////////////////////////////////////////////
-    // Test 2 shards with 1 chunk
-    // mongos versions: s0: 0|0|0, s2, s3: 2|0|a
+        checkShardMajorVersion(st.d0, 2);
+        checkShardMajorVersion(st.d1, 0);
-    testDB_s1.user.drop();
-    testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
-    testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
+        st.stop();
-    // shard0: 0|0|b,
-    // shard1: 2|1|b, [-inf, 0), [0, inf)
+    })();
-    testDB_s1.user.insert({x: 1});
-    testDB_s1.user.insert({x: -11});
-    assert.commandWorked(
-        testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: 'shard0000'}));
+    // Test with incrementChunkMajorVersionOnChunkSplits set to true.
+    (() => {
+        var st = new ShardingTest({
+            shards: 2,
+            mongos: 4,
+            other:
+                {configOptions: {setParameter: {incrementChunkMajorVersionOnChunkSplits: true}}}
+        });
+
+        var testDB_s0 = st.s.getDB('test');
+        assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
+        st.ensurePrimaryShard('test', 'shard0001');
+        assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+
+        var checkShardMajorVersion = function(conn, expectedVersion) {
+            var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
+            assert.eq(expectedVersion, shardVersionInfo.global.getTime());
+        };
+
+        ///////////////////////////////////////////////////////
+        // Test shard with empty chunk
-    st.configRS.awaitLastOpCommitted();
+        // shard0: 0|0|a
+        // shard1: 1|0|a, [-inf, inf)
+        // mongos0: 1|0|a
-    // Official config:
-    // shard0: 3|0|b, [-inf, 0)
-    // shard1: 3|1|b, [0, inf)
-    //
-    // Shard metadata:
-    // shard0: 0|0|b
-    // shard1: 3|1|b
-    //
-    // mongos2: 2|0|a
+        var testDB_s1 = st.s1.getDB('test');
+        assert.writeOK(testDB_s1.user.insert({x: 1}));
+        assert.commandWorked(
+            testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'}));
+
+        st.configRS.awaitLastOpCommitted();
+
+        // Official config:
+        // shard0: 2|0|a, [-inf, inf)
+        // shard1: 0|0|a
+        //
+        // Shard metadata:
+        // shard0: 0|0|a
+        // shard1: 0|0|a
+        // mongos0: 1|0|a
+
+        checkShardMajorVersion(st.d0, 0);
+        checkShardMajorVersion(st.d1, 0);
+
+        // mongos0 still thinks that { x: 1 } belongs to shard0001, but should be able to
+        // refresh its metadata correctly.
+        assert.neq(null, testDB_s0.user.findOne({x: 1}));
-    checkShardMajorVersion(st.d0, 0);
-    checkShardMajorVersion(st.d1, 3);
+        checkShardMajorVersion(st.d0, 2);
+        checkShardMajorVersion(st.d1, 0);
-    // mongos2 still thinks that { x: 1 } belongs to shard0000, but should be able to
-    // refresh its metadata correctly.
-    assert.neq(null, testDB_s2.user.findOne({x: 1}));
+        // Set mongos2 & mongos3 to version 2|0|a
+        var testDB_s2 = st.s2.getDB('test');
+        assert.neq(null, testDB_s2.user.findOne({x: 1}));
-    checkShardMajorVersion(st.d0, 3);
-    checkShardMajorVersion(st.d1, 3);
+        var testDB_s3 = st.s3.getDB('test');
+        assert.neq(null, testDB_s3.user.findOne({x: 1}));
-    // Set shard metadata to 2|0|b
-    assert.neq(null, testDB_s2.user.findOne({x: -11}));
+        ///////////////////////////////////////////////////////
+        // Test unsharded collection
+        // mongos versions: s0, s2, s3: 2|0|a
-    checkShardMajorVersion(st.d0, 3);
-    checkShardMajorVersion(st.d1, 3);
+        testDB_s1.user.drop();
+        assert.writeOK(testDB_s1.user.insert({x: 10}));
+
+        // shard0: 0|0|0
+        // shard1: 0|0|0
+        // mongos0: 2|0|a
+
+        checkShardMajorVersion(st.d0, 0);
+        checkShardMajorVersion(st.d1, 0);
+
+        // mongos0 still thinks { x: 10 } belongs to shard0000, but since coll is dropped,
+        // query should be routed to primary shard.
+        assert.neq(null, testDB_s0.user.findOne({x: 10}));
+
+        checkShardMajorVersion(st.d0, 0);
+        checkShardMajorVersion(st.d1, 0);
+
+        ///////////////////////////////////////////////////////
+        // Test 2 shards with 1 chunk
+        // mongos versions: s0: 0|0|0, s2, s3: 2|0|a
-    // Official config:
-    // shard0: 3|0|b, [-inf, 0)
-    // shard1: 3|1|b, [0, inf)
-    //
-    // Shard metadata:
-    // shard0: 3|0|b
-    // shard1: 3|1|b
-    //
-    // mongos3: 2|0|a
-
-    // 4th mongos still thinks that { x: 1 } belongs to shard0000, but should be able to
-    // refresh its metadata correctly.
-    assert.neq(null, testDB_s3.user.findOne({x: 1}));
+        testDB_s1.user.drop();
+        testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+        testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
-    ///////////////////////////////////////////////////////
-    // Test mongos thinks unsharded when it's actually sharded
-    // mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
+        // shard0: 0|0|b,
+        // shard1: 2|1|b, [-inf, 0), [0, inf)
+
+        testDB_s1.user.insert({x: 1});
+        testDB_s1.user.insert({x: -11});
+        assert.commandWorked(
+            testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: 'shard0000'}));
-    // Set mongos0 to version 0|0|0
-    testDB_s0.user.drop();
+        st.configRS.awaitLastOpCommitted();
+
+        // Official config:
+        // shard0: 3|0|b, [-inf, 0)
+        // shard1: 3|1|b, [0, inf)
+        //
+        // Shard metadata:
+        // shard0: 0|0|b
+        // shard1: 3|1|b
+        //
+        // mongos2: 2|0|a
-    checkShardMajorVersion(st.d0, 0);
-    checkShardMajorVersion(st.d1, 0);
+        checkShardMajorVersion(st.d0, 0);
+        checkShardMajorVersion(st.d1, 3);
+
+        // mongos2 still thinks that { x: 1 } belongs to shard0000, but should be able to
+        // refresh its metadata correctly.
+        assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+        checkShardMajorVersion(st.d0, 3);
+        checkShardMajorVersion(st.d1, 3);
-    assert.eq(null, testDB_s0.user.findOne({x: 1}));
+        // Set shard metadata to 2|0|b
+        assert.neq(null, testDB_s2.user.findOne({x: -11}));
-    // Needs to also set mongos1 to version 0|0|0, otherwise it'll complain that collection is
-    // already sharded.
-    assert.eq(null, testDB_s1.user.findOne({x: 1}));
-    assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
-    testDB_s1.user.insert({x: 1});
+        checkShardMajorVersion(st.d0, 3);
+        checkShardMajorVersion(st.d1, 3);
-    assert.commandWorked(
-        testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'}));
+        // Official config:
+        // shard0: 3|0|b, [-inf, 0)
+        // shard1: 3|1|b, [0, inf)
+        //
+        // Shard metadata:
+        // shard0: 3|0|b
+        // shard1: 3|1|b
+        //
+        // mongos3: 2|0|a
+
+        // 4th mongos still thinks that { x: 1 } belongs to shard0000, but should be able to
+        // refresh its metadata correctly.
+        assert.neq(null, testDB_s3.user.findOne({x: 1}));
-    st.configRS.awaitLastOpCommitted();
+        ///////////////////////////////////////////////////////
+        // Test mongos thinks unsharded when it's actually sharded
+        // mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
-    // Official config:
-    // shard0: 2|0|c, [-inf, inf)
-    // shard1: 0|0|c
-    //
-    // Shard metadata:
-    // shard0: 0|0|c
-    // shard1: 0|0|c
-    //
-    // mongos0: 0|0|0
+        // Set mongos0 to version 0|0|0
+        testDB_s0.user.drop();
-    checkShardMajorVersion(st.d0, 0);
-    checkShardMajorVersion(st.d1, 0);
-
-    // 1st mongos thinks that collection is unsharded and will attempt to query primary shard.
-    assert.neq(null, testDB_s0.user.findOne({x: 1}));
+        checkShardMajorVersion(st.d0, 0);
+        checkShardMajorVersion(st.d1, 0);
-    checkShardMajorVersion(st.d0, 2);
-    checkShardMajorVersion(st.d1, 0);
+        assert.eq(null, testDB_s0.user.findOne({x: 1}));
-    st.stop();
+        // Needs to also set mongos1 to version 0|0|0, otherwise it'll complain that collection is
+        // already sharded.
+        assert.eq(null, testDB_s1.user.findOne({x: 1}));
+        assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+        testDB_s1.user.insert({x: 1});
+
+        assert.commandWorked(
+            testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'}));
+
+        st.configRS.awaitLastOpCommitted();
+
+        // Official config:
+        // shard0: 2|0|c, [-inf, inf)
+        // shard1: 0|0|c
+        //
+        // Shard metadata:
+        // shard0: 0|0|c
+        // shard1: 0|0|c
+        //
+        // mongos0: 0|0|0
+
+        checkShardMajorVersion(st.d0, 0);
+        checkShardMajorVersion(st.d1, 0);
+
+        // 1st mongos thinks that collection is unsharded and will attempt to query primary shard.
+        assert.neq(null, testDB_s0.user.findOne({x: 1}));
+
+        checkShardMajorVersion(st.d0, 2);
+        checkShardMajorVersion(st.d1, 0);
+
+        st.stop();
+    })();
 })();
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp
index d467fc84522..2dd01a54645 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp
@@ -43,6 +43,7 @@
 #include "mongo/db/dbdirectclient.h"
 #include "mongo/db/namespace_string.h"
 #include "mongo/db/operation_context.h"
+#include "mongo/db/server_parameters.h"
 #include "mongo/rpc/get_status_from_command_result.h"
 #include "mongo/s/catalog/sharding_catalog_client.h"
 #include "mongo/s/catalog/type_chunk.h"
@@ -55,6 +56,9 @@
 #include "mongo/util/mongoutils/str.h"
 
 namespace mongo {
+
+MONGO_EXPORT_SERVER_PARAMETER(incrementChunkMajorVersionOnChunkSplits, bool, false);
+
 namespace {
 
 MONGO_FP_DECLARE(migrationCommitVersionError);
@@ -318,7 +322,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
     ChunkVersion currentMaxVersion = collVersion;
     // Increment the major version only if the shard that owns the chunk being split has version ==
    // collection version. See SERVER-41480 for details.
-    if (shardVersion == collVersion) {
+    if (incrementChunkMajorVersionOnChunkSplits.load() && shardVersion == collVersion) {
         currentMaxVersion.incMajor();
     }
diff --git a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
index 7567cc0f066..ac85566c620 100644
--- a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
@@ -32,6 +32,7 @@
 
 #include "mongo/client/read_preference.h"
 #include "mongo/db/namespace_string.h"
+#include "mongo/db/server_parameters.h"
 #include "mongo/s/catalog/sharding_catalog_manager.h"
 #include "mongo/s/catalog/type_chunk.h"
 #include "mongo/s/config_server_test_fixture.h"
@@ -41,6 +42,30 @@ namespace {
 
 using SplitChunkTest = ConfigServerTestFixture;
 
+/**
+ * A fixture which sets the incrementChunkMajorVersionOnChunkSplits server parameter to true.
+ */
+class SplitChunkWithMajorVersionIncrementTest : public ConfigServerTestFixture {
+public:
+    void setUp() override {
+        ConfigServerTestFixture::setUp();
+        // Ignore the return status.
+        std::ignore = ServerParameterSet::getGlobal()
+                          ->getMap()
+                          .find("incrementChunkMajorVersionOnChunkSplits")
+                          ->second->setFromString("true");
+    }
+
+    void tearDown() override {
+        // Ignore the return status.
+        std::ignore = ServerParameterSet::getGlobal()
+                          ->getMap()
+                          .find("incrementChunkMajorVersionOnChunkSplits")
+                          ->second->setFromString("false");
+        ConfigServerTestFixture::tearDown();
+    }
+};
+
 const NamespaceString kNamespace("TestDB.TestColl");
 
 TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
@@ -76,6 +101,56 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
     auto chunkDoc = chunkDocStatus.getValue();
     ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
 
+    // Check for increment on first chunkDoc's minor version
+    ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
+    ASSERT_EQ(origVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
+
+    // Second chunkDoc should have range [chunkSplitPoint, chunkMax]
+    auto otherChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint);
+    ASSERT_OK(otherChunkDocStatus.getStatus());
+
+    auto otherChunkDoc = otherChunkDocStatus.getValue();
+    ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax());
+
+    // Check for increment on second chunkDoc's minor version
+    ASSERT_EQ(origVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion());
+    ASSERT_EQ(origVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion());
+}
+
+
+TEST_F(SplitChunkWithMajorVersionIncrementTest, SplitExistingChunkCorrectlyShouldSucceed) {
+    ChunkType chunk;
+    chunk.setNS("TestDB.TestColl");
+
+    auto origVersion = ChunkVersion(1, 0, OID::gen());
+    chunk.setVersion(origVersion);
+    chunk.setShard(ShardId("shard0000"));
+
+    auto chunkMin = BSON("a" << 1);
+    auto chunkMax = BSON("a" << 10);
+    chunk.setMin(chunkMin);
+    chunk.setMax(chunkMax);
+
+    auto chunkSplitPoint = BSON("a" << 5);
+    std::vector<BSONObj> splitPoints{chunkSplitPoint};
+
+    setupChunks({chunk});
+
+    ASSERT_OK(ShardingCatalogManager::get(operationContext())
+                  ->commitChunkSplit(operationContext(),
+                                     kNamespace,
+                                     origVersion.epoch(),
+                                     ChunkRange(chunkMin, chunkMax),
+                                     splitPoints,
+                                     "shard0000"));
+
+    // First chunkDoc should have range [chunkMin, chunkSplitPoint]
+    auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
+    ASSERT_OK(chunkDocStatus.getStatus());
+
+    auto chunkDoc = chunkDocStatus.getValue();
+    ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+
+    // Check for increment on first chunkDoc's major version.
     ASSERT_EQ(origVersion.majorVersion() + 1, chunkDoc.getVersion().majorVersion());
     ASSERT_EQ(1u, chunkDoc.getVersion().minorVersion());
 
@@ -126,6 +201,68 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
     auto chunkDoc = chunkDocStatus.getValue();
     ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
 
+    // Check for increment on first chunkDoc's minor version
+    ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
+    ASSERT_EQ(origVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
+
+    // Second chunkDoc should have range [chunkSplitPoint, chunkSplitPoint2]
+    auto midChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint);
+    ASSERT_OK(midChunkDocStatus.getStatus());
+
+    auto midChunkDoc = midChunkDocStatus.getValue();
+    ASSERT_BSONOBJ_EQ(chunkSplitPoint2, midChunkDoc.getMax());
+
+    // Check for increment on second chunkDoc's minor version
+    ASSERT_EQ(origVersion.majorVersion(), midChunkDoc.getVersion().majorVersion());
+    ASSERT_EQ(origVersion.minorVersion() + 2, midChunkDoc.getVersion().minorVersion());
+
+    // Third chunkDoc should have range [chunkSplitPoint2, chunkMax]
+    auto lastChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint2);
+    ASSERT_OK(lastChunkDocStatus.getStatus());
+
+    auto lastChunkDoc = lastChunkDocStatus.getValue();
+    ASSERT_BSONOBJ_EQ(chunkMax, lastChunkDoc.getMax());
+
+    // Check for increment on third chunkDoc's minor version
+    ASSERT_EQ(origVersion.majorVersion(), lastChunkDoc.getVersion().majorVersion());
+    ASSERT_EQ(origVersion.minorVersion() + 3, lastChunkDoc.getVersion().minorVersion());
+}
+
+
+TEST_F(SplitChunkWithMajorVersionIncrementTest, MultipleSplitsOnExistingChunkShouldSucceed) {
+    ChunkType chunk;
+    chunk.setNS("TestDB.TestColl");
+
+    auto origVersion = ChunkVersion(1, 0, OID::gen());
+    chunk.setVersion(origVersion);
+    chunk.setShard(ShardId("shard0000"));
+
+    auto chunkMin = BSON("a" << 1);
+    auto chunkMax = BSON("a" << 10);
+    chunk.setMin(chunkMin);
+    chunk.setMax(chunkMax);
+
+    auto chunkSplitPoint = BSON("a" << 5);
+    auto chunkSplitPoint2 = BSON("a" << 7);
+    std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2};
+
+    setupChunks({chunk});
+
+    ASSERT_OK(ShardingCatalogManager::get(operationContext())
+                  ->commitChunkSplit(operationContext(),
+                                     kNamespace,
+                                     origVersion.epoch(),
+                                     ChunkRange(chunkMin, chunkMax),
+                                     splitPoints,
+                                     "shard0000"));
+
+    // First chunkDoc should have range [chunkMin, chunkSplitPoint]
+    auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
+    ASSERT_OK(chunkDocStatus.getStatus());
+
+    auto chunkDoc = chunkDocStatus.getValue();
+    ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+
+    // Check for increment on first chunkDoc's major version.
     ASSERT_EQ(origVersion.majorVersion() + 1, chunkDoc.getVersion().majorVersion());
     ASSERT_EQ(1u, chunkDoc.getVersion().minorVersion());
 
@@ -197,6 +334,66 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
     auto chunkDoc = chunkDocStatus.getValue();
     ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
 
+    // Check for increment based on the competing chunk version
+    ASSERT_EQ(competingVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
+    ASSERT_EQ(competingVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
+
+    // Second chunkDoc should have range [chunkSplitPoint, chunkMax]
+    auto otherChunkDocStatus = getChunkDoc(operationContext(), chunkSplitPoint);
+    ASSERT_OK(otherChunkDocStatus.getStatus());
+
+    auto otherChunkDoc = otherChunkDocStatus.getValue();
+    ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax());
+
+    // Check for increment based on the competing chunk version
+    ASSERT_EQ(competingVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion());
+    ASSERT_EQ(competingVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion());
+}
+
+TEST_F(SplitChunkWithMajorVersionIncrementTest, NewSplitShouldClaimHighestVersion) {
+    ChunkType chunk, chunk2;
+    chunk.setNS("TestDB.TestColl");
+    chunk2.setNS("TestDB.TestColl");
+    auto collEpoch = OID::gen();
+
+    // set up first chunk
+    auto origVersion = ChunkVersion(1, 2, collEpoch);
+    chunk.setVersion(origVersion);
+    chunk.setShard(ShardId("shard0000"));
+
+    auto chunkMin = BSON("a" << 1);
+    auto chunkMax = BSON("a" << 10);
+    chunk.setMin(chunkMin);
+    chunk.setMax(chunkMax);
+
+    std::vector<BSONObj> splitPoints;
+    auto chunkSplitPoint = BSON("a" << 5);
+    splitPoints.push_back(chunkSplitPoint);
+
+    // set up second chunk (chunk2)
+    auto competingVersion = ChunkVersion(2, 1, collEpoch);
+    chunk2.setVersion(competingVersion);
+    chunk2.setShard(ShardId("shard0000"));
+    chunk2.setMin(BSON("a" << 10));
+    chunk2.setMax(BSON("a" << 20));
+
+    setupChunks({chunk, chunk2});
+
+    ASSERT_OK(ShardingCatalogManager::get(operationContext())
+                  ->commitChunkSplit(operationContext(),
+                                     kNamespace,
+                                     collEpoch,
+                                     ChunkRange(chunkMin, chunkMax),
+                                     splitPoints,
+                                     "shard0000"));
+
+    // First chunkDoc should have range [chunkMin, chunkSplitPoint]
+    auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
+    ASSERT_OK(chunkDocStatus.getStatus());
+
+    auto chunkDoc = chunkDocStatus.getValue();
+    ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+
+    // Check for major version increment based on the competing chunk version.
     ASSERT_EQ(competingVersion.majorVersion() + 1, chunkDoc.getVersion().majorVersion());
 
     // The minor version gets reset to 0 when the major version is incremented, and chunk splits
@@ -219,7 +416,8 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
     ASSERT_EQ(2u, otherChunkDoc.getVersion().minorVersion());
 }
 
-TEST_F(SplitChunkTest, SplitsOnShardWithLowerShardVersionDoesNotIncreaseCollectionVersion) {
+TEST_F(SplitChunkWithMajorVersionIncrementTest,
+       SplitsOnShardWithLowerShardVersionDoesNotIncreaseCollectionVersion) {
     ChunkType chunk, chunk2;
     chunk.setNS(kNamespace.toString());
     chunk2.setNS(kNamespace.toString());
@@ -271,7 +469,8 @@ TEST_F(SplitChunkTest, SplitsOnShardWithLowerShardVersionDoesNotIncreaseCollecti
     ASSERT_EQ(competingVersion.minorVersion() + 2u, otherChunkDoc.getVersion().minorVersion());
 }
 
-TEST_F(SplitChunkTest, SplitsOnShardWithHighestShardVersionIncreasesCollectionVersion) {
+TEST_F(SplitChunkWithMajorVersionIncrementTest,
+       SplitsOnShardWithHighestShardVersionIncreasesCollectionVersion) {
     ChunkType chunk, chunk2;
     chunk.setNS(kNamespace.toString());
     chunk2.setNS(kNamespace.toString());
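Outside of the test fixtures above, the parameter is set on the config servers. A usage sketch
follows; it is an assumption based on how MONGO_EXPORT_SERVER_PARAMETER typically registers
parameters (settable both at startup and at runtime), and the host name is hypothetical, so
verify against the target build before relying on it:

    // At config server startup:
    //     mongod --configsvr --setParameter incrementChunkMajorVersionOnChunkSplits=true ...
    //
    // Or at runtime, from a shell connected to a config server:
    var configAdmin = new Mongo("config-server.example.net:27019").getDB("admin");
    assert.commandWorked(configAdmin.runCommand(
        {setParameter: 1, incrementChunkMajorVersionOnChunkSplits: true}));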