author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2016-06-06 10:45:06 +0300
committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2016-06-09 13:13:05 -0400
commit | 2477b8c33b2e8f26fcde47c38c19c3fbb8b99839 (patch)
tree | da07b93547289dd370ba02434b0e82551dca9463
parent | 3f7dce2ea7a4692380e04d09da89388c23133635 (diff)
download | mongo-2477b8c33b2e8f26fcde47c38c19c3fbb8b99839.tar.gz
SERVER-22512 Remove unnecessary calls to stopBalancer
21 files changed, 799 insertions, 785 deletions
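The diff below repeats one pattern across the affected jstests. A minimal sketch of that pattern, taken from the server6118.js hunk, is shown here: the explicit s.stopBalancer() call is dropped (presumably unnecessary because ShardingTest leaves the balancer disabled unless it is started with enableBalancer: true; that rationale is an assumption, not stated on this page), the test body is wrapped in a 'use strict' IIFE, and admin commands are wrapped in assert.commandWorked() so failures surface immediately. The names and values are illustrative only.

```javascript
// Before (what the tests used to do):
//   var s = new ShardingTest({name: "aggregation_sort1", shards: 2, mongos: 1});
//   s.stopBalancer();
//   s.adminCommand({enablesharding: "test"});
//   s.adminCommand({shardcollection: "test.data", key: {_id: 1}});

// After: no stopBalancer() call, a 'use strict' IIFE, and every admin command
// wrapped in assert.commandWorked() so a silent failure stops the test immediately.
(function() {
    'use strict';

    var s = new ShardingTest({shards: 2});

    assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
    s.ensurePrimaryShard('test', 'shard0001');
    assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {_id: 1}}));

    // ... test body unchanged ...

    s.stop();
})();
```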
diff --git a/jstests/aggregation/bugs/server6118.js b/jstests/aggregation/bugs/server6118.js index 898e5927b63..af287661e42 100644 --- a/jstests/aggregation/bugs/server6118.js +++ b/jstests/aggregation/bugs/server6118.js @@ -1,12 +1,12 @@ // SERVER-6118: support for sharded sorts (function() { + 'use strict'; - var s = new ShardingTest({name: "aggregation_sort1", shards: 2, mongos: 1}); - s.stopBalancer(); + var s = new ShardingTest({shards: 2}); - s.adminCommand({enablesharding: "test"}); + assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); s.ensurePrimaryShard('test', 'shard0001'); - s.adminCommand({shardcollection: "test.data", key: {_id: 1}}); + assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {_id: 1}})); var d = s.getDB("test"); @@ -20,12 +20,12 @@ bulkOp.execute(); // Split the data into 3 chunks - s.adminCommand({split: "test.data", middle: {_id: 33}}); - s.adminCommand({split: "test.data", middle: {_id: 66}}); + assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 33}})); + assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 66}})); // Migrate the middle chunk to another shard - s.adminCommand( - {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getPrimaryShard("test")).name}); + assert.commandWorked(s.s0.adminCommand( + {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getPrimaryShard("test")).name})); // Check that the results are in order. var result = d.data.aggregate({$sort: {_id: 1}}).toArray(); @@ -36,5 +36,4 @@ } s.stop(); - })(); diff --git a/jstests/aggregation/bugs/server6179.js b/jstests/aggregation/bugs/server6179.js index c05103a13b8..4e98ee17eb5 100644 --- a/jstests/aggregation/bugs/server6179.js +++ b/jstests/aggregation/bugs/server6179.js @@ -1,12 +1,12 @@ // SERVER-6179: support for two $groups in sharded agg (function() { + 'use strict'; - var s = new ShardingTest({name: "aggregation_multiple_group", shards: 2, mongos: 1}); - s.stopBalancer(); + var s = new ShardingTest({shards: 2}); - s.adminCommand({enablesharding: "test"}); + assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); s.ensurePrimaryShard('test', 'shard0001'); - s.adminCommand({shardcollection: "test.data", key: {_id: 1}}); + assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {_id: 1}})); var d = s.getDB("test"); @@ -20,12 +20,12 @@ bulkOp.execute(); // Split the data into 3 chunks - s.adminCommand({split: "test.data", middle: {_id: 33}}); - s.adminCommand({split: "test.data", middle: {_id: 66}}); + assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 33}})); + assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 66}})); // Migrate the middle chunk to another shard - s.adminCommand( - {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getPrimaryShard("test")).name}); + assert.commandWorked(s.s0.adminCommand( + {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getPrimaryShard("test")).name})); // Check that we get results rather than an error var result = d.data @@ -33,7 +33,7 @@ {$group: {_id: '$i', avg_id: {$avg: '$_id'}}}, {$sort: {_id: 1}}) .toArray(); - expected = [ + var expected = [ {"_id": 0, "avg_id": 45}, {"_id": 1, "avg_id": 46}, {"_id": 2, "avg_id": 47}, @@ -49,5 +49,4 @@ assert.eq(result, expected); s.stop(); - })(); diff --git a/jstests/aggregation/bugs/server7781.js b/jstests/aggregation/bugs/server7781.js index 0a4831fb800..e8684dd813f 100644 --- a/jstests/aggregation/bugs/server7781.js 
+++ b/jstests/aggregation/bugs/server7781.js @@ -1,5 +1,6 @@ // SERVER-7781 $geoNear pipeline stage (function() { + 'use strict'; load('jstests/libs/geo_near_random.js'); load('jstests/aggregation/extras/utils.js'); @@ -56,10 +57,12 @@ shards.push(shard._id); }); - db.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}}); + assert.commandWorked( + db.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}})); for (var i = 1; i < 10; i++) { // split at 0.1, 0.2, ... 0.9 - db.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}}); + assert.commandWorked( + db.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}})); db.adminCommand({ moveChunk: db[coll].getFullName(), find: {rand: i / 10}, @@ -84,8 +87,8 @@ // test with defaults var queryPoint = pointMaker.mkPt(0.25); // stick to center of map - geoCmd = {geoNear: coll, near: queryPoint, includeLocs: true, spherical: true}; - aggCmd = { + var geoCmd = {geoNear: coll, near: queryPoint, includeLocs: true, spherical: true}; + var aggCmd = { $geoNear: { near: queryPoint, includeLocs: 'stats.loc', @@ -126,7 +129,7 @@ geoCmd.num = 40; geoCmd.near = queryPoint; aggCmd.$geoNear.near = queryPoint; - aggArr = [aggCmd, {$limit: 50}, {$limit: 60}, {$limit: 40}]; + var aggArr = [aggCmd, {$limit: 50}, {$limit: 60}, {$limit: 40}]; checkOutput(db.runCommand(geoCmd), db[coll].aggregate(aggArr), 40); // Test $geoNear with an initial batchSize of 0. Regression test for SERVER-20935. @@ -149,13 +152,11 @@ test(db, false, '2dsphere'); var sharded = new ShardingTest({shards: 3, mongos: 1}); - sharded.stopBalancer(); - sharded.adminCommand({enablesharding: "test"}); + assert.commandWorked(sharded.s0.adminCommand({enablesharding: "test"})); sharded.ensurePrimaryShard('test', 'shard0001'); test(sharded.getDB('test'), true, '2d'); test(sharded.getDB('test'), true, '2dsphere'); sharded.stop(); - })(); diff --git a/jstests/aggregation/bugs/server9444.js b/jstests/aggregation/bugs/server9444.js index ad5f4b03ca6..f3dc2748b0a 100644 --- a/jstests/aggregation/bugs/server9444.js +++ b/jstests/aggregation/bugs/server9444.js @@ -1,76 +1,80 @@ // server-9444 support disk storage of intermediate results in aggregation - -var t = db.server9444; -t.drop(); - -var sharded = (typeof(RUNNING_IN_SHARDED_AGG_TEST) != 'undefined'); // see end of testshard1.js -if (sharded) { - db.adminCommand({shardcollection: t.getFullName(), key: {"_id": 'hashed'}}); -} - -var memoryLimitMB = sharded ? 200 : 100; - -function loadData() { - var bigStr = Array(1024 * 1024 + 1).toString(); // 1MB of ',' - for (var i = 0; i < memoryLimitMB + 1; i++) - t.insert({_id: i, bigStr: i + bigStr, random: Math.random()}); - - assert.gt(t.stats().size, memoryLimitMB * 1024 * 1024); -} -loadData(); - -function test(pipeline, outOfMemoryCode) { - // ensure by default we error out if exceeding memory limit - var res = t.runCommand('aggregate', {pipeline: pipeline}); - assert.commandFailed(res); - assert.eq(res.code, outOfMemoryCode); - - // ensure allowDiskUse: false does what it says - var res = t.runCommand('aggregate', {pipeline: pipeline, allowDiskUse: false}); - assert.commandFailed(res); - assert.eq(res.code, outOfMemoryCode); - - // allowDiskUse only supports bool. In particular, numbers aren't allowed. 
- var res = t.runCommand('aggregate', {pipeline: pipeline, allowDiskUse: 1}); - assert.commandFailed(res); - assert.eq(res.code, 16949); - - // ensure we work when allowDiskUse === true - var res = t.aggregate(pipeline, {allowDiskUse: true}); - assert.eq(res.itcount(), t.count()); // all tests output one doc per input doc -} - -var groupCode = 16945; -var sortCode = 16819; -var sortLimitCode = 16820; - -test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], groupCode); - -// sorting with _id would use index which doesn't require extsort -test([{$sort: {random: 1}}], sortCode); -test([{$sort: {bigStr: 1}}], sortCode); // big key and value - -// make sure sort + large limit won't crash the server (SERVER-10136) -test([{$sort: {bigStr: 1}}, {$limit: 1000 * 1000 * 1000}], sortLimitCode); - -// test combining two extSorts in both same and different orders -test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {_id: 1}}], groupCode); -test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {_id: -1}}], groupCode); -test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {random: 1}}], groupCode); -test([{$sort: {random: 1}}, {$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], sortCode); - -var origDB = db; -if (sharded) { - // Stop balancer first before dropping so there will be no contention on the ns lock. - // It's alright to modify the global db variable since sharding tests never run in parallel. - db = db.getSiblingDB('config'); - sh.stopBalancer(); -} - -// don't leave large collection laying around -t.drop(); - -if (sharded) { - sh.startBalancer(); - db = origDB; -} +(function() { + 'use strict'; + + var t = db.server9444; + t.drop(); + + var sharded = (typeof(RUNNING_IN_SHARDED_AGG_TEST) != 'undefined'); // see end of testshard1.js + if (sharded) { + assert.commandWorked( + db.adminCommand({shardcollection: t.getFullName(), key: {"_id": 'hashed'}})); + } + + var memoryLimitMB = sharded ? 200 : 100; + + function loadData() { + var bigStr = Array(1024 * 1024 + 1).toString(); // 1MB of ',' + for (var i = 0; i < memoryLimitMB + 1; i++) + t.insert({_id: i, bigStr: i + bigStr, random: Math.random()}); + + assert.gt(t.stats().size, memoryLimitMB * 1024 * 1024); + } + loadData(); + + function test(pipeline, outOfMemoryCode) { + // ensure by default we error out if exceeding memory limit + var res = t.runCommand('aggregate', {pipeline: pipeline}); + assert.commandFailed(res); + assert.eq(res.code, outOfMemoryCode); + + // ensure allowDiskUse: false does what it says + var res = t.runCommand('aggregate', {pipeline: pipeline, allowDiskUse: false}); + assert.commandFailed(res); + assert.eq(res.code, outOfMemoryCode); + + // allowDiskUse only supports bool. In particular, numbers aren't allowed. 
+ var res = t.runCommand('aggregate', {pipeline: pipeline, allowDiskUse: 1}); + assert.commandFailed(res); + assert.eq(res.code, 16949); + + // ensure we work when allowDiskUse === true + var res = t.aggregate(pipeline, {allowDiskUse: true}); + assert.eq(res.itcount(), t.count()); // all tests output one doc per input doc + } + + var groupCode = 16945; + var sortCode = 16819; + var sortLimitCode = 16820; + + test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], groupCode); + + // sorting with _id would use index which doesn't require extsort + test([{$sort: {random: 1}}], sortCode); + test([{$sort: {bigStr: 1}}], sortCode); // big key and value + + // make sure sort + large limit won't crash the server (SERVER-10136) + test([{$sort: {bigStr: 1}}, {$limit: 1000 * 1000 * 1000}], sortLimitCode); + + // test combining two extSorts in both same and different orders + test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {_id: 1}}], groupCode); + test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {_id: -1}}], groupCode); + test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {random: 1}}], groupCode); + test([{$sort: {random: 1}}, {$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], sortCode); + + var origDB = db; + if (sharded) { + // Stop balancer first before dropping so there will be no contention on the ns lock. + // It's alright to modify the global db variable since sharding tests never run in parallel. + db = db.getSiblingDB('config'); + sh.stopBalancer(); + } + + // don't leave large collection laying around + t.drop(); + + if (sharded) { + sh.startBalancer(); + db = origDB; + } +})(); diff --git a/jstests/gle/gle_sharded_write.js b/jstests/gle/gle_sharded_write.js index f1feffed5b2..8d2a21cd758 100644 --- a/jstests/gle/gle_sharded_write.js +++ b/jstests/gle/gle_sharded_write.js @@ -2,192 +2,192 @@ // Ensures GLE correctly reports basic write stats and failures // Note that test should work correctly with and without write commands. 
// - -var st = new ShardingTest({shards: 2, mongos: 1}); -st.stopBalancer(); - -var mongos = st.s0; -var admin = mongos.getDB("admin"); -var config = mongos.getDB("config"); -var coll = mongos.getCollection(jsTestName() + ".coll"); -var shards = config.shards.find().toArray(); - -assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()})); -printjson(admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id})); -assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}})); -assert.commandWorked(admin.runCommand({split: coll.toString(), middle: {_id: 0}})); -assert.commandWorked( - admin.runCommand({moveChunk: coll.toString(), find: {_id: 0}, to: shards[1]._id})); - -st.printShardingStatus(); - -var gle = null; - -// -// Successful insert -coll.remove({}); -coll.insert({_id: -1}); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -assert(gle.ok); -assert('err' in gle); -assert(!gle.err); -assert.eq(coll.count(), 1); - -// -// Successful update -coll.remove({}); -coll.insert({_id: 1}); -coll.update({_id: 1}, {$set: {foo: "bar"}}); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -assert(gle.ok); -assert('err' in gle); -assert(!gle.err); -assert(gle.updatedExisting); -assert.eq(gle.n, 1); -assert.eq(coll.count(), 1); - -// -// Successful multi-update -coll.remove({}); -coll.insert({_id: 1}); -coll.update({}, {$set: {foo: "bar"}}, false, true); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -assert(gle.ok); -assert('err' in gle); -assert(!gle.err); -assert(gle.updatedExisting); -assert.eq(gle.n, 1); -assert.eq(coll.count(), 1); - -// -// Successful upsert -coll.remove({}); -coll.update({_id: 1}, {_id: 1}, true); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -assert(gle.ok); -assert('err' in gle); -assert(!gle.err); -assert(!gle.updatedExisting); -assert.eq(gle.n, 1); -assert.eq(gle.upserted, 1); -assert.eq(coll.count(), 1); - -// -// Successful upserts -coll.remove({}); -coll.update({_id: -1}, {_id: -1}, true); -coll.update({_id: 1}, {_id: 1}, true); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -assert(gle.ok); -assert('err' in gle); -assert(!gle.err); -assert(!gle.updatedExisting); -assert.eq(gle.n, 1); -assert.eq(gle.upserted, 1); -assert.eq(coll.count(), 2); - -// -// Successful remove -coll.remove({}); -coll.insert({_id: 1}); -coll.remove({_id: 1}); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -assert(gle.ok); -assert('err' in gle); -assert(!gle.err); -assert.eq(gle.n, 1); -assert.eq(coll.count(), 0); - -// -// Error on one host during update -coll.remove({}); -coll.update({_id: 1}, {$invalid: "xxx"}, true); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -assert(gle.ok); -assert(gle.err); -assert(gle.code); -assert(!gle.errmsg); -assert(gle.singleShard); -assert.eq(coll.count(), 0); - -// -// Error on two hosts during remove -coll.remove({}); -coll.remove({$invalid: 'remove'}); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -assert(gle.ok); -assert(gle.err); -assert(gle.code); -assert(!gle.errmsg); -assert(gle.shards); -assert.eq(coll.count(), 0); - -// -// Repeated calls to GLE should work -coll.remove({}); -coll.update({_id: 1}, {$invalid: "xxx"}, true); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -assert(gle.ok); -assert(gle.err); -assert(gle.code); -assert(!gle.errmsg); -assert(gle.singleShard); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -assert(gle.ok); 
-assert(gle.err); -assert(gle.code); -assert(!gle.errmsg); -assert(gle.singleShard); -assert.eq(coll.count(), 0); - -// -// Geo $near is not supported on mongos -coll.ensureIndex({loc: "2dsphere"}); -coll.remove({}); -var query = { - loc: { - $near: { - $geometry: {type: "Point", coordinates: [0, 0]}, - $maxDistance: 1000, +(function() { + 'use strict'; + + var st = new ShardingTest({shards: 2, mongos: 1}); + + var mongos = st.s0; + var admin = mongos.getDB("admin"); + var config = mongos.getDB("config"); + var coll = mongos.getCollection(jsTestName() + ".coll"); + var shards = config.shards.find().toArray(); + + assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()})); + printjson(admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id})); + assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}})); + assert.commandWorked(admin.runCommand({split: coll.toString(), middle: {_id: 0}})); + assert.commandWorked( + admin.runCommand({moveChunk: coll.toString(), find: {_id: 0}, to: shards[1]._id})); + + st.printShardingStatus(); + + var gle = null; + + // + // Successful insert + coll.remove({}); + coll.insert({_id: -1}); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + assert(gle.ok); + assert('err' in gle); + assert(!gle.err); + assert.eq(coll.count(), 1); + + // + // Successful update + coll.remove({}); + coll.insert({_id: 1}); + coll.update({_id: 1}, {$set: {foo: "bar"}}); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + assert(gle.ok); + assert('err' in gle); + assert(!gle.err); + assert(gle.updatedExisting); + assert.eq(gle.n, 1); + assert.eq(coll.count(), 1); + + // + // Successful multi-update + coll.remove({}); + coll.insert({_id: 1}); + coll.update({}, {$set: {foo: "bar"}}, false, true); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + assert(gle.ok); + assert('err' in gle); + assert(!gle.err); + assert(gle.updatedExisting); + assert.eq(gle.n, 1); + assert.eq(coll.count(), 1); + + // + // Successful upsert + coll.remove({}); + coll.update({_id: 1}, {_id: 1}, true); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + assert(gle.ok); + assert('err' in gle); + assert(!gle.err); + assert(!gle.updatedExisting); + assert.eq(gle.n, 1); + assert.eq(gle.upserted, 1); + assert.eq(coll.count(), 1); + + // + // Successful upserts + coll.remove({}); + coll.update({_id: -1}, {_id: -1}, true); + coll.update({_id: 1}, {_id: 1}, true); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + assert(gle.ok); + assert('err' in gle); + assert(!gle.err); + assert(!gle.updatedExisting); + assert.eq(gle.n, 1); + assert.eq(gle.upserted, 1); + assert.eq(coll.count(), 2); + + // + // Successful remove + coll.remove({}); + coll.insert({_id: 1}); + coll.remove({_id: 1}); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + assert(gle.ok); + assert('err' in gle); + assert(!gle.err); + assert.eq(gle.n, 1); + assert.eq(coll.count(), 0); + + // + // Error on one host during update + coll.remove({}); + coll.update({_id: 1}, {$invalid: "xxx"}, true); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + assert(gle.ok); + assert(gle.err); + assert(gle.code); + assert(!gle.errmsg); + assert(gle.singleShard); + assert.eq(coll.count(), 0); + + // + // Error on two hosts during remove + coll.remove({}); + coll.remove({$invalid: 'remove'}); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + assert(gle.ok); + assert(gle.err); + assert(gle.code); + 
assert(!gle.errmsg); + assert(gle.shards); + assert.eq(coll.count(), 0); + + // + // Repeated calls to GLE should work + coll.remove({}); + coll.update({_id: 1}, {$invalid: "xxx"}, true); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + assert(gle.ok); + assert(gle.err); + assert(gle.code); + assert(!gle.errmsg); + assert(gle.singleShard); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + assert(gle.ok); + assert(gle.err); + assert(gle.code); + assert(!gle.errmsg); + assert(gle.singleShard); + assert.eq(coll.count(), 0); + + // + // Geo $near is not supported on mongos + coll.ensureIndex({loc: "2dsphere"}); + coll.remove({}); + var query = { + loc: { + $near: { + $geometry: {type: "Point", coordinates: [0, 0]}, + $maxDistance: 1000, + } } - } -}; -printjson(coll.remove(query)); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -assert(gle.ok); -assert(gle.err); -assert(gle.code); -assert(!gle.errmsg); -assert(gle.shards); -assert.eq(coll.count(), 0); - -// -// First shard down -// - -// -// Successful bulk insert on two hosts, host dies before gle (error contacting host) -coll.remove({}); -coll.insert([{_id: 1}, {_id: -1}]); -// Wait for write to be written to shards before shutting it down. -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -MongoRunner.stopMongod(st.shard0); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -// Should get an error about contacting dead host. -assert(!gle.ok); -assert(gle.errmsg); - -// -// Failed insert on two hosts, first host dead -// NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get -// successful writes from. -coll.remove({_id: 1}); -coll.insert([{_id: 1}, {_id: -1}]); -printjson(gle = coll.getDB().runCommand({getLastError: 1})); -assert(gle.ok); -assert(gle.err); -assert.eq(coll.count({_id: 1}), 1); - -jsTest.log("DONE!"); - -st.stop(); + }; + printjson(coll.remove(query)); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + assert(gle.ok); + assert(gle.err); + assert(gle.code); + assert(!gle.errmsg); + assert(gle.shards); + assert.eq(coll.count(), 0); + + // + // First shard down + // + + // + // Successful bulk insert on two hosts, host dies before gle (error contacting host) + coll.remove({}); + coll.insert([{_id: 1}, {_id: -1}]); + // Wait for write to be written to shards before shutting it down. + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + MongoRunner.stopMongod(st.shard0); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + // Should get an error about contacting dead host. + assert(!gle.ok); + assert(gle.errmsg); + + // + // Failed insert on two hosts, first host dead + // NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get + // successful writes from. + coll.remove({_id: 1}); + coll.insert([{_id: 1}, {_id: -1}]); + printjson(gle = coll.getDB().runCommand({getLastError: 1})); + assert(gle.ok); + assert(gle.err); + assert.eq(coll.count({_id: 1}), 1); + + st.stop(); +})(); diff --git a/jstests/noPassthrough/cursor_timeout.js b/jstests/noPassthrough/cursor_timeout.js deleted file mode 100644 index 46a054da0ea..00000000000 --- a/jstests/noPassthrough/cursor_timeout.js +++ /dev/null @@ -1,83 +0,0 @@ -// Basic integration tests for the background job that periodically kills idle cursors, in both -// mongod and mongos. This test creates the following four cursors: -// -// 1. A no-timeout cursor through mongos. -// 2. A no-timeout cursor through mongod. -// 3. 
A normal cursor through mongos. -// 4. A normal cursor through mongod. -// -// After a period of inactivity, the test asserts that cursors #1 and #2 are still alive, and that -// #3 and #4 have been killed. - -var st = new ShardingTest({ - shards: 2, - other: { - chunkSize: 1, - shardOptions: {setParameter: "cursorTimeoutMillis=1000"}, - mongosOptions: {setParameter: "cursorTimeoutMillis=1000"} - } -}); -st.stopBalancer(); - -var adminDB = st.admin; -var configDB = st.config; -var coll = st.s.getDB('test').user; - -adminDB.runCommand({enableSharding: coll.getDB().getName()}); -st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001'); -adminDB.runCommand({shardCollection: coll.getFullName(), key: {x: 1}}); - -var data = 'c'; -for (var x = 0; x < 18; x++) { - data += data; -} - -for (x = 0; x < 200; x++) { - coll.insert({x: x, v: data}); -} - -var chunkDoc = configDB.chunks.findOne(); -var chunkOwner = chunkDoc.shard; -var toShard = configDB.shards.findOne({_id: {$ne: chunkOwner}})._id; -var cmd = {moveChunk: coll.getFullName(), find: chunkDoc.min, to: toShard, _waitForDelete: true}; -var res = adminDB.runCommand(cmd); - -jsTest.log('move result: ' + tojson(res)); - -var shardedCursorWithTimeout = coll.find(); -var shardedCursorWithNoTimeout = coll.find(); -shardedCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout); - -// Query directly to mongod -var shardHost = configDB.shards.findOne({_id: chunkOwner}).host; -var mongod = new Mongo(shardHost); -var shardColl = mongod.getCollection(coll.getFullName()); - -var cursorWithTimeout = shardColl.find(); -var cursorWithNoTimeout = shardColl.find(); -cursorWithNoTimeout.addOption(DBQuery.Option.noTimeout); - -shardedCursorWithTimeout.next(); -shardedCursorWithNoTimeout.next(); - -cursorWithTimeout.next(); -cursorWithNoTimeout.next(); - -// Wait until the idle cursor background job has killed the cursors that do not have the "no -// timeout" flag set. We use the "cursorTimeoutMillis" setParameter above to reduce the amount of -// time we need to wait here. 
-sleep(5000); - -assert.throws(function() { - shardedCursorWithTimeout.itcount(); -}); -assert.throws(function() { - cursorWithTimeout.itcount(); -}); - -// +1 because we already advanced once -assert.eq(coll.count(), shardedCursorWithNoTimeout.itcount() + 1); - -assert.eq(shardColl.count(), cursorWithNoTimeout.itcount() + 1); - -st.stop(); diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js index cfec6199ca2..1c2f1aae009 100644 --- a/jstests/noPassthroughWithMongod/no_balance_collection.js +++ b/jstests/noPassthroughWithMongod/no_balance_collection.js @@ -1,14 +1,11 @@ // Tests whether the noBalance flag disables balancing for collections -var st = new ShardingTest({shards: 2, mongos: 1, verbose: 1}); +var st = new ShardingTest({shards: 2, mongos: 1}); // First, test that shell helpers require an argument assert.throws(sh.disableBalancing, [], "sh.disableBalancing requires a collection"); assert.throws(sh.enableBalancing, [], "sh.enableBalancing requires a collection"); -// Initially stop balancing -st.stopBalancer(); - var shardAName = st._shardNames[0]; var shardBName = st._shardNames[1]; @@ -70,10 +67,11 @@ jsTest.log("Chunks for " + collB + " are balanced."); // Re-disable balancing for collB sh.disableBalancing(collB); + // Wait for the balancer to fully finish the last migration and write the changelog // MUST set db var here, ugly but necessary db = st.s0.getDB("config"); -sh.waitForBalancer(true); +st.waitForBalancerRound(); // Make sure auto-migrates on insert don't move chunks var lastMigration = sh._lastMigration(collB); diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js index 39c28b46448..a3c9eefdca8 100644 --- a/jstests/sharding/balance_repl.js +++ b/jstests/sharding/balance_repl.js @@ -3,7 +3,7 @@ // (function() { - "use strict"; + 'use strict'; // The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries // from stepping down during migrations on slow evergreen builders. @@ -33,25 +33,27 @@ } assert.writeOK(bulk.execute()); - s.adminCommand({enablesharding: "test"}); + assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); s.ensurePrimaryShard('test', 'test-rs0'); - s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}); + assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}})); - for (i = 0; i < 20; i++) - s.adminCommand({split: "test.foo", middle: {_id: i * 100}}); + for (i = 0; i < 20; i++) { + assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {_id: i * 100}})); + } assert.eq(2100, db.foo.find().itcount()); + var coll = db.foo; coll.setSlaveOk(); + assert.eq(2100, coll.find().itcount()); var dbPrimaryShardId = s.getPrimaryShardIdForDatabase("test"); var other = s.config.shards.findOne({_id: {$ne: dbPrimaryShardId}}); for (i = 0; i < 20; i++) { - // Needs to waitForDelete because we'll be performing a slaveOk query, - // and secondaries don't have a chunk manager so it doesn't know how to - // filter out docs it doesn't own. - assert(s.adminCommand({ + // Needs to waitForDelete because we'll be performing a slaveOk query, and secondaries don't + // have a chunk manager so it doesn't know how to filter out docs it doesn't own. 
+ assert.commandWorked(s.s0.adminCommand({ moveChunk: "test.foo", find: {_id: i * 100}, to: other._id, @@ -59,9 +61,9 @@ writeConcern: {w: 2}, _waitForDelete: true })); + assert.eq(2100, coll.find().itcount()); } s.stop(); - }()); diff --git a/jstests/sharding/balance_tags2.js b/jstests/sharding/balance_tags2.js index 8c54b2f3fc6..58ce0fa5ccc 100644 --- a/jstests/sharding/balance_tags2.js +++ b/jstests/sharding/balance_tags2.js @@ -1,27 +1,26 @@ // Test balancing all chunks to one shard by tagging the full shard-key range on that collection -var s = new ShardingTest( - {name: "balance_tags2", shards: 3, mongos: 1, other: {chunkSize: 1, enableBalancer: true}}); +var s = new ShardingTest({shards: 3, mongos: 1, other: {chunkSize: 1, enableBalancer: true}}); -s.adminCommand({enablesharding: "test"}); +assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); s.ensurePrimaryShard('test', 'shard0001'); var db = s.getDB("test"); var bulk = db.foo.initializeUnorderedBulkOp(); -for (i = 0; i < 21; i++) { +for (var i = 0; i < 21; i++) { bulk.insert({_id: i, x: i}); } assert.writeOK(bulk.execute()); -sh.shardCollection("test.foo", {_id: 1}); +assert.commandWorked(s.s0.adminCommand({shardCollection: "test.foo", key: {_id: 1}})); -sh.stopBalancer(); +s.stopBalancer(); -for (i = 0; i < 20; i++) { +for (var i = 0; i < 20; i++) { sh.splitAt("test.foo", {_id: i}); } -sh.startBalancer(); +s.startBalancer(); s.printShardingStatus(true); diff --git a/jstests/sharding/cursor_timeout.js b/jstests/sharding/cursor_timeout.js new file mode 100644 index 00000000000..cea17d93dcc --- /dev/null +++ b/jstests/sharding/cursor_timeout.js @@ -0,0 +1,86 @@ +// Basic integration tests for the background job that periodically kills idle cursors, in both +// mongod and mongos. This test creates the following four cursors: +// +// 1. A no-timeout cursor through mongos. +// 2. A no-timeout cursor through mongod. +// 3. A normal cursor through mongos. +// 4. A normal cursor through mongod. +// +// After a period of inactivity, the test asserts that cursors #1 and #2 are still alive, and that +// #3 and #4 have been killed. 
+(function() { + 'use strict'; + + var st = new ShardingTest({ + shards: 2, + other: { + chunkSize: 1, + shardOptions: {setParameter: "cursorTimeoutMillis=1000"}, + mongosOptions: {setParameter: "cursorTimeoutMillis=1000"} + } + }); + + var adminDB = st.admin; + var configDB = st.config; + var coll = st.s.getDB('test').user; + + assert.commandWorked(adminDB.runCommand({enableSharding: coll.getDB().getName()})); + st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001'); + assert.commandWorked(adminDB.runCommand({shardCollection: coll.getFullName(), key: {x: 1}})); + + var data = 'c'; + for (var x = 0; x < 18; x++) { + data += data; + } + + for (x = 0; x < 200; x++) { + coll.insert({x: x, v: data}); + } + + var chunkDoc = configDB.chunks.findOne(); + var chunkOwner = chunkDoc.shard; + var toShard = configDB.shards.findOne({_id: {$ne: chunkOwner}})._id; + var cmd = + {moveChunk: coll.getFullName(), find: chunkDoc.min, to: toShard, _waitForDelete: true}; + var res = adminDB.runCommand(cmd); + + jsTest.log('move result: ' + tojson(res)); + + var shardedCursorWithTimeout = coll.find(); + var shardedCursorWithNoTimeout = coll.find(); + shardedCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout); + + // Query directly to mongod + var shardHost = configDB.shards.findOne({_id: chunkOwner}).host; + var mongod = new Mongo(shardHost); + var shardColl = mongod.getCollection(coll.getFullName()); + + var cursorWithTimeout = shardColl.find(); + var cursorWithNoTimeout = shardColl.find(); + cursorWithNoTimeout.addOption(DBQuery.Option.noTimeout); + + shardedCursorWithTimeout.next(); + shardedCursorWithNoTimeout.next(); + + cursorWithTimeout.next(); + cursorWithNoTimeout.next(); + + // Wait until the idle cursor background job has killed the cursors that do not have the "no + // timeout" flag set. We use the "cursorTimeoutMillis" setParameter above to reduce the amount + // of time we need to wait here. + sleep(5000); + + assert.throws(function() { + shardedCursorWithTimeout.itcount(); + }); + assert.throws(function() { + cursorWithTimeout.itcount(); + }); + + // +1 because we already advanced once + assert.eq(coll.count(), shardedCursorWithNoTimeout.itcount() + 1); + + assert.eq(shardColl.count(), cursorWithNoTimeout.itcount() + 1); + + st.stop(); +})(); diff --git a/jstests/sharding/explain_cmd.js b/jstests/sharding/explain_cmd.js index c638fccbced..3293c167db1 100644 --- a/jstests/sharding/explain_cmd.js +++ b/jstests/sharding/explain_cmd.js @@ -1,172 +1,183 @@ // Tests for the mongos explain command. - -// Create a cluster with 3 shards. -var st = new ShardingTest({shards: 2}); -st.stopBalancer(); - -var db = st.s.getDB("test"); -var explain; - -// Setup a collection that will be sharded. The shard key will be 'a'. There's also an index on 'b'. -var collSharded = db.getCollection("mongos_explain_cmd"); -collSharded.drop(); -collSharded.ensureIndex({a: 1}); -collSharded.ensureIndex({b: 1}); - -// Enable sharding. -assert.commandWorked(db.adminCommand({enableSharding: db.getName()})); -st.ensurePrimaryShard(db.getName(), 'shard0001'); -db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}}); - -// Pre-split the collection to ensure that both shards have chunks. Explicitly -// move chunks since the balancer is disabled. 
-for (var i = 1; i <= 2; i++) { - assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: i}})); - - var shardName = "shard000" + (i - 1); - printjson(db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: i}, to: shardName})); -} - -// Put data on each shard. -for (var i = 0; i < 3; i++) { - collSharded.insert({_id: i, a: i, b: 1}); -} - -st.printShardingStatus(); - -// Test a scatter-gather count command. -assert.eq(3, collSharded.count({b: 1})); - -// Explain the scatter-gather count. -explain = db.runCommand( - {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"}); - -// Validate some basic properties of the result. -printjson(explain); -assert.commandWorked(explain); -assert("queryPlanner" in explain); -assert("executionStats" in explain); -assert.eq(2, explain.queryPlanner.winningPlan.shards.length); -assert.eq(2, explain.executionStats.executionStages.shards.length); - -// An explain of a command that doesn't exist should fail gracefully. -explain = db.runCommand( - {explain: {nonexistent: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"}); -printjson(explain); -assert.commandFailed(explain); - -// ------- - -// Setup a collection that is not sharded. -var collUnsharded = db.getCollection("mongos_explain_cmd_unsharded"); -collUnsharded.drop(); -collUnsharded.ensureIndex({a: 1}); -collUnsharded.ensureIndex({b: 1}); - -for (var i = 0; i < 3; i++) { - collUnsharded.insert({_id: i, a: i, b: 1}); -} -assert.eq(3, collUnsharded.count({b: 1})); - -explain = db.runCommand({ - explain: { - group: { - ns: collUnsharded.getName(), - key: "a", - cond: "b", - $reduce: function(curr, result) {}, - initial: {} - } - }, - verbosity: "allPlansExecution" -}); - -// Basic validation: a group command can only be passed through to an unsharded collection, -// so we should confirm that the mongos stage is always SINGLE_SHARD. -printjson(explain); -assert.commandWorked(explain); -assert("queryPlanner" in explain); -assert("executionStats" in explain); -assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage); - -// The same group should fail over the sharded collection, because group is only supported -// if it is passed through to an unsharded collection. -explain = db.runCommand({ - explain: { - group: { - ns: collSharded.getName(), - key: "a", - cond: "b", - $reduce: function(curr, result) {}, - initial: {} - } - }, - verbosity: "allPlansExecution" -}); -printjson(explain); -assert.commandFailed(explain); - -// ------- - -// Explain a delete operation and verify that it hits all shards without the shard key -explain = db.runCommand({ - explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]}, - verbosity: "allPlansExecution" -}); -assert.commandWorked(explain, tojson(explain)); -assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE"); -assert.eq(explain.queryPlanner.winningPlan.shards.length, 2); -assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "DELETE"); -assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "DELETE"); -// Check that the deletes didn't actually happen. 
-assert.eq(3, collSharded.count({b: 1})); - -// Explain a delete operation and verify that it hits only one shard with the shard key -explain = db.runCommand({ - explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]}, - verbosity: "allPlansExecution" -}); -assert.commandWorked(explain, tojson(explain)); -assert.eq(explain.queryPlanner.winningPlan.shards.length, 1); -// Check that the deletes didn't actually happen. -assert.eq(3, collSharded.count({b: 1})); - -// Check that we fail gracefully if we try to do an explain of a write batch that has more -// than one operation in it. -explain = db.runCommand({ - explain: - {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}]}, - verbosity: "allPlansExecution" -}); -assert.commandFailed(explain, tojson(explain)); - -// Explain a multi upsert operation and verify that it hits all shards -explain = db.runCommand({ - explain: {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]}, - verbosity: "allPlansExecution" -}); -assert.commandWorked(explain, tojson(explain)); -assert.eq(explain.queryPlanner.winningPlan.shards.length, 2); -assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE"); -assert.eq(explain.queryPlanner.winningPlan.shards.length, 2); -assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "UPDATE"); -assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "UPDATE"); -// Check that the update didn't actually happen. -assert.eq(0, collSharded.count({b: 10})); - -// Explain an upsert operation and verify that it hits only a single shard -explain = db.runCommand({ - explain: {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]}, - verbosity: "allPlansExecution" -}); -assert.commandWorked(explain, tojson(explain)); -assert.eq(explain.queryPlanner.winningPlan.shards.length, 1); -// Check that the upsert didn't actually happen. -assert.eq(0, collSharded.count({a: 10})); - -// Explain an upsert operation which cannot be targeted, ensure an error is thrown -explain = db.runCommand({ - explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]}, - verbosity: "allPlansExecution" -}); -assert.commandFailed(explain, tojson(explain)); +(function() { + 'use strict'; + + // Create a cluster with 3 shards. + var st = new ShardingTest({shards: 2}); + + var db = st.s.getDB("test"); + var explain; + + // Setup a collection that will be sharded. The shard key will be 'a'. There's also an index on + // 'b'. + var collSharded = db.getCollection("mongos_explain_cmd"); + collSharded.drop(); + collSharded.ensureIndex({a: 1}); + collSharded.ensureIndex({b: 1}); + + // Enable sharding. + assert.commandWorked(db.adminCommand({enableSharding: db.getName()})); + st.ensurePrimaryShard(db.getName(), 'shard0001'); + db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}}); + + // Pre-split the collection to ensure that both shards have chunks. Explicitly + // move chunks since the balancer is disabled. + for (var i = 1; i <= 2; i++) { + assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: i}})); + + var shardName = "shard000" + (i - 1); + printjson( + db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: i}, to: shardName})); + } + + // Put data on each shard. + for (var i = 0; i < 3; i++) { + collSharded.insert({_id: i, a: i, b: 1}); + } + + st.printShardingStatus(); + + // Test a scatter-gather count command. 
+ assert.eq(3, collSharded.count({b: 1})); + + // Explain the scatter-gather count. + explain = db.runCommand( + {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"}); + + // Validate some basic properties of the result. + printjson(explain); + assert.commandWorked(explain); + assert("queryPlanner" in explain); + assert("executionStats" in explain); + assert.eq(2, explain.queryPlanner.winningPlan.shards.length); + assert.eq(2, explain.executionStats.executionStages.shards.length); + + // An explain of a command that doesn't exist should fail gracefully. + explain = db.runCommand({ + explain: {nonexistent: collSharded.getName(), query: {b: 1}}, + verbosity: "allPlansExecution" + }); + printjson(explain); + assert.commandFailed(explain); + + // ------- + + // Setup a collection that is not sharded. + var collUnsharded = db.getCollection("mongos_explain_cmd_unsharded"); + collUnsharded.drop(); + collUnsharded.ensureIndex({a: 1}); + collUnsharded.ensureIndex({b: 1}); + + for (var i = 0; i < 3; i++) { + collUnsharded.insert({_id: i, a: i, b: 1}); + } + assert.eq(3, collUnsharded.count({b: 1})); + + explain = db.runCommand({ + explain: { + group: { + ns: collUnsharded.getName(), + key: "a", + cond: "b", + $reduce: function(curr, result) {}, + initial: {} + } + }, + verbosity: "allPlansExecution" + }); + + // Basic validation: a group command can only be passed through to an unsharded collection, + // so we should confirm that the mongos stage is always SINGLE_SHARD. + printjson(explain); + assert.commandWorked(explain); + assert("queryPlanner" in explain); + assert("executionStats" in explain); + assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage); + + // The same group should fail over the sharded collection, because group is only supported + // if it is passed through to an unsharded collection. + explain = db.runCommand({ + explain: { + group: { + ns: collSharded.getName(), + key: "a", + cond: "b", + $reduce: function(curr, result) {}, + initial: {} + } + }, + verbosity: "allPlansExecution" + }); + printjson(explain); + assert.commandFailed(explain); + + // ------- + + // Explain a delete operation and verify that it hits all shards without the shard key + explain = db.runCommand({ + explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]}, + verbosity: "allPlansExecution" + }); + assert.commandWorked(explain, tojson(explain)); + assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE"); + assert.eq(explain.queryPlanner.winningPlan.shards.length, 2); + assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "DELETE"); + assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "DELETE"); + // Check that the deletes didn't actually happen. + assert.eq(3, collSharded.count({b: 1})); + + // Explain a delete operation and verify that it hits only one shard with the shard key + explain = db.runCommand({ + explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]}, + verbosity: "allPlansExecution" + }); + assert.commandWorked(explain, tojson(explain)); + assert.eq(explain.queryPlanner.winningPlan.shards.length, 1); + // Check that the deletes didn't actually happen. + assert.eq(3, collSharded.count({b: 1})); + + // Check that we fail gracefully if we try to do an explain of a write batch that has more + // than one operation in it. 
+ explain = db.runCommand({ + explain: { + delete: collSharded.getName(), + deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}] + }, + verbosity: "allPlansExecution" + }); + assert.commandFailed(explain, tojson(explain)); + + // Explain a multi upsert operation and verify that it hits all shards + explain = db.runCommand({ + explain: + {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]}, + verbosity: "allPlansExecution" + }); + assert.commandWorked(explain, tojson(explain)); + assert.eq(explain.queryPlanner.winningPlan.shards.length, 2); + assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE"); + assert.eq(explain.queryPlanner.winningPlan.shards.length, 2); + assert.eq(explain.queryPlanner.winningPlan.shards[0].winningPlan.stage, "UPDATE"); + assert.eq(explain.queryPlanner.winningPlan.shards[1].winningPlan.stage, "UPDATE"); + // Check that the update didn't actually happen. + assert.eq(0, collSharded.count({b: 10})); + + // Explain an upsert operation and verify that it hits only a single shard + explain = db.runCommand({ + explain: {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]}, + verbosity: "allPlansExecution" + }); + assert.commandWorked(explain, tojson(explain)); + assert.eq(explain.queryPlanner.winningPlan.shards.length, 1); + // Check that the upsert didn't actually happen. + assert.eq(0, collSharded.count({a: 10})); + + // Explain an upsert operation which cannot be targeted, ensure an error is thrown + explain = db.runCommand({ + explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]}, + verbosity: "allPlansExecution" + }); + assert.commandFailed(explain, tojson(explain)); + + st.stop(); +})(); diff --git a/jstests/sharding/explain_find_and_modify_sharded.js b/jstests/sharding/explain_find_and_modify_sharded.js index 62ffa2d35f8..b4bb1a5ccb9 100644 --- a/jstests/sharding/explain_find_and_modify_sharded.js +++ b/jstests/sharding/explain_find_and_modify_sharded.js @@ -9,7 +9,6 @@ // Create a cluster with 2 shards. var st = new ShardingTest({shards: 2}); - st.stopBalancer(); var testDB = st.s.getDB('test'); var shardKey = {a: 1}; @@ -84,4 +83,5 @@ assert.commandWorked(res); assertExplainResult(res, 'executionStats', 'executionStages', 'shard0001', 'DELETE'); + st.stop(); })(); diff --git a/jstests/sharding/hash_shard_unique_compound.js b/jstests/sharding/hash_shard_unique_compound.js index 3d82c2452b5..abaf45260b9 100644 --- a/jstests/sharding/hash_shard_unique_compound.js +++ b/jstests/sharding/hash_shard_unique_compound.js @@ -2,44 +2,42 @@ // Does 2 things and checks for consistent error: // 1.) shard collection on hashed "a", ensure unique index {a:1, b:1} // 2.) reverse order +(function() { + 'use strict'; -var s = new ShardingTest({name: jsTestName(), shards: 1, mongos: 1, verbose: 1}); -var dbName = "test"; -var collName = "foo"; -var ns = dbName + "." + collName; -var db = s.getDB(dbName); -var coll = db.getCollection(collName); + var s = new ShardingTest({shards: 1, mongos: 1}); + var dbName = "test"; + var collName = "foo"; + var ns = dbName + "." 
+ collName; + var db = s.getDB(dbName); + var coll = db.getCollection(collName); -// Enable sharding on DB -var res = db.adminCommand({enablesharding: dbName}); + // Enable sharding on DB + assert.commandWorked(db.adminCommand({enablesharding: dbName})); -// for simplicity start by turning off balancer -var res = s.stopBalancer(); + // Shard a fresh collection using a hashed shard key + assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}})); -// shard a fresh collection using a hashed shard key -coll.drop(); -assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}})); -s.printShardingStatus(); + // Create unique index + assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true})); -// Create unique index -assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true})); + jsTest.log("------ indexes -------"); + jsTest.log(tojson(coll.getIndexes())); -jsTest.log("------ indexes -------"); -jsTest.log(tojson(coll.getIndexes())); + // Second Part + jsTest.log("------ dropping sharded collection to start part 2 -------"); + coll.drop(); -// Second Part -jsTest.log("------ dropping sharded collection to start part 2 -------"); -coll.drop(); + // Create unique index + assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true})); -// Create unique index -assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true})); + // shard a fresh collection using a hashed shard key + assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}), + "shardcollection didn't worked 2"); -// shard a fresh collection using a hashed shard key -assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}), - "shardcollection didn't worked 2"); + s.printShardingStatus(); + jsTest.log("------ indexes 2-------"); + jsTest.log(tojson(coll.getIndexes())); -s.printShardingStatus(); -jsTest.log("------ indexes 2-------"); -jsTest.log(tojson(coll.getIndexes())); - -s.stop(); + s.stop(); +})(); diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js index 01260123b67..6166682bd83 100644 --- a/jstests/sharding/migrateBig.js +++ b/jstests/sharding/migrateBig.js @@ -1,64 +1,66 @@ (function() { + 'use strict'; var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}}); - s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true); - s.adminCommand({enablesharding: "test"}); + assert.writeOK( + s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true)); + assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); s.ensurePrimaryShard('test', 'shard0001'); - s.adminCommand({shardcollection: "test.foo", key: {x: 1}}); + assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}})); - db = s.getDB("test"); - coll = db.foo; + var db = s.getDB("test"); + var coll = db.foo; - big = ""; + var big = ""; while (big.length < 10000) big += "eliot"; var bulk = coll.initializeUnorderedBulkOp(); - for (x = 0; x < 100; x++) { + for (var x = 0; x < 100; x++) { bulk.insert({x: x, big: big}); } assert.writeOK(bulk.execute()); - s.printShardingStatus(); - - s.adminCommand({split: "test.foo", middle: {x: 30}}); - s.adminCommand({split: "test.foo", middle: {x: 66}}); - s.adminCommand( - {movechunk: "test.foo", find: {x: 90}, to: s.getOther(s.getPrimaryShard("test")).name}); + assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 30}})); + assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 
66}})); + assert.commandWorked(s.s0.adminCommand( + {movechunk: "test.foo", find: {x: 90}, to: s.getOther(s.getPrimaryShard("test")).name})); s.printShardingStatus(); print("YO : " + s.getPrimaryShard("test").host); - direct = new Mongo(s.getPrimaryShard("test").host); + var direct = new Mongo(s.getPrimaryShard("test").host); print("direct : " + direct); - directDB = direct.getDB("test"); + var directDB = direct.getDB("test"); - for (done = 0; done < 2 * 1024 * 1024; done += big.length) { + for (var done = 0; done < 2 * 1024 * 1024; done += big.length) { assert.writeOK(directDB.foo.insert({x: 50 + Math.random(), big: big})); } s.printShardingStatus(); assert.throws(function() { - s.adminCommand( - {movechunk: "test.foo", find: {x: 50}, to: s.getOther(s.getPrimaryShard("test")).name}); + assert.commandWorked(s.s0.adminCommand({ + movechunk: "test.foo", + find: {x: 50}, + to: s.getOther(s.getPrimaryShard("test")).name + })); }, [], "move should fail"); - for (i = 0; i < 20; i += 2) { + for (var i = 0; i < 20; i += 2) { try { - s.adminCommand({split: "test.foo", middle: {x: i}}); + assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: i}})); } catch (e) { - // we may have auto split on some of these - // which is ok + // We may have auto split on some of these, which is ok print(e); } } s.printShardingStatus(); - s.config.settings.update({_id: "balancer"}, {$set: {stopped: false}}, true); + s.startBalancer(); assert.soon(function() { var x = s.chunkDiff("foo", "test"); @@ -73,5 +75,4 @@ assert.eq(coll.count(), coll.find().itcount()); s.stop(); - })(); diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js index 63b5ef3090c..798338c39c0 100644 --- a/jstests/sharding/printShardingStatus.js +++ b/jstests/sharding/printShardingStatus.js @@ -3,6 +3,7 @@ // headings and the names of sharded collections and their shard keys. (function() { + 'use strict'; var st = new ShardingTest({shards: 1, mongos: 2, config: 1, other: {smallfiles: true}}); @@ -230,5 +231,4 @@ assert(mongos.getDB("test").dropDatabase()); st.stop(); - })(); diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js index 926b350c7e9..64716ede81e 100644 --- a/jstests/sharding/shard3.js +++ b/jstests/sharding/shard3.js @@ -1,5 +1,4 @@ (function() { - // Include helpers for analyzing explain output. 
load("jstests/libs/analyze_plan.js"); @@ -17,11 +16,14 @@ } assert(sh.getBalancerState(), "A1"); - sh.setBalancerState(false); + + sh.stopBalancer(); assert(!sh.getBalancerState(), "A2"); - sh.setBalancerState(true); + + sh.startBalancer(); assert(sh.getBalancerState(), "A3"); - sh.setBalancerState(false); + + sh.stopBalancer(); assert(!sh.getBalancerState(), "A4"); s.config.databases.find().forEach(printjson); diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js index ad14f8642cb..be21049650e 100644 --- a/jstests/sharding/split_with_force_small.js +++ b/jstests/sharding/split_with_force_small.js @@ -1,71 +1,69 @@ // // Tests autosplit locations with force : true, for small collections // +(function() { + 'use strict'; -var options = { - chunkSize: 1, // MB - mongosOptions: {noAutoSplit: ""} -}; + var st = new ShardingTest( + {shards: 1, mongos: 1, other: {chunkSize: 1, mongosOptions: {noAutoSplit: ""}}}); -var st = new ShardingTest({shards: 1, mongos: 1, other: options}); -st.stopBalancer(); + var mongos = st.s0; + var admin = mongos.getDB("admin"); + var config = mongos.getDB("config"); + var shardAdmin = st.shard0.getDB("admin"); + var coll = mongos.getCollection("foo.bar"); -var mongos = st.s0; -var admin = mongos.getDB("admin"); -var config = mongos.getDB("config"); -var shardAdmin = st.shard0.getDB("admin"); -var coll = mongos.getCollection("foo.bar"); + assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""})); + assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}})); + assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}})); -assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok); -assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok); -assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok); + jsTest.log("Insert a bunch of data into the low chunk of a collection," + + " to prevent relying on stats."); -jsTest.log("Insert a bunch of data into the low chunk of a collection," + - " to prevent relying on stats."); + var data128k = "x"; + for (var i = 0; i < 7; i++) + data128k += data128k; -var data128k = "x"; -for (var i = 0; i < 7; i++) - data128k += data128k; + var bulk = coll.initializeUnorderedBulkOp(); + for (var i = 0; i < 1024; i++) { + bulk.insert({_id: -(i + 1)}); + } + assert.writeOK(bulk.execute()); -var bulk = coll.initializeUnorderedBulkOp(); -for (var i = 0; i < 1024; i++) { - bulk.insert({_id: -(i + 1)}); -} -assert.writeOK(bulk.execute()); + jsTest.log("Insert 32 docs into the high chunk of a collection"); -jsTest.log("Insert 32 docs into the high chunk of a collection"); + bulk = coll.initializeUnorderedBulkOp(); + for (var i = 0; i < 32; i++) { + bulk.insert({_id: i}); + } + assert.writeOK(bulk.execute()); -bulk = coll.initializeUnorderedBulkOp(); -for (var i = 0; i < 32; i++) { - bulk.insert({_id: i}); -} -assert.writeOK(bulk.execute()); + jsTest.log("Split off MaxKey chunk..."); -jsTest.log("Split off MaxKey chunk..."); + assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 32}})); -assert(admin.runCommand({split: coll + "", middle: {_id: 32}}).ok); + jsTest.log("Keep splitting chunk multiple times..."); -jsTest.log("Keep splitting chunk multiple times..."); - -st.printShardingStatus(); - -for (var i = 0; i < 5; i++) { - assert(admin.runCommand({split: coll + "", find: {_id: 0}}).ok); st.printShardingStatus(); -} -// Make sure we can't split further than 5 (2^5) times 
-assert(!admin.runCommand({split: coll + "", find: {_id: 0}}).ok); + for (var i = 0; i < 5; i++) { + assert.commandWorked(admin.runCommand({split: coll + "", find: {_id: 0}})); + st.printShardingStatus(); + } + + // Make sure we can't split further than 5 (2^5) times + assert.commandFailed(admin.runCommand({split: coll + "", find: {_id: 0}})); -var chunks = config.chunks.find({'min._id': {$gte: 0, $lt: 32}}).sort({min: 1}).toArray(); -printjson(chunks); + var chunks = config.chunks.find({'min._id': {$gte: 0, $lt: 32}}).sort({min: 1}).toArray(); + printjson(chunks); -// Make sure the chunks grow by 2x (except the first) -var nextSize = 1; -for (var i = 0; i < chunks.size; i++) { - assert.eq(coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}), nextSize); - if (i != 0) - nextSize += nextSize; -} + // Make sure the chunks grow by 2x (except the first) + var nextSize = 1; + for (var i = 0; i < chunks.size; i++) { + assert.eq(coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}), nextSize); + if (i != 0) + nextSize += nextSize; + } -st.stop(); + st.stop(); +})(); diff --git a/jstests/sharding/stale_version_write.js b/jstests/sharding/stale_version_write.js index e5885dcfa41..bd603124548 100644 --- a/jstests/sharding/stale_version_write.js +++ b/jstests/sharding/stale_version_write.js @@ -1,37 +1,37 @@ // Tests whether a reset sharding version triggers errors +(function() { + 'use strict'; -jsTest.log("Starting sharded cluster..."); + var st = new ShardingTest({shards: 1, mongos: 2}); -var st = new ShardingTest({shards: 1, mongos: 2, verbose: 2}); + var mongosA = st.s0; + var mongosB = st.s1; -st.stopBalancer(); + jsTest.log("Adding new collections..."); -var mongosA = st.s0; -var mongosB = st.s1; + var collA = mongosA.getCollection(jsTestName() + ".coll"); + assert.writeOK(collA.insert({hello: "world"})); -jsTest.log("Adding new collections..."); + var collB = mongosB.getCollection("" + collA); + assert.writeOK(collB.insert({hello: "world"})); -var collA = mongosA.getCollection(jsTestName() + ".coll"); -assert.writeOK(collA.insert({hello: "world"})); + jsTest.log("Enabling sharding..."); -var collB = mongosB.getCollection("" + collA); -assert.writeOK(collB.insert({hello: "world"})); + assert.commandWorked(mongosA.getDB("admin").adminCommand({enableSharding: "" + collA.getDB()})); + assert.commandWorked( + mongosA.getDB("admin").adminCommand({shardCollection: "" + collA, key: {_id: 1}})); -jsTest.log("Enabling sharding..."); + // MongoD doesn't know about the config shard version *until* MongoS tells it + collA.findOne(); -printjson(mongosA.getDB("admin").runCommand({enableSharding: "" + collA.getDB()})); -printjson(mongosA.getDB("admin").runCommand({shardCollection: "" + collA, key: {_id: 1}})); + jsTest.log("Trigger shard version mismatch..."); -// MongoD doesn't know about the config shard version *until* MongoS tells it -collA.findOne(); + assert.writeOK(collB.insert({goodbye: "world"})); -jsTest.log("Trigger shard version mismatch..."); + print("Inserted..."); -assert.writeOK(collB.insert({goodbye: "world"})); + assert.eq(3, collA.find().itcount()); + assert.eq(3, collB.find().itcount()); -print("Inserted..."); - -assert.eq(3, collA.find().itcount()); -assert.eq(3, collB.find().itcount()); - -st.stop(); + st.stop(); +})(); diff --git a/jstests/slow1/mr_during_migrate.js b/jstests/slow1/mr_during_migrate.js index 61a06f11249..ae72e094ce6 100644 --- a/jstests/slow1/mr_during_migrate.js +++ b/jstests/slow1/mr_during_migrate.js @@ -1,111 +1,110 @@ // Do parallel 
 // Do parallel ops with migrates occurring
+(function() {
+    'use strict';
 
-var st = new ShardingTest({shards: 10, mongos: 2, verbose: 2});
+    var st = new ShardingTest({shards: 10, mongos: 2, verbose: 2});
 
-jsTest.log("Doing parallel operations...");
+    var mongos = st.s0;
+    var admin = mongos.getDB("admin");
+    var coll = st.s.getCollection(jsTest.name() + ".coll");
 
-// Stop balancer, since it'll just get in the way of these
-st.stopBalancer();
+    var numDocs = 1024 * 1024;
+    var dataSize = 1024;  // bytes, must be power of 2
 
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var coll = st.s.getCollection(jsTest.name() + ".coll");
+    var data = "x";
+    while (data.length < dataSize)
+        data += data;
 
-var numDocs = 1024 * 1024;
-var dataSize = 1024;  // bytes, must be power of 2
+    var bulk = coll.initializeUnorderedBulkOp();
+    for (var i = 0; i < numDocs; i++) {
+        bulk.insert({_id: i, data: data});
+    }
+    assert.writeOK(bulk.execute());
 
-var data = "x";
-while (data.length < dataSize)
-    data += data;
+    // Make sure everything got inserted
+    assert.eq(numDocs, coll.find().itcount());
 
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < numDocs; i++) {
-    bulk.insert({_id: i, data: data});
-}
-assert.writeOK(bulk.execute());
+    jsTest.log("Inserted " + sh._dataFormat(dataSize * numDocs) + " of data.");
 
-// Make sure everything got inserted
-assert.eq(numDocs, coll.find().itcount());
+    // Shard collection
+    st.shardColl(coll, {_id: 1}, false);
 
-jsTest.log("Inserted " + sh._dataFormat(dataSize * numDocs) + " of data.");
+    st.printShardingStatus();
 
-// Shard collection
-st.shardColl(coll, {_id: 1}, false);
+    jsTest.log("Sharded collection now initialized, starting migrations...");
 
-st.printShardingStatus();
-
-jsTest.log("Sharded collection now initialized, starting migrations...");
+    var checkMigrate = function() {
+        print("Result of migrate : ");
+        printjson(this);
+    };
 
-var checkMigrate = function() {
-    print("Result of migrate : ");
-    printjson(this);
-};
+    // Creates a number of migrations of random chunks to diff shard servers
+    var ops = [];
+    for (var i = 0; i < st._connections.length; i++) {
+        ops.push({
+            op: "command",
+            ns: "admin",
+            command: {
+                moveChunk: "" + coll,
+                find: {_id: {"#RAND_INT": [0, numDocs]}},
+                to: st._connections[i].shardName,
+                _waitForDelete: true
+            },
+            showResult: true
+        });
+    }
 
-// Creates a number of migrations of random chunks to diff shard servers
-var ops = [];
-for (var i = 0; i < st._connections.length; i++) {
-    ops.push({
-        op: "command",
-        ns: "admin",
-        command: {
-            moveChunk: "" + coll,
-            find: {_id: {"#RAND_INT": [0, numDocs]}},
-            to: st._connections[i].shardName,
-            _waitForDelete: true
-        },
-        showResult: true
-    });
-}
+    // TODO: Also migrate output collection
 
-// TODO: Also migrate output collection
+    jsTest.log("Starting migrations now...");
 
-jsTest.log("Starting migrations now...");
+    var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
 
-var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
+    //#######################
+    // Tests during migration
 
-//#######################
-// Tests during migration
+    var numTests = 5;
 
-var numTests = 5;
+    for (var t = 0; t < numTests; t++) {
+        jsTest.log("Test #" + t);
 
-for (var t = 0; t < numTests; t++) {
-    jsTest.log("Test #" + t);
+        var mongos = st.s1;  // use other mongos so we get stale shard versions
+        var coll = mongos.getCollection(coll + "");
+        var outputColl = mongos.getCollection(coll + "_output");
 
-    var mongos = st.s1;  // use other mongos so we get stale shard versions
-    var coll = mongos.getCollection(coll + "");
-    var outputColl = mongos.getCollection(coll + "_output");
+        var numTypes = 32;
+        var map = function() {
+            emit(this._id % 32 /* must be hardcoded */, {c: 1});
+        };
 
-    var numTypes = 32;
-    var map = function() {
-        emit(this._id % 32 /* must be hardcoded */, {c: 1});
-    };
-    var reduce = function(k, vals) {
-        var total = 0;
-        for (var i = 0; i < vals.length; i++)
-            total += vals[i].c;
-        return {c: total};
-    };
+        var reduce = function(k, vals) {
+            var total = 0;
+            for (var i = 0; i < vals.length; i++)
+                total += vals[i].c;
+            return {c: total};
+        };
 
-    printjson(coll.find({_id: 0}).itcount());
+        printjson(coll.find({_id: 0}).itcount());
 
-    jsTest.log("Starting new mapReduce run #" + t);
+        jsTest.log("Starting new mapReduce run #" + t);
 
-    // assert.eq( coll.find().itcount(), numDocs )
+        // assert.eq( coll.find().itcount(), numDocs )
 
-    coll.getMongo().getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
+        coll.getMongo().getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
 
-    printjson(coll.mapReduce(
-        map, reduce, {out: {replace: outputColl.getName(), db: outputColl.getDB() + ""}}));
+        printjson(coll.mapReduce(
+            map, reduce, {out: {replace: outputColl.getName(), db: outputColl.getDB() + ""}}));
 
-    jsTest.log("MapReduce run #" + t + " finished.");
+        jsTest.log("MapReduce run #" + t + " finished.");
 
-    assert.eq(outputColl.find().itcount(), numTypes);
+        assert.eq(outputColl.find().itcount(), numTypes);
 
-    outputColl.find().forEach(function(x) {
-        assert.eq(x.value.c, numDocs / numTypes);
-    });
-}
+        outputColl.find().forEach(function(x) {
+            assert.eq(x.value.c, numDocs / numTypes);
+        });
+    }
 
-printjson(benchFinish(bid));
+    printjson(benchFinish(bid));
 
-st.stop();
+    st.stop();
+})();
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 0876aeec7d9..2d6655f04c8 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -81,9 +81,9 @@ env.Library(
         '$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
         '$BUILD_DIR/mongo/s/catalog/replset/catalog_manager_replica_set',
         '$BUILD_DIR/mongo/s/coreshard',
-        '$BUILD_DIR/mongo/s/mongoscore',
         '$BUILD_DIR/mongo/util/clock_source_mock',
         '$BUILD_DIR/mongo/util/net/message_port_mock',
+        'mongoscore',
     ],
     LIBDEPS_TAGS=[
         # Depends on coreshard, but that would be circular
diff --git a/src/mongo/s/catalog/catalog_manager.h b/src/mongo/s/catalog/catalog_manager.h
index 72cfdc51b8a..b3f87f56d37 100644
--- a/src/mongo/s/catalog/catalog_manager.h
+++ b/src/mongo/s/catalog/catalog_manager.h
@@ -448,9 +448,6 @@ public:
         StringData whyMessage,
         Milliseconds waitFor = DistLockManager::kSingleLockAttemptTimeout) = 0;
 
-protected:
-    CatalogManager() = default;
-
     /**
      * Obtains a reference to the distributed lock manager instance to use for synchronizing
      * system-wide changes.
@@ -459,6 +456,9 @@ protected:
      * be cached.
      */
     virtual DistLockManager* getDistLockManager() = 0;
+
+protected:
+    CatalogManager() = default;
 };
 
 }  // namespace mongo