diff options
author | Randolph Tan <randolph@10gen.com> | 2015-10-23 13:49:53 -0400 |
---|---|---|
committer | Randolph Tan <randolph@10gen.com> | 2015-10-27 11:33:23 -0400 |
commit | a051e68a4e1688b0943becbcc4c1dde9bf29521b (patch) | |
tree | d6bf1f16ce37e1fad8215a960c4731a5183c1b88 /jstests | |
parent | 45bfa34b8b99237c16a839e5afa0df44c5fde15d (diff) | |
download | mongo-a051e68a4e1688b0943becbcc4c1dde9bf29521b.tar.gz |
SERVER-21102 Add more basic tests for split and auto split
Diffstat (limited to 'jstests')
-rw-r--r-- | jstests/sharding/basic_split.js | 92 |
-rw-r--r-- | jstests/sharding/write_cmd_auto_split.js | 6 |
2 files changed, 96 insertions, 2 deletions
diff --git a/jstests/sharding/basic_split.js b/jstests/sharding/basic_split.js new file mode 100644 index 00000000000..0c5f7e0e416 --- /dev/null +++ b/jstests/sharding/basic_split.js @@ -0,0 +1,92 @@ +/** + * Perform basic tests for the split command against mongos. + */ +(function() { +"use strict"; + +var st = new ShardingTest({ shards: 1, other: { chunkSize: 1 }}); +var configDB = st.s.getDB('config'); + +// split on invalid ns. +assert.commandFailed(configDB.adminCommand({ split: 'user', key: { _id: 1 }})); + +// split on unsharded collection (db is not sharding enabled). +assert.commandFailed(configDB.adminCommand({ split: 'test.user', key: { _id: 1 }})); + +configDB.adminCommand({ enableSharding: 'test' }); + +// split on unsharded collection (db is sharding enabled). +assert.commandFailed(configDB.adminCommand({ split: 'test.user', key: { _id: 1 }})); + +assert.commandWorked(configDB.adminCommand({ shardCollection: 'test.user', key: { _id: 1 }})); + +assert.eq(null, configDB.chunks.findOne({ ns: 'test.user', min: { _id: 0 }})); + +assert.commandWorked(configDB.adminCommand({ split: 'test.user', middle: { _id: 0 }})); +assert.neq(null, configDB.chunks.findOne({ ns: 'test.user', min: { _id: 0 }})); + +// Cannot split on existing chunk boundary. +assert.commandFailed(configDB.adminCommand({ split: 'test.user', middle: { _id: 0 }})); + +// Attempt to split on a value that is not the shard key. +assert.commandFailed(configDB.adminCommand({ split: 'test.user', middle: { x: 100 }})); +assert.commandFailed(configDB.adminCommand({ split: 'test.user', find: { x: 100 }})); +assert.commandFailed(configDB.adminCommand({ split: 'test.user', + bounds: [{ x: MinKey }, { x: MaxKey }]})); + +// Insert documents large enough to fill up a chunk, but do it directly in the shard in order +// to bypass the auto-split logic. 
+var kiloDoc = new Array(1024).join('x'); +var testDB = st.d0.getDB('test'); +var bulk = testDB.user.initializeUnorderedBulkOp(); +for (var x = -1200; x < 1200; x++) { + bulk.insert({ _id: x, val: kiloDoc }); +} +assert.writeOK(bulk.execute()); + +assert.eq(1, configDB.chunks.find({ ns: 'test.user', min: { $gte: { _id: 0 }}}).itcount()); + +// Errors if bounds do not correspond to existing chunk boundaries. +assert.commandFailed(configDB.adminCommand({ split: 'test.user', + bounds: [{ _id: 0 }, { _id: 1000 }]})); +assert.eq(1, configDB.chunks.find({ ns: 'test.user', min: { $gte: { _id: 0 }}}).itcount()); + +assert.commandWorked(configDB.adminCommand({ split: 'test.user', + bounds: [{ _id: 0 }, { _id: MaxKey }]})); +assert.gt(configDB.chunks.find({ ns: 'test.user', min: { $gte: { _id: 0 }}}).itcount(), 1); + +assert.eq(1, configDB.chunks.find({ ns: 'test.user', min: { $lt: { _id: 0 }}}).itcount()); +assert.commandWorked(configDB.adminCommand({ split: 'test.user', find: { _id: -1 }})); +assert.gt(configDB.chunks.find({ ns: 'test.user', min: { $lt: { _id: 0 }}}).itcount(), 1); + +// +// Compound Key +// + +assert.commandWorked(configDB.adminCommand({ shardCollection: 'test.compound', key: { x: 1, y: 1 }})); + +assert.eq(null, configDB.chunks.findOne({ ns: 'test.compound', min: { x: 0, y: 0 }})); +assert.commandWorked(configDB.adminCommand({ split: 'test.compound', middle: { x: 0, y: 0 }})); +assert.neq(null, configDB.chunks.findOne({ ns: 'test.compound', min: { x: 0, y: 0 }})); + +// cannot split on existing chunk boundary. 
+assert.commandFailed(configDB.adminCommand({ split: 'test.compound', middle: { x: 0, y: 0 }})); + +bulk = testDB.compound.initializeUnorderedBulkOp(); +for (x = -1200; x < 1200; x++) { + bulk.insert({ x: x, y: x, val: kiloDoc }); +} +assert.writeOK(bulk.execute()); + +assert.eq(1, configDB.chunks.find({ ns: 'test.compound', min: { $gte: { x: 0, y: 0 }}}).itcount()); +assert.commandWorked(configDB.adminCommand({ split: 'test.compound', + bounds: [{ x: 0, y: 0 }, { x: MaxKey, y: MaxKey }]})); +assert.gt(configDB.chunks.find({ ns: 'test.compound', min: { $gte: { x: 0, y: 0 }}}).itcount(), 1); + +assert.eq(1, configDB.chunks.find({ ns: 'test.compound', min: { $lt: { x: 0, y: 0 }}}).itcount()); +assert.commandWorked(configDB.adminCommand({ split: 'test.compound', find: { x: -1, y: -1 }})); +assert.gt(configDB.chunks.find({ ns: 'test.compound', min: { $lt: { x: 0, y: 0 }}}).itcount(), 1); + +st.stop(); + +})(); diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js index 0e7e3cbd0c9..799c36cc3aa 100644 --- a/jstests/sharding/write_cmd_auto_split.js +++ b/jstests/sharding/write_cmd_auto_split.js @@ -18,7 +18,7 @@ assert.eq(1, configDB.chunks.find().itcount()); // Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so // we are going to be conservative. -for (var x = 0; x < 1100; x++) { +for (var x = 0; x < 3100; x++) { var res = testDB.runCommand({ insert: 'insert', documents: [{ x: x, v: doc1k }], ordered: false, @@ -27,7 +27,9 @@ for (var x = 0; x < 1100; x++) { assert(res.ok, 'insert failed: ' + tojson(res)); } -assert.gt(configDB.chunks.find().itcount(), 1); +// Inserted batch is a multiple of the chunkSize, expect the chunks to split into +// more than 2. +assert.gt(configDB.chunks.find().itcount(), 2); testDB.dropDatabase(); jsTest.log('Test single batch update should auto-split'); |