Diffstat (limited to 'jstests/sharding')
-rw-r--r-- | jstests/sharding/hash_crud.js                       |  65 |
-rw-r--r-- | jstests/sharding/hash_crud_during_migration.js      | 115 |
-rw-r--r-- | jstests/sharding/hash_crud_txns_during_migration.js | 135 |
-rw-r--r-- | jstests/sharding/libs/chunk_bounds_util.js          |  74 |
4 files changed, 389 insertions, 0 deletions
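
All three new tests rely on the same targeting idea: with a {x: 'hashed'} shard key, a document is routed by the hash of its x value, so a test can predict the owning chunk by hashing the value and checking which [min, max) chunk bounds contain the hash. A minimal shell sketch of that check (not part of the commit; the chunk bounds below are made up for illustration, and chunkBoundsUtil comes from the new library at the end of this diff):

load("jstests/sharding/libs/chunk_bounds_util.js");

// Hash the raw shard key value the same way the server does for a hashed shard key.
let hashedKey = {x: convertShardKeyToHashed(-10)};  // deterministic NumberLong hash

// Hypothetical chunk covering all negative hash values.
let exampleBounds = [{x: MinKey}, {x: NumberLong(0)}];

// True iff the hashed key falls inside the chunk's [min, max) range.
print(chunkBoundsUtil.containsKey(hashedKey, exampleBounds[0], exampleBounds[1]));
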
diff --git a/jstests/sharding/hash_crud.js b/jstests/sharding/hash_crud.js
new file mode 100644
index 00000000000..85b9cf52ec0
--- /dev/null
+++ b/jstests/sharding/hash_crud.js
@@ -0,0 +1,65 @@
+/*
+ * Test that crud and find operations target the right shards.
+ */
+(function() {
+'use strict';
+
+load("jstests/sharding/libs/chunk_bounds_util.js");
+
+let st = new ShardingTest({shards: 3});
+let dbName = "test";
+let collName = "user";
+let ns = dbName + "." + collName;
+let configDB = st.s.getDB('config');
+let testDB = st.s.getDB(dbName);
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard1.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 'hashed'}}));
+
+let chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
+
+jsTest.log("Test 'insert'");
+// Insert docs that are expected to go to three different shards.
+let docs = [{x: -10}, {x: -1}, {x: 10}];
+assert.commandWorked(testDB.user.insert(docs));
+
+// Check that the docs are on the right shards and store the shard for each doc.
+let shards = [];
+for (let doc of docs) {
+    let hash = convertShardKeyToHashed(doc.x);
+    let shard = chunkBoundsUtil.findShardForShardKey(st, shardChunkBounds, {x: hash});
+    assert.eq(1, shard.getCollection(ns).count(doc));
+    shards.push(shard);
+}
+assert.eq(3, (new Set(shards)).size);
+
+jsTest.log("Test 'find'");
+assert.eq(3, testDB.user.find({}).count());
+assert.eq(2, testDB.user.find({x: {$lt: 0}}).count());
+
+jsTest.log("Test 'update'");
+assert.commandWorked(testDB.user.update({x: -10}, {$set: {updated: true}}, {multi: true}));
+assert.eq(1, testDB.user.find({x: -10, updated: true}).count());
+assert.eq(1, shards[0].getCollection(ns).count({updated: true}));
+assert.eq(0, shards[1].getCollection(ns).count({updated: true}));
+assert.eq(0, shards[2].getCollection(ns).count({updated: true}));
+
+jsTest.log("Test 'findAndModify'");
+assert.commandWorked(
+    testDB.runCommand({findAndModify: collName, query: {x: -1}, update: {$set: {y: 1}}}));
+assert.eq(1, testDB.user.find({x: -1, y: 1}).count());
+assert.eq(0, shards[0].getCollection(ns).count({y: 1}));
+assert.eq(1, shards[1].getCollection(ns).count({y: 1}));
+assert.eq(0, shards[2].getCollection(ns).count({y: 1}));
+
+jsTest.log("Test 'remove'");
+assert.commandWorked(testDB.user.remove({x: 10}));
+assert.eq(2, testDB.user.find({}).count());
+assert.eq(1, shards[0].getCollection(ns).count({}));
+assert.eq(1, shards[1].getCollection(ns).count({}));
+assert.eq(0, shards[2].getCollection(ns).count({}));
+
+st.stop();
+})();
diff --git a/jstests/sharding/hash_crud_during_migration.js b/jstests/sharding/hash_crud_during_migration.js
new file mode 100644
index 00000000000..fe1982552d3
--- /dev/null
+++ b/jstests/sharding/hash_crud_during_migration.js
@@ -0,0 +1,115 @@
+/*
+ * Test that crud and find operations target the right shards during migration.
+ * @tags: [uses_transactions, uses_prepare_transaction]
+ */
+(function() {
+'use strict';
+
+load('jstests/libs/chunk_manipulation_util.js');
+load("jstests/sharding/libs/chunk_bounds_util.js");
+
+let st = new ShardingTest({shards: 3});
+let dbName = "test";
+let collName = "user";
+let ns = dbName + "." + collName;
+let configDB = st.s.getDB('config');
+let testDB = st.s.getDB(dbName);
+
+// For startParallelOps to write its state.
+let staticMongod = MongoRunner.runMongod({});
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard1.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 'hashed'}}));
+
+let chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
+
+jsTest.log("Test 'insert'");
+// Insert a doc while migrating the chunk that the doc belongs to.
+let doc = {x: 0};
+let hash = convertShardKeyToHashed(doc.x);
+let shardBoundsPair =
+    chunkBoundsUtil.findShardAndChunkBoundsForShardKey(st, shardChunkBounds, {x: hash});
+let fromShard = shardBoundsPair.shard;
+let toShard = st.getOther(fromShard);
+runCommandDuringTransferMods(
+    st.s, staticMongod, ns, shardBoundsPair.bounds, fromShard, toShard, () => {
+        assert.commandWorked(testDB.user.insert(doc));
+    });
+
+// Check that the inserted doc is on the recipient shard.
+assert.eq(1, testDB.user.find(doc).count());
+assert.eq(1, toShard.getCollection(ns).find(doc).count());
+
+// Clean up.
+assert.commandWorked(testDB.user.remove({}));
+chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
+
+// Insert docs that are expected to go to three different shards, check that the docs
+// are on the right shards and store the shard and chunk bounds for each doc.
+let docs = [{x: -10}, {x: -1}, {x: 10}];
+assert.commandWorked(testDB.user.insert(docs));
+let shards = [];
+let docChunkBounds = [];
+for (let doc of docs) {
+    let hash = convertShardKeyToHashed(doc.x);
+    let shardBoundsPair =
+        chunkBoundsUtil.findShardAndChunkBoundsForShardKey(st, shardChunkBounds, {x: hash});
+    assert.eq(1, shardBoundsPair.shard.getCollection(ns).find(doc).count());
+    shards.push(shardBoundsPair.shard);
+    docChunkBounds.push(shardBoundsPair.bounds);
+}
+assert.eq(3, (new Set(shards)).size);
+assert.eq(3, testDB.user.find({}).count());
+
+// Perform a series of operations on docs[1] while moving the chunk that it belongs to
+// from shards[1] to shards[2], then to shards[0] and back to shards[1].
+
+jsTest.log("Test 'update'");
+// Update the doc while migrating the chunk.
+fromShard = shards[1];
+toShard = shards[2];
+runCommandDuringTransferMods(st.s, staticMongod, ns, docChunkBounds[1], fromShard, toShard, () => {
+    assert.commandWorked(testDB.user.update({x: -1}, {$set: {updated: true}}, {multi: true}));
+});
+
+// Check that the doc is updated correctly.
+assert.eq(1, testDB.user.find({x: -1, updated: true}).count());
+assert.eq(0, shards[0].getCollection(ns).find({updated: true}).count());
+assert.eq(0, shards[1].getCollection(ns).find({updated: true}).count());
+assert.eq(1, shards[2].getCollection(ns).find({updated: true}).count());
+
+jsTest.log("Test 'findAndModify'");
+// findAndModify the doc while migrating the chunk.
+fromShard = shards[2];
+toShard = shards[0];
+runCommandDuringTransferMods(st.s, staticMongod, ns, docChunkBounds[1], fromShard, toShard, () => {
+    assert.commandWorked(
+        testDB.runCommand({findAndModify: collName, query: {x: -1}, update: {$set: {y: 1}}}));
+});

+// Check that the doc is updated correctly.
+assert.eq(1, testDB.user.find({x: -1, y: 1}).count());
+assert.eq(1, shards[0].getCollection(ns).count({y: 1}));
+assert.eq(0, shards[1].getCollection(ns).count({y: 1}));
+assert.eq(0, shards[2].getCollection(ns).count({y: 1}));
+
+jsTest.log("Test 'remove'");
+// Remove the doc while migrating the chunk.
+fromShard = shards[0];
+toShard = shards[1];
+runCommandDuringTransferMods(st.s, staticMongod, ns, docChunkBounds[1], fromShard, toShard, () => {
+    assert.commandWorked(testDB.user.remove({x: -1}));
+});
+
+// Check that the doc is removed correctly.
+assert.eq(2, testDB.user.find({}).count());
+assert.eq(1, shards[0].getCollection(ns).find({}).count());
+assert.eq(0, shards[1].getCollection(ns).find({}).count());
+assert.eq(1, shards[2].getCollection(ns).find({}).count());
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
+})();
diff --git a/jstests/sharding/hash_crud_txns_during_migration.js b/jstests/sharding/hash_crud_txns_during_migration.js
new file mode 100644
index 00000000000..1bcafad854d
--- /dev/null
+++ b/jstests/sharding/hash_crud_txns_during_migration.js
@@ -0,0 +1,135 @@
+/*
+ * Test that crud operations in transactions target the right shards during migration.
+ * @tags: [uses_transactions, uses_prepare_transaction]
+ */
+(function() {
+'use strict';
+
+load('jstests/libs/chunk_manipulation_util.js');
+load("jstests/sharding/libs/chunk_bounds_util.js");
+
+function runCommandInTxn(cmdFunc) {
+    let session = st.s.startSession();
+    session.startTransaction();
+    cmdFunc(session);
+    assert.commandWorked(session.commitTransaction_forTesting());
+    session.endSession();
+}
+
+let st = new ShardingTest({shards: 3});
+let dbName = "test";
+let collName = "user";
+let ns = dbName + "." + collName;
+let configDB = st.s.getDB('config');
+let testDB = st.s.getDB(dbName);
+
+// For startParallelOps to write its state.
+let staticMongod = MongoRunner.runMongod({});
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard1.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 'hashed'}}));
+
+let chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
+
+jsTest.log("Test 'insert'");
+// Insert a doc while migrating the chunk that the doc belongs to.
+let doc = {x: 0};
+let hash = convertShardKeyToHashed(doc.x);
+let shardBoundsPair =
+    chunkBoundsUtil.findShardAndChunkBoundsForShardKey(st, shardChunkBounds, {x: hash});
+let fromShard = shardBoundsPair.shard;
+let toShard = st.getOther(fromShard);
+runCommandDuringTransferMods(
+    st.s, staticMongod, ns, shardBoundsPair.bounds, fromShard, toShard, () => {
+        runCommandInTxn((session) => {
+            let sessionColl = session.getDatabase(dbName).getCollection(collName);
+            assert.commandWorked(sessionColl.insert(doc));
+        });
+    });
+
+// Check that the inserted doc is on the recipient shard.
+assert.eq(1, testDB.user.find(doc).count());
+assert.eq(1, toShard.getCollection(ns).find(doc).count());
+
+// Clean up.
+assert.commandWorked(testDB.user.remove({}));
+chunkDocs = configDB.chunks.find({ns: ns}).toArray();
+shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);
+
+// Insert docs that are expected to go to three different shards, check that the docs
+// are on the right shards and store the shard and chunk bounds for each doc.
+let docs = [{x: -10}, {x: -1}, {x: 10}];
+assert.commandWorked(testDB.user.insert(docs));
+let shards = [];
+let docChunkBounds = [];
+for (let doc of docs) {
+    let hash = convertShardKeyToHashed(doc.x);
+    let shardBoundsPair =
+        chunkBoundsUtil.findShardAndChunkBoundsForShardKey(st, shardChunkBounds, {x: hash});
+    assert.eq(1, shardBoundsPair.shard.getCollection(ns).find(doc).count());
+    shards.push(shardBoundsPair.shard);
+    docChunkBounds.push(shardBoundsPair.bounds);
+}
+assert.eq(3, (new Set(shards)).size);
+assert.eq(3, testDB.user.find({}).count());
+
+// Perform a series of operations on docs[1] while moving the chunk that it belongs to
+// from shards[1] to shards[2], then to shards[0] and back to shards[1].
+
+jsTest.log("Test 'update'");
+// Update the doc while migrating the chunk.
+fromShard = shards[1];
+toShard = shards[2];
+runCommandDuringTransferMods(st.s, staticMongod, ns, docChunkBounds[1], fromShard, toShard, () => {
+    runCommandInTxn((session) => {
+        let sessionColl = session.getDatabase(dbName).getCollection(collName);
+        assert.commandWorked(sessionColl.update({x: -1}, {$set: {updated: true}}, {multi: true}));
+    });
+});
+
+// Check that the doc is updated correctly.
+assert.eq(1, testDB.user.find({x: -1, updated: true}).count());
+assert.eq(0, shards[0].getCollection(ns).find({updated: true}).count());
+assert.eq(0, shards[1].getCollection(ns).find({updated: true}).count());
+assert.eq(1, shards[2].getCollection(ns).find({updated: true}).count());
+
+jsTest.log("Test 'findAndModify'");
+// findAndModify the doc while migrating the chunk.
+fromShard = shards[2];
+toShard = shards[0];
+runCommandDuringTransferMods(st.s, staticMongod, ns, docChunkBounds[1], fromShard, toShard, () => {
+    runCommandInTxn((session) => {
+        let sessionDB = session.getDatabase(dbName);
+        assert.commandWorked(sessionDB.runCommand(
+            {findAndModify: collName, query: {x: -1}, update: {$set: {y: 1}}}));
+    });
+});
+
+// Check that the doc is updated correctly.
+assert.eq(1, testDB.user.find({x: -1, y: 1}).count());
+assert.eq(1, shards[0].getCollection(ns).count({y: 1}));
+assert.eq(0, shards[1].getCollection(ns).count({y: 1}));
+assert.eq(0, shards[2].getCollection(ns).count({y: 1}));
+
+jsTest.log("Test 'remove'");
+// Remove the doc while migrating the chunk.
+fromShard = shards[0];
+toShard = shards[1];
+runCommandDuringTransferMods(st.s, staticMongod, ns, docChunkBounds[1], fromShard, toShard, () => {
+    runCommandInTxn((session) => {
+        let sessionColl = session.getDatabase(dbName).getCollection(collName);
+        assert.commandWorked(sessionColl.remove({x: -1}));
+    });
+});
+
+// Check that the doc is removed correctly.
+assert.eq(2, testDB.user.find({}).count());
+assert.eq(1, shards[0].getCollection(ns).find({}).count());
+assert.eq(0, shards[1].getCollection(ns).find({}).count());
+assert.eq(1, shards[2].getCollection(ns).find({}).count());
+
+st.stop();
+MongoRunner.stopMongod(staticMongod);
+})();
diff --git a/jstests/sharding/libs/chunk_bounds_util.js b/jstests/sharding/libs/chunk_bounds_util.js
new file mode 100644
index 00000000000..79b30a8d026
--- /dev/null
+++ b/jstests/sharding/libs/chunk_bounds_util.js
@@ -0,0 +1,74 @@
+/*
+ * Utilities for dealing with chunk bounds.
+ */
+var chunkBoundsUtil = (function() {
+    let _gte = function(shardKeyA, shardKeyB) {
+        return bsonWoCompare(shardKeyA, shardKeyB) >= 0;
+    };
+
+    let _lt = function(shardKeyA, shardKeyB) {
+        return bsonWoCompare(shardKeyA, shardKeyB) < 0;
+    };
+
+    let containsKey = function(shardKey, minKey, maxKey) {
+        return _gte(shardKey, minKey) && _lt(shardKey, maxKey);
+    };
+
+    /*
+     * Returns an object mapping each shard name to an array of chunk bounds
+     * that it owns.
+     */
+    let findShardChunkBounds = function(chunkDocs) {
+        let allBounds = {};
+        for (let chunkDoc of chunkDocs) {
+            let bounds = [chunkDoc.min, chunkDoc.max];
+
+            if (!(chunkDoc.shard in allBounds)) {
+                allBounds[chunkDoc.shard] = [bounds];
+            } else {
+                allBounds[chunkDoc.shard].push(bounds);
+            }
+        }
+        return allBounds;
+    };
+
+    /*
+     * Returns the corresponding shard object for the given shard name.
+     */
+    let _getShard = function(st, shardName) {
+        for (let i = 0; i < st._connections.length; i++) {
+            if (st._connections[i].shardName == shardName) {
+                return st._connections[i];
+            }
+        }
+    };
+
+    /*
+     * Returns the shard object for the shard that owns the chunk that contains
+     * the given shard key value and the bounds of the chunk.
+     */
+    let findShardAndChunkBoundsForShardKey = function(st, shardChunkBounds, shardKey) {
+        for (const [shardName, chunkBounds] of Object.entries(shardChunkBounds)) {
+            for (let bounds of chunkBounds) {
+                if (containsKey(shardKey, bounds[0], bounds[1])) {
+                    return {shard: _getShard(st, shardName), bounds: bounds};
+                }
+            }
+        }
+    };
+
+    /*
+     * Returns the shard object for the shard that owns the chunk that contains
+     * the given shard key value.
+     */
+    let findShardForShardKey = function(st, shardChunkBounds, shardKey) {
+        return findShardAndChunkBoundsForShardKey(st, shardChunkBounds, shardKey).shard;
+    };
+
+    return {
+        containsKey,
+        findShardChunkBounds,
+        findShardAndChunkBoundsForShardKey,
+        findShardForShardKey
+    };
+})();
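
As a usage note (not part of the commit): the pattern the tests above follow with this utility is to read the chunk metadata from the config database once and then resolve the shard that should own a given hashed key. A sketch, assuming a running ShardingTest st and a namespace ns sharded on {x: 'hashed'} as in the tests:

// Map each shard to the chunk bounds it currently owns.
let chunkDocs = st.s.getDB('config').chunks.find({ns: ns}).toArray();
let shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunkDocs);

// Insert through mongos, then verify the doc landed on the predicted shard.
assert.commandWorked(st.s.getCollection(ns).insert({x: 42}));
let owningShard = chunkBoundsUtil.findShardForShardKey(
    st, shardChunkBounds, {x: convertShardKeyToHashed(42)});
assert.eq(1, owningShard.getCollection(ns).count({x: 42}));
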