author | Greg Studer <greg@10gen.com> | 2012-06-11 12:58:46 -0400
---|---|---
committer | Greg Studer <greg@10gen.com> | 2012-06-11 16:25:23 -0400
commit | 11da8181ead847d3a48e7953a18bdb4e1f347ae9 (patch) |
tree | b08335d2e7959146e8851bfc8ef0cea5358d6584 /jstests |
parent | d27fa2fd6b121916a804cd61cbde806fb6a22e8a (diff) |
download | mongo-11da8181ead847d3a48e7953a18bdb4e1f347ae9.tar.gz |
SERVER-4476 reload shard data before migration starts
Diffstat (limited to 'jstests')
-rw-r--r-- | jstests/sharding/addshard5.js | 63 |
1 file changed, 63 insertions, 0 deletions
diff --git a/jstests/sharding/addshard5.js b/jstests/sharding/addshard5.js
new file mode 100644
index 00000000000..b272ae4ab1e
--- /dev/null
+++ b/jstests/sharding/addshard5.js
@@ -0,0 +1,63 @@
+//
+// Tests that dropping and re-adding a shard with the same name to a cluster doesn't mess up
+// migrations
+//
+
+var st = new ShardingTest({ shards : 3, mongos : 1, other : { separateConfig : true } })
+st.stopBalancer()
+
+var mongos = st.s
+var admin = mongos.getDB( "admin" )
+var config = mongos.getDB( "config" )
+var coll = mongos.getCollection( "foo.bar" )
+
+// Get all the shard info and connections
+var shards = []
+config.shards.find().sort({ _id : 1 }).forEach( function( doc ){
+    shards.push( Object.merge( doc, { conn : new Mongo( doc.host ) } ) )
+})
+
+//
+// Remove the last shard so we can use it later
+//
+
+// Drain & remove
+printjson( admin.runCommand({ removeShard : shards[2]._id }) )
+printjson( admin.runCommand({ removeShard : shards[2]._id }) )
+
+// Shard collection
+printjson( admin.runCommand({ enableSharding : coll.getDB() + "" }) )
+// Just to be sure what primary we start from
+printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) )
+printjson( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }) )
+
+// Insert one document
+coll.insert({ hello : "world" })
+assert.eq( null, coll.getDB().getLastError() )
+
+// Migrate the collection to and from shard2 so shard1 loads the shard2 host
+printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id }) )
+printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[0]._id }) )
+
+//
+// Drop and re-add shard with last shard's host
+//
+
+printjson( admin.runCommand({ removeShard : shards[1]._id }) )
+printjson( admin.runCommand({ removeShard : shards[1]._id }) )
+printjson( admin.runCommand({ addShard : shards[2].host, name : shards[1]._id }) )
+
+jsTest.log( "Shard was dropped and re-added with same name..." )
+st.printShardingStatus()
+
+shards[0].conn.getDB( "admin" ).runCommand({ setParameter : 1, traceExceptions : true })
+shards[2].conn.getDB( "admin" ).runCommand({ setParameter : 1, traceExceptions : true })
+
+// Try a migration
+printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id }) )
+
+assert.neq( null, shards[2].conn.getCollection( coll + "" ).findOne() )
+
+st.stop()
+
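
For anyone reproducing the scenario by hand rather than through ShardingTest, the same checks can be driven from a mongo shell connected to the mongos. The sketch below is illustrative only and is not part of the commit; the namespace "foo.bar" matches the test, but the shard name "shard0001" is a hypothetical placeholder for whichever shard was dropped and re-added.

```javascript
// Minimal sketch (mongo shell session against the mongos, not part of the commit):
// confirm that the re-added shard name now maps to the new host, then retry the migration.
var config = db.getSiblingDB("config");

// Print the current shard-name -> host mapping from the config metadata.
config.shards.find().sort({ _id: 1 }).forEach(function (doc) {
    print("shard " + doc._id + " -> " + doc.host);
});

// Retry the chunk migration to the re-added shard. "shard0001" is a
// hypothetical shard _id; substitute the name that was dropped and re-added.
printjson(db.adminCommand({
    moveChunk: "foo.bar",
    find: { _id: 0 },
    to: "shard0001"
}));
```

This mirrors what the test verifies at the end: after the migration, the document should be readable through shards[2].conn, i.e. on the new host that now sits behind the reused shard name.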