| author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2015-12-30 17:01:13 -0500 |
|---|---|---|
| committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2016-01-13 14:43:36 -0500 |
| commit | ce0f41c5ace60ba89e55c5244cdaf79c5d78c503 (patch) | |
| tree | 5b058db6ebc16648e9d2457834dd29126549e895 /jstests/sharding | |
| parent | 8652b39ffa98d938b60609f76a4ce2f7b73faad8 (diff) | |
| download | mongo-ce0f41c5ace60ba89e55c5244cdaf79c5d78c503.tar.gz | |
SERVER-22027 Sharding should not retry killed operations
This change introduces a different interruption code (11602) which will be
used to kill operations during replication primary stepdown so the config
server retry logic can differentiate them from user-killed operations.
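The retry logic itself lives in the server's C++ code and is not part of this page's diff, which only touches the jstests. As a rough illustration only, the mongo-shell sketch below shows the kind of branching the new code makes possible: retry when an operation was interrupted by a primary stepdown (the new code 11602), but not when it was killed by a user (the pre-existing interruption code 11601). The helper name `runWithStepdownRetry` is made up for this example.

```javascript
// Illustrative sketch only -- not the server's actual retry code.
function runWithStepdownRetry(targetDb, cmd, maxAttempts) {
    for (var attempt = 1; attempt <= maxAttempts; attempt++) {
        var res = targetDb.runCommand(cmd);
        if (res.ok) {
            return res;
        }

        if (res.code === 11602) {
            // Interrupted because the primary stepped down -- safe to retry.
            print("Retrying after stepdown interruption (attempt " + attempt + ")");
            continue;
        }

        // Any other failure, including a user-issued killOp (code 11601), is not retried.
        return res;
    }

    throw new Error("Command did not succeed after " + maxAttempts + " attempts");
}
```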
Diffstat (limited to 'jstests/sharding')
| File | Lines changed |
|---|---|
| jstests/sharding/auth_add_shard.js | 86 |
| jstests/sharding/features3.js | 33 |
| jstests/sharding/mongos_rs_shard_failure_tolerance.js | 9 |
3 files changed, 63 insertions, 65 deletions
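A recurring pattern in the test cleanup below is replacing hand-rolled `result.ok` checks with the shell's `assert.commandWorked()` / `assert.commandFailed()` helpers. A minimal, self-contained example of those helpers (the commands here are arbitrary):

```javascript
// assert.commandWorked() returns the response when ok: 1 and throws otherwise;
// assert.commandFailed() is the inverse and throws if the command unexpectedly succeeds.
var res = assert.commandWorked(db.runCommand({ ping: 1 }));
printjson(res);

// An unknown command fails, so commandFailed() passes here.
assert.commandFailed(db.runCommand({ thisCommandDoesNotExist: 1 }));
```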
diff --git a/jstests/sharding/auth_add_shard.js b/jstests/sharding/auth_add_shard.js
index a20f34034cd..ef2e5dfa760 100644
--- a/jstests/sharding/auth_add_shard.js
+++ b/jstests/sharding/auth_add_shard.js
@@ -1,8 +1,8 @@
-
-/* SERVER-5124
- * The puporse of this test is to test authentication when adding/removing a shard
- * The test sets up a sharded system, then adds/remove a shard.
- */
+// SERVER-5124
+// The puporse of this test is to test authentication when adding/removing a shard. The test sets
+// up a sharded system, then adds/removes a shard.
+(function() {
+'use strict';

 // login method to login into the database
 function login(userObj) {
@@ -11,89 +11,89 @@ function login(userObj) {
 }

 // admin user object
-adminUser = { db : "admin", username : "foo", password : "bar" };
+var adminUser = { db: "admin", username: "foo", password: "bar" };

 //set up a 2 shard cluster with keyfile
-var st = new ShardingTest( { name : "auth_add_shard1", shards : 1,
-                             mongos : 1, keyFile : "jstests/libs/key1" } )
+var st = new ShardingTest({ name: "auth_add_shard1", shards: 1,
+                            mongos: 1, keyFile: "jstests/libs/key1" });

-var mongos = st.s0
-var admin = mongos.getDB("admin")
+var mongos = st.s0;
+var admin = mongos.getDB("admin");

 print("1 shard system setup");

 //add the admin user
 print("adding user");
-mongos.getDB(adminUser.db).createUser({user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
+mongos.getDB(adminUser.db).createUser({ user: adminUser.username,
+                                        pwd: adminUser.password,
+                                        roles: jsTest.adminUserRoles});

 //login as admin user
 login(adminUser);

-st.stopBalancer();
-assert.eq( 1, st.config.shards.count() , "initial server count wrong" );
+assert.eq(1, st.config.shards.count() , "initial server count wrong");

 //start a mongod with NO keyfile
 var conn = MongoRunner.runMongod({});
-print (conn);
+print(conn);

 // --------------- Test 1 --------------------
-// Add shard to the existing cluster
-var result = admin.runCommand( {addShard : conn.host} );
-printjson(result);
-// make sure the shard wasn't added
-assert.eq(result.ok, 0, "added shard without keyfile");
+// Add shard to the existing cluster (should fail because it was added without a keyfile)
+printjson(assert.commandFailed(admin.runCommand({ addShard: conn.host })));
+
 // stop mongod
-MongoRunner.stopMongod( conn );
+MongoRunner.stopMongod(conn);

 //--------------- Test 2 --------------------
 //start mongod again, this time with keyfile
-var conn = MongoRunner.runMongod( {keyFile : "jstests/libs/key1"} );
+var conn = MongoRunner.runMongod({keyFile: "jstests/libs/key1"});
 //try adding the new shard
-var result = admin.runCommand( {addShard : conn.host} );
-printjson(result);
-//make sure the shard was added successfully
-assert.eq(result.ok, 1, "failed to add shard with keyfile");
+printjson(assert.commandWorked(admin.runCommand({ addShard: conn.host })));

 //Add some data
 var db = mongos.getDB("foo");
-var collA = mongos.getCollection("foo.bar")
+var collA = mongos.getCollection("foo.bar");

 // enable sharding on a collection
-printjson( admin.runCommand( { enableSharding : "" + collA.getDB() } ) )
-printjson( admin.runCommand( { movePrimary : "foo", to : "shard0000" } ) );
+assert.commandWorked(admin.runCommand({ enableSharding: "" + collA.getDB() }));
+st.ensurePrimaryShard("foo", "shard0000");

-admin.runCommand( { shardCollection : "" + collA, key : { _id : 1 } } )
+assert.commandWorked(admin.runCommand({ shardCollection: "" + collA, key: { _id: 1 } }));

 // add data to the sharded collection
-for (i=0; i<4; i++) {
-    db.bar.save({_id:i});
-    printjson(admin.runCommand( { split : "" + collA, middle : { _id : i } }) )
+for (var i = 0; i < 4; i++) {
+    db.bar.save({ _id: i });
+    assert.commandWorked(admin.runCommand({ split: "" + collA, middle: { _id: i } }));
 }
+
 // move a chunk
-printjson( admin.runCommand( { moveChunk : "foo.bar", find : { _id : 1 }, to : "shard0001" }) )
+assert.commandWorked(admin.runCommand({ moveChunk: "foo.bar", find: { _id: 1 }, to: "shard0001" }));

 //verify the chunk was moved
-admin.runCommand( { flushRouterConfig : 1 } )
-var config = mongos.getDB("config")
-config.printShardingStatus(true)
+admin.runCommand({ flushRouterConfig: 1 });
+
+var config = mongos.getDB("config");
+config.printShardingStatus(true);

 // start balancer before removing the shard
 st.startBalancer();

 //--------------- Test 3 --------------------
 // now drain the shard
-var result = admin.runCommand( {removeShard : conn.host} );
-printjson(result);
-assert.eq(result.ok, 1, "failed to start draining shard");
+printjson(assert.commandWorked(admin.runCommand({removeShard: conn.host})));

 // give it some time to drain
 assert.soon(function() {
-    var result = admin.runCommand( {removeShard : conn.host} );
+    var result = admin.runCommand({removeShard: conn.host});
     printjson(result);
-    return result.ok && result.state == "completed"
+
+    return result.ok && result.state == "completed";
 }, "failed to drain shard completely", 5 * 60 * 1000)

-assert.eq( 1, st.config.shards.count() , "removed server still appears in count" );
+assert.eq(1, st.config.shards.count() , "removed server still appears in count");
+
+MongoRunner.stopMongod(conn);

-MongoRunner.stopMongod( conn );
 st.stop();
+
+})();
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index 84c857e644c..03a5cedc25a 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -29,8 +29,9 @@ s.startBalancer();

 // insert 10k small documents into the sharded collection
 var bulk = dbForTest.foo.initializeUnorderedBulkOp();
-for (i = 0; i < numDocs; i++)
+for (var i = 0; i < numDocs; i++) {
     bulk.insert({ _id: i });
+}
 assert.writeOK(bulk.execute());

 var x = dbForTest.foo.stats();
@@ -53,12 +54,10 @@ assert(!x.sharded, "XXX3: " + tojson(x));

 // fork shell and start querying the data
 var start = new Date();

-// TODO: Still potential problem when our sampling of current ops misses when $where is active -
-// solution is to increase sleep time
-var whereKillSleepTime = 10000;
+var whereKillSleepTime = 1000;
 var parallelCommand = "db.foo.find(function() { " +
-                      " sleep( " + whereKillSleepTime + " ); " +
+                      " sleep(" + whereKillSleepTime + "); " +
                       " return false; " +
                       "}).itcount(); ";

@@ -68,17 +67,22 @@ var awaitShell = startParallelShell(parallelCommand, s.s.port);
 print("done forking shell at: " + Date());

 // Get all current $where operations
-function getMine(printInprog) {
+function getInProgWhereOps() {
     var inprog = dbForTest.currentOp().inprog;
-    if (printInprog)
-        printjson(inprog);

     // Find all the where queries
     var myProcs = [];
-    for (var x = 0; x < inprog.length; x++) {
-        if (inprog[x].query && inprog[x].query.filter && inprog[x].query.filter.$where) {
-            myProcs.push(inprog[x]);
+    inprog.forEach(function(op) {
+        if (op.query && op.query.filter && op.query.filter.$where) {
+            myProcs.push(op);
         }
+    });
+
+    if (myProcs.length == 0) {
+        print('No $where operations found: ' + tojson(inprog));
+    }
+    else {
+        print('Found ' + myProcs.length + ' $where operations: ' + tojson(myProcs));
     }

     return myProcs;
@@ -86,16 +90,11 @@ function getMine(printInprog) {
 var curOpState = 0; // 0 = not found, 1 = killed
 var killTime = null;

-var i = 0;
 var mine;

 assert.soon(function() {
     // Get all the current operations
-    mine = getMine(true); // SERVER-8794: print all operations
-
-    // get curren tops, but only print out operations before we see a $where op has started
-    // mine = getMine(curOpState == 0 && i > 20);
-    i++;
+    mine = getInProgWhereOps();

     // Wait for the queries to start (one per shard, so 2 total)
     if (curOpState == 0 && mine.length == 2) {
diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
index 6cd99a1bbc3..b5117439925 100644
--- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
@@ -10,11 +10,10 @@
 // sequence), idle (connection is connected but not used before a shard change), and new
 // (connection connected after shard change).
 //
+(function() {
+'use strict';

-var options = {rs : true, rsOptions : { nodes : 2 }};
-
-var st = new ShardingTest({shards : 3, mongos : 1, other : options});
-st.stopBalancer();
+var st = new ShardingTest({ shards: 3, mongos: 1, other: { rs: true, rsOptions: { nodes: 2 } } });

 var mongos = st.s0;
 var admin = mongos.getDB( "admin" );
@@ -412,6 +411,6 @@ assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert

 gc(); // Clean up new connections

-jsTest.log("DONE!");

 st.stop();
+})();
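Two of the test files in this commit (auth_add_shard.js and mongos_rs_shard_failure_tolerance.js) are also wrapped in an immediately-invoked function expression with 'use strict', which keeps test-local variables out of the shell's global scope. Stripped down to its bare shape (the test body here is only a placeholder):

```javascript
(function() {
'use strict';

// Set up the minimal cluster the test needs.
var st = new ShardingTest({ shards: 1, mongos: 1 });

// ... test body goes here ...

// Tear the cluster down so the variables above never leak into global scope.
st.stop();
})();
```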