author     Spencer T Brody <spencer@mongodb.com>    2015-10-23 11:26:05 -0400
committer  Spencer T Brody <spencer@mongodb.com>    2015-10-23 15:51:03 -0400
commit     3887f0f51467fe97060bdfdc7e1021d54a73467a (patch)
tree       e4054eb8a8305cf4bf85cd6aa2016c2c24e213bf
parent     b314afeac81e704e60743363ba7ab83129d2ba68 (diff)
download   mongo-3887f0f51467fe97060bdfdc7e1021d54a73467a.tar.gz
SERVER-21098 Clean up some sharding jstests to allow them to run under both CSRS and SCCC mode
 buildscripts/resmokeconfig/suites/sharding_auth.yml                                                          |   2
 buildscripts/resmokeconfig/suites/sharding_auth_audit.yml                                                    |   2
 jstests/sharding/bad_config_load.js (renamed from jstests/sharding/replset_config/bad_config_load.js)        |  21
 jstests/sharding/count_config_servers.js (renamed from jstests/sharding/sync_cluster_config/sync_conn_cmd.js) |  29
 jstests/sharding/parallel.js (renamed from jstests/sharding/sync_cluster_config/parallel.js)                 |  33
 jstests/sharding/repl_monitor_refresh.js (renamed from jstests/sharding/sync_cluster_config/repl_monitor_refresh.js) |   6
 jstests/sharding/replset_config/batch_write_command_sharded.js (new file)                                    | 250
 jstests/sharding/replset_config/config_rs_no_primary.js                                                      |   4
 jstests/sharding/rs_stepdown_and_pooling.js (renamed from jstests/sharding/sync_cluster_config/rs_stepdown_and_pooling.js) |   7
 jstests/sharding/sync_cluster_config/batch_write_command_sharded.js                                          |   4
 src/mongo/shell/replsettest.js                                                                               |  10
 11 files changed, 316 insertions(+), 52 deletions(-)
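The renamed tests below all follow the same cleanup pattern: the SCCC-specific sync / other: { sync: true } option is dropped from the ShardingTest constructor so the suite decides the config server mode, and the test body is wrapped in a strict-mode IIFE. A minimal sketch of that pattern (illustrative only; the concrete changes are in the per-file diffs that follow):

(function() {
"use strict";

// Previously: new ShardingTest({ shards: 2, mongos: 1, other: { sync: true } })
// Without the sync option the test can run under both CSRS and SCCC config servers.
var st = new ShardingTest({ shards: 2, mongos: 1 });

// ... test body ...

st.stop();
}());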
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth.yml b/buildscripts/resmokeconfig/suites/sharding_auth.yml
index 37fa3fc58cd..1e3b8f198a1 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth.yml
@@ -18,7 +18,7 @@ selector:
- jstests/sharding/sync_cluster_config/*[aA]uth*.js
# Skip these additional tests when running with auth enabled.
- jstests/sharding/copydb_from_mongos.js # SERVER-13080
- - jstests/sharding/sync_cluster_config/parallel.js
+ - jstests/sharding/parallel.js
- jstests/sharding/sync_cluster_config/sync[36].js
executor:
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_audit.yml b/buildscripts/resmokeconfig/suites/sharding_auth_audit.yml
index 7dd52783512..f313ebc2ae6 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth_audit.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth_audit.yml
@@ -18,7 +18,7 @@ selector:
- jstests/sharding/sync_cluster_config/*[aA]uth*.js
# Skip these additional tests when running with auth enabled.
- jstests/sharding/copydb_from_mongos.js # SERVER-13080
- - jstests/sharding/sync_cluster_config/parallel.js
+ - jstests/sharding/parallel.js
- jstests/sharding/sync_cluster_config/sync[36].js
executor:
diff --git a/jstests/sharding/replset_config/bad_config_load.js b/jstests/sharding/bad_config_load.js
index 5f354eb7121..7c9ae6502a0 100644
--- a/jstests/sharding/replset_config/bad_config_load.js
+++ b/jstests/sharding/bad_config_load.js
@@ -1,22 +1,15 @@
//
// Test for what happens when config servers are down and the database config is loaded
// Should fail sanely
-// Note: Test uses only 2.0 compatible features to make backport easier.
//
+(function() {
+"use strict";
-var st = new ShardingTest({ shards : 2, mongos : 1, sync : false })
+var st = new ShardingTest({ shards : 1, mongos : 1 })
var mongos = st.s
var coll = mongos.getCollection( "foo.bar" )
-mongos.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 })
-mongos.getDB( "admin" ).runCommand({ movePrimary : coll.getDB() + "", to : "shard0001" })
-
-// Need to start two shards and remove one (which is also the config server) b/c 2.0 branch
-// ShardingTest annoyingly doesn't have non-replica set separateConfig options
-mongos.getDB( "admin" ).runCommand({ removeShard : "shard0000" })
-mongos.getDB( "admin" ).runCommand({ removeShard : "shard0000" })
-
// Make sure mongos has no database info currently loaded
mongos.getDB( "admin" ).runCommand({ flushRouterConfig : 1 })
@@ -24,7 +17,9 @@ jsTestLog( "Setup complete!" )
st.printShardingStatus()
jsTestLog( "Stopping config servers" );
-st.configRS.stopSet();
+for (var i = 0; i < st._configServers.length; i++) {
+ MongoRunner.stopMongod(st._configServers[i]);
+}
jsTestLog( "Config flushed and config servers down!" )
@@ -40,7 +35,8 @@ for( var i = 0; i < 2; i++ ){
printjson( e )
// Make sure we get a transport error, and not a no-primary error
- assert(e.code == 10276 || // Transport error
+ assert(e.code == 8002 || // All servers down/unreachable in SyncClusterConnection
+ e.code == 10276 || // Transport error
e.code == 13328 || // Connect error
e.code == 13639 || // Connect error to replSet primary
e.code == ErrorCodes.HostUnreachable ||
@@ -52,3 +48,4 @@ for( var i = 0; i < 2; i++ ){
jsTestLog( "Done!" )
st.stop()
+}());
\ No newline at end of file
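The shutdown loop above replaces the CSRS-only st.configRS.stopSet() call. As a hedged sketch, the same idea could be factored into a hypothetical helper (not part of this commit), assuming ShardingTest exposes one connection per config server via st._configServers in both CSRS and SCCC mode:

// Hypothetical helper: stop every config server regardless of config server mode,
// assuming st._configServers holds a connection object per config server.
function stopAllConfigServers(st) {
    for (var i = 0; i < st._configServers.length; i++) {
        MongoRunner.stopMongod(st._configServers[i]);
    }
}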
diff --git a/jstests/sharding/sync_cluster_config/sync_conn_cmd.js b/jstests/sharding/count_config_servers.js
index 5bf44cb5969..86517073336 100644
--- a/jstests/sharding/sync_cluster_config/sync_conn_cmd.js
+++ b/jstests/sharding/count_config_servers.js
@@ -1,13 +1,13 @@
-// This test fails when run with authentication due to SERVER-6327
/**
- * Test SyncClusterConnection commands using call instead of findOne
+ * Test count commands against the config servers, including when some of them are down.
+ * This test fails when run with authentication due to SERVER-6327
*/
-
(function() {
+"use strict";
+
+var st = new ShardingTest({name: 'sync_conn_cmd', shards: 0});
+st.s.setSlaveOk(true);
-var st = new ShardingTest({ name: 'sync_conn_cmd',
- shards: 0,
- other: { sync: true }});
var configDB = st.config;
var coll = configDB.test;
@@ -15,6 +15,11 @@ for( var x = 0; x < 10; x++ ){
assert.writeOK(coll.insert({ v: x }));
}
+if (st.configRS) {
+ // Make sure the inserts are replicated to all config servers.
+ st.configRS.awaitReplication();
+}
+
var testNormalCount = function(){
var cmdRes = configDB.runCommand({ count: coll.getName() });
assert( cmdRes.ok );
@@ -40,18 +45,14 @@ testCountWithQuery();
testInvalidCount();
// Test with the first config server down
-var firstConfigOpts = st.c0.commandLine;
-MongoRunner.stopMongod( firstConfigOpts.port );
+MongoRunner.stopMongod(st.c0);
testNormalCount();
testCountWithQuery();
testInvalidCount();
-firstConfigOpts.restart = true;
-MongoRunner.runMongod( firstConfigOpts );
-
-// Test with the second config server down
-MongoRunner.stopMongod( st.c1.commandLine.port );
+// Test with the first and second config server down
+MongoRunner.stopMongod(st.c1);
jsTest.log( 'Second server is down' );
testNormalCount();
@@ -60,4 +61,4 @@ testInvalidCount();
st.stop();
-})();
+}());
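The if (st.configRS) guard added above is what keeps this test dual-mode: under CSRS the inserts must be allowed to replicate before counting against a possibly-secondary config server, while under SCCC st.configRS is undefined and the guard is simply skipped. A condensed sketch of that guard as used above:

if (st.configRS) {
    // CSRS: wait until the inserts have reached every config server node.
    st.configRS.awaitReplication();
}
// SCCC: st.configRS is undefined, so no explicit wait is performed here.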
diff --git a/jstests/sharding/sync_cluster_config/parallel.js b/jstests/sharding/parallel.js
index facc29ea361..e4c2b462851 100644
--- a/jstests/sharding/sync_cluster_config/parallel.js
+++ b/jstests/sharding/parallel.js
@@ -1,23 +1,23 @@
// This test fails when run with authentication because benchRun with auth is broken: SERVER-6388
-var numShards = 3
+(function() {
+"use strict";
-var s = new ShardingTest({ name: "parallel",
- shards: numShards,
- mongos: 2,
- verbose: 1,
- other: { sync : true } });
+var numShards = 3
+var s = new ShardingTest({name: "parallel", shards: numShards, mongos: 2});
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-db = s.getDB( "test" );
+var db = s.getDB( "test" );
-N = 10000;
+var N = 10000;
-for ( i=0; i<N; i+=(N/12) ) {
- s.adminCommand( { split : "test.foo" , middle : { _id : i } } )
- sh.moveChunk( "test.foo", { _id : i } , "shard000" + Math.floor( Math.random() * numShards ) )
+for (var i=0; i<N; i+=(N/12)) {
+ s.adminCommand({split: "test.foo", middle: {_id: i}});
+ s.s.getDB('admin').runCommand({moveChunk: "test.foo",
+ find: {_id: i},
+ to: "shard000" + Math.floor(Math.random() * numShards)});
}
s.setBalancer( true )
@@ -26,11 +26,11 @@ for ( i=0; i<N; i++ )
bulk.insert({ _id: i });
assert.writeOK(bulk.execute());
-doCommand = function( dbname , cmd ) {
- x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
+var doCommand = function( dbname , cmd ) {
+ x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
host : db.getMongo().host , parallel : 2 , seconds : 2 } )
printjson(x)
- x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
+ x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
host : s._mongos[1].host , parallel : 2 , seconds : 2 } )
printjson(x)
}
@@ -38,8 +38,9 @@ doCommand = function( dbname , cmd ) {
doCommand( "test" , { dbstats : 1 } )
doCommand( "config" , { dbstats : 1 } )
-x = s.getDB( "config" ).stats()
+var x = s.getDB( "config" ).stats()
assert( x.ok , tojson(x) )
printjson(x)
s.stop()
+}());
\ No newline at end of file
diff --git a/jstests/sharding/sync_cluster_config/repl_monitor_refresh.js b/jstests/sharding/repl_monitor_refresh.js
index ba25c9c5234..6767a165d9d 100644
--- a/jstests/sharding/sync_cluster_config/repl_monitor_refresh.js
+++ b/jstests/sharding/repl_monitor_refresh.js
@@ -2,10 +2,11 @@
* Test for making sure that the replica seed list in the config server does not
* become invalid when a replica set reconfig happens.
*/
+(function() {
+"use strict";
var NODE_COUNT = 3;
-var st = new ShardingTest({ shards: { rs0: { nodes: NODE_COUNT, oplogSize: 10 }},
- config : 3, sync: true });
+var st = new ShardingTest({ shards: { rs0: { nodes: NODE_COUNT, oplogSize: 10 }}});
var replTest = st.rs0;
var mongos = st.s;
@@ -71,3 +72,4 @@ assert.soon(function() {
st.stop();
+}());
\ No newline at end of file
diff --git a/jstests/sharding/replset_config/batch_write_command_sharded.js b/jstests/sharding/replset_config/batch_write_command_sharded.js
new file mode 100644
index 00000000000..2b88228477b
--- /dev/null
+++ b/jstests/sharding/replset_config/batch_write_command_sharded.js
@@ -0,0 +1,250 @@
+//
+// Tests sharding-related batch write protocol functionality
+// NOTE: Basic write functionality is tested via the passthrough tests, this file should contain
+// *only* mongos-specific tests.
+//
+(function() {
+"use strict";
+
+// Only reason for using localhost name is to make the test consistent with naming host so it
+// will be easier to check for the host name inside error objects.
+var options = {useHostname: false};
+var st = new ShardingTest({shards: 2, mongos: 1, config: 3, other: options});
+st.stopBalancer();
+
+var mongos = st.s0;
+var admin = mongos.getDB( "admin" );
+var config = mongos.getDB( "config" );
+var shards = config.shards.find().toArray();
+var configConnStr = st._configDB;
+
+jsTest.log("Starting sharding batch write tests...");
+
+var request;
+var result;
+
+// NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
+
+//
+//
+// Mongos _id autogeneration tests for sharded collections
+
+var coll = mongos.getCollection("foo.bar");
+assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB().toString() }));
+st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+assert.commandWorked(admin.runCommand({ shardCollection : coll.toString(),
+ key : { _id : 1 } }));
+
+//
+// Basic insert no _id
+coll.remove({});
+printjson( request = {insert : coll.getName(),
+ documents: [{ a : 1 }] } );
+printjson( result = coll.runCommand(request) );
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(1, coll.count());
+
+//
+// Multi insert some _ids
+coll.remove({});
+printjson( request = {insert : coll.getName(),
+ documents: [{ _id : 0, a : 1 }, { a : 2 }] } );
+printjson( result = coll.runCommand(request) );
+assert(result.ok);
+assert.eq(2, result.n);
+assert.eq(2, coll.count());
+assert.eq(1, coll.count({ _id : 0 }));
+
+//
+// Ensure generating many _ids doesn't push us over limits
+var maxDocSize = (16 * 1024 * 1024) / 1000;
+var baseDocSize = Object.bsonsize({ a : 1, data : "" });
+var dataSize = maxDocSize - baseDocSize;
+
+var data = "";
+for (var i = 0; i < dataSize; i++)
+ data += "x";
+
+var documents = [];
+for (var i = 0; i < 1000; i++) documents.push({ a : i, data : data });
+
+assert.commandWorked(coll.getMongo().getDB("admin").runCommand({ setParameter : 1, logLevel : 4 }));
+coll.remove({});
+request = { insert : coll.getName(),
+ documents: documents };
+printjson( result = coll.runCommand(request) );
+assert(result.ok);
+assert.eq(1000, result.n);
+assert.eq(1000, coll.count());
+
+//
+//
+// Config server upserts (against admin db, for example) require _id test
+var adminColl = admin.getCollection(coll.getName());
+
+//
+// Without _id
+adminColl.remove({});
+printjson( request = {update : adminColl.getName(),
+ updates : [{ q : { a : 1 }, u : { a : 1 }, upsert : true }]});
+var result = adminColl.runCommand(request);
+assert.commandWorked(result);
+assert.eq(1, result.n);
+assert.eq(1, adminColl.count());
+
+//
+// With _id
+adminColl.remove({});
+printjson( request = {update : adminColl.getName(),
+ updates : [{ q : { _id : 1, a : 1 }, u : { a : 1 }, upsert : true }]});
+assert.commandWorked(result = adminColl.runCommand(request));
+assert.eq(1, result.n);
+assert.eq(1, adminColl.count());
+
+//
+//
+// Stale config progress tests
+// Set up a new collection across two shards, then revert the chunks to an earlier state to put
+// mongos and mongod permanently out of sync.
+
+// START SETUP
+var brokenColl = mongos.getCollection( "broken.coll" );
+assert.commandWorked(admin.runCommand({ enableSharding : brokenColl.getDB().toString() }));
+printjson(admin.runCommand({ movePrimary : brokenColl.getDB().toString(), to : shards[0]._id }));
+assert.commandWorked(admin.runCommand({ shardCollection : brokenColl.toString(),
+ key : { _id : 1 } }));
+assert.commandWorked(admin.runCommand({ split : brokenColl.toString(),
+ middle : { _id : 0 } }));
+
+var oldChunks = config.chunks.find().toArray();
+
+// Start a new mongos and bring it up-to-date with the chunks so far
+
+var staleMongos = MongoRunner.runMongos({ configdb : configConnStr });
+brokenColl = staleMongos.getCollection(brokenColl.toString());
+assert.writeOK(brokenColl.insert({ hello : "world" }));
+
+// Modify the chunks to make shards at a higher version
+
+assert.commandWorked(admin.runCommand({ moveChunk : brokenColl.toString(),
+ find : { _id : 0 },
+ to : shards[1]._id }));
+
+// Rewrite the old chunks back to the config server
+
+assert.writeOK(config.chunks.remove({}));
+for ( var i = 0; i < oldChunks.length; i++ )
+ assert.writeOK(config.chunks.insert(oldChunks[i]));
+
+// Stale mongos can no longer bring itself up-to-date!
+// END SETUP
+
+//
+// Config server insert, repeatedly stale
+printjson( request = {insert : brokenColl.getName(),
+ documents: [{_id:-1}]} );
+printjson( result = brokenColl.runCommand(request) );
+assert(result.ok);
+assert.eq(0, result.n);
+assert.eq(1, result.writeErrors.length);
+assert.eq(0, result.writeErrors[0].index);
+assert.eq(result.writeErrors[0].code, 82); // No Progress Made
+
+//
+// Config server insert to other shard, repeatedly stale
+printjson( request = {insert : brokenColl.getName(),
+ documents: [{_id:1}]} );
+printjson( result = brokenColl.runCommand(request) );
+assert(result.ok);
+assert.eq(0, result.n);
+assert.eq(1, result.writeErrors.length);
+assert.eq(0, result.writeErrors[0].index);
+assert.eq(result.writeErrors[0].code, 82); // No Progress Made
+
+//
+//
+// Tests against config server
+var configColl = config.getCollection( "batch_write_protocol_sharded" );
+
+//
+// Basic config server insert
+configColl.remove({});
+printjson( request = {insert : configColl.getName(),
+ documents: [{a:1}]} );
+var result = configColl.runCommand(request);
+assert.commandWorked(result);
+assert.eq(1, result.n);
+
+st.configRS.awaitReplication();
+assert.eq(1, st.config0.getCollection(configColl + "").count());
+assert.eq(1, st.config1.getCollection(configColl + "").count());
+assert.eq(1, st.config2.getCollection(configColl + "").count());
+
+//
+// Basic config server update
+configColl.remove({});
+configColl.insert({a:1});
+printjson( request = {update : configColl.getName(),
+ updates: [{q: {a:1}, u: {$set: {b:2}}}]} );
+printjson( result = configColl.runCommand(request) );
+assert(result.ok);
+assert.eq(1, result.n);
+
+st.configRS.awaitReplication();
+assert.eq(1, st.config0.getCollection(configColl + "").count({b:2}));
+assert.eq(1, st.config1.getCollection(configColl + "").count({b:2}));
+assert.eq(1, st.config2.getCollection(configColl + "").count({b:2}));
+
+//
+// Basic config server delete
+configColl.remove({});
+configColl.insert({a:1});
+printjson( request = {'delete' : configColl.getName(),
+ deletes: [{q: {a:1}, limit: 0}]} );
+printjson( result = configColl.runCommand(request) );
+assert(result.ok);
+assert.eq(1, result.n);
+
+st.configRS.awaitReplication();
+assert.eq(0, st.config0.getCollection(configColl + "").count());
+assert.eq(0, st.config1.getCollection(configColl + "").count());
+assert.eq(0, st.config2.getCollection(configColl + "").count());
+
+MongoRunner.stopMongod(st.config1);
+MongoRunner.stopMongod(st.config2);
+st.configRS.awaitNoPrimary();
+
+// Config server insert with no config PRIMARY
+configColl.remove({});
+printjson( request = {insert : configColl.getName(),
+ documents: [{a:1}]} );
+printjson( result = configColl.runCommand(request) );
+assert(!result.ok);
+assert(result.errmsg != null);
+
+
+// Config server update with no config PRIMARY
+configColl.remove({});
+configColl.insert({a:1});
+printjson( request = {update : configColl.getName(),
+ updates: [{q: {a:1}, u: {$set: {b:2}}}]} );
+printjson( result = configColl.runCommand(request) );
+assert(!result.ok);
+assert(result.errmsg != null);
+
+// Config server delete with no config PRIMARY
+configColl.remove({});
+configColl.insert({a:1});
+printjson( request = {delete : configColl.getName(),
+ deletes: [{q: {a:1}, limit: 0}]} );
+printjson( result = configColl.runCommand(request) );
+assert(!result.ok);
+assert(result.errmsg != null);
+
+jsTest.log("DONE!");
+
+MongoRunner.stopMongos( staleMongos );
+st.stop();
+
+}());
\ No newline at end of file
diff --git a/jstests/sharding/replset_config/config_rs_no_primary.js b/jstests/sharding/replset_config/config_rs_no_primary.js
index 4a67f865249..110320ded60 100644
--- a/jstests/sharding/replset_config/config_rs_no_primary.js
+++ b/jstests/sharding/replset_config/config_rs_no_primary.js
@@ -16,9 +16,7 @@ st.s.getDB('test').foo.insert({a:1});
// Take down two of the config servers so the remaining one goes into SECONDARY state.
st.configRS.stop(1);
st.configRS.stop(2);
-assert.soon(function() {
- return st.configRS.callIsMaster() == false;
- }, "Timed out waiting for there to be no primary in config replset");
+st.configRS.awaitNoPrimary();
jsTestLog("Starting a new mongos when the config servers have no primary which should work");
var mongos2 = MongoRunner.runMongos({configdb: st.configRS.getURL()});
diff --git a/jstests/sharding/sync_cluster_config/rs_stepdown_and_pooling.js b/jstests/sharding/rs_stepdown_and_pooling.js
index d40cbf25e21..c66e5037825 100644
--- a/jstests/sharding/sync_cluster_config/rs_stepdown_and_pooling.js
+++ b/jstests/sharding/rs_stepdown_and_pooling.js
@@ -1,10 +1,10 @@
//
// Tests what happens when a replica set primary goes down with pooled connections.
//
+(function() {
+"use strict";
-var options = {sync : true, mongosOptions : { verbose : 2 }};
-
-var st = new ShardingTest({shards : {rs0 : {nodes : 2}}, mongos : 1, other : options});
+var st = new ShardingTest({shards : {rs0 : {nodes : 2}}, mongos : 1});
// Stop balancer to eliminate weird conn stuff
st.stopBalancer();
@@ -113,3 +113,4 @@ else {
jsTest.log("DONE!");
st.stop();
+}());
\ No newline at end of file
diff --git a/jstests/sharding/sync_cluster_config/batch_write_command_sharded.js b/jstests/sharding/sync_cluster_config/batch_write_command_sharded.js
index a7aa05c0b4e..759aa686b55 100644
--- a/jstests/sharding/sync_cluster_config/batch_write_command_sharded.js
+++ b/jstests/sharding/sync_cluster_config/batch_write_command_sharded.js
@@ -3,6 +3,8 @@
// NOTE: Basic write functionality is tested via the passthrough tests, this file should contain
// *only* mongos-specific tests.
//
+(function() {
+"use strict";
// Only reason for using localhost name is to make the test consistent with naming host so it
// will be easier to check for the host name inside error objects.
@@ -271,3 +273,5 @@ jsTest.log("DONE!");
MongoRunner.stopMongos( staleMongos );
st.stop();
+
+}());
\ No newline at end of file
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index 360feaceee1..33cf7f87895 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -406,6 +406,16 @@ ReplSetTest.prototype.getMaster = function( timeout ) {
ReplSetTest.prototype.getPrimary = ReplSetTest.prototype.getMaster
+ReplSetTest.prototype.awaitNoPrimary = function(msg, timeout) {
+ msg = msg || "Timed out waiting for there to be no primary in replset: " + this.name;
+ timeout = timeout || 30000;
+ var self = this;
+ assert.soon(function() {
+ return self.callIsMaster() == false;
+ }, msg, timeout);
+
+}
+
ReplSetTest.prototype.getSecondaries = function( timeout ){
var master = this.getMaster( timeout )
var secs = []
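
For reference, a caller-side sketch of the new awaitNoPrimary() helper, mirroring its use in config_rs_no_primary.js and the new batch_write_command_sharded.js (the ShardingTest options here are illustrative and assume a CSRS config replica set, so st.configRS exists):

var st = new ShardingTest({ shards: 1, mongos: 1, config: 3 });

// Take down a majority of the config replica set, then wait until no node still
// reports itself as primary before exercising the "no config primary" code paths.
st.configRS.stop(1);
st.configRS.stop(2);
st.configRS.awaitNoPrimary();

// A custom failure message and timeout (in milliseconds) can also be supplied:
// st.configRS.awaitNoPrimary("config replset still has a primary", 60 * 1000);

st.stop();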