author Mathias Stearn <mathias@10gen.com> 2014-03-27 16:43:39 -0400
committer Mathias Stearn <mathias@10gen.com> 2014-03-27 17:35:16 -0400
commit 43d933a286bff0e98845368c84c10248388bc4a5 (patch)
tree 5ccfb8cca98925cf5b9fadd55f3eea0504a209f0 /jstests/slow1
parent d0a1e84ab2fa1b6aa699721b5cb9a4f8d0bf3692 (diff)
download mongo-43d933a286bff0e98845368c84c10248388bc4a5.tar.gz
SERVER-13391 Move slowest tests from noPassthrough* (formerly slow{Weekly,Nightly}) to slow*
slow1 and slow2 are new suites that hold very slow tests. The idea is that we can create slow3, slow4, etc. as needed to keep down the time it takes to run the longest suite.
Diffstat (limited to 'jstests/slow1')
-rw-r--r-- jstests/slow1/large_role_chain.js 37
-rw-r--r-- jstests/slow1/memory.js 42
-rw-r--r-- jstests/slow1/replsets_priority1.js 196
-rw-r--r-- jstests/slow1/sharding_multiple_collections.js 53
4 files changed, 328 insertions, 0 deletions
diff --git a/jstests/slow1/large_role_chain.js b/jstests/slow1/large_role_chain.js
new file mode 100644
index 00000000000..581db988be5
--- /dev/null
+++ b/jstests/slow1/large_role_chain.js
@@ -0,0 +1,37 @@
+// Tests SERVER-11475 - Make sure the server doesn't crash when many user-defined roles are created where
+// each role is a member of the next, creating a large chain.
+
+function runTest(conn) {
+
+ var testdb = conn.getDB("rolechain");
+ testdb.runCommand({dropAllRolesFromDatabase:1});
+ var chainLen = 2000;
+
+
+ jsTestLog("Generating a chain of " + chainLen + " linked roles");
+
+ var roleNameBase = "chainRole";
+ for (var i = 0; i < chainLen; i++) {
+ var name = roleNameBase + i;
+ if (i == 0) {
+ testdb.runCommand({createRole: name, privileges: [], roles: []});
+ }
+ else {
+ jsTestLog("Creating role " + i);
+ var prevRole = roleNameBase + (i - 1);
+ testdb.runCommand({createRole: name, privileges: [], roles: [ prevRole ]});
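+ // Reading the role back resolves its inherited role chain, which grows by one link each iteration.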
+ var roleInfo = testdb.getRole(name);
+ }
+ }
+
+}
+
+// run all tests standalone
+var conn = MongoRunner.runMongod();
+runTest(conn);
+MongoRunner.stopMongod(conn);
+
+// run all tests sharded
+conn = new ShardingTest({shards: 2, mongos: 1, config: 3});
+runTest(conn);
+conn.stop();
diff --git a/jstests/slow1/memory.js b/jstests/slow1/memory.js
new file mode 100644
index 00000000000..48265e77b31
--- /dev/null
+++ b/jstests/slow1/memory.js
@@ -0,0 +1,42 @@
+var col = db.memoryTest;
+
+// test creating many collections to make sure no internal cache goes OOM
+for (var i = 0; i < 10000; ++i) {
+ name = "memoryTest" + i;
+ if ((i % 1000) == 0) print("Processing " + name);
+ db.eval(function(col) { for (var i = 0; i < 100; ++i) {db[col + "_" + i].find();} }, name);
+}
+
+// test recovery of JS engine after out of memory
+db.system.js.save( { "_id" : "f1", "value" : function(n) {
+ a = [];
+ b = [];
+ c = [];
+ for (i = 0; i < n; i++) {
+ a.push(Math.random());
+ b.push(Math.random());
+ c.push(Math.random());
+ }
+} })
+
+// do mix of calls to make sure OOM is handled with no permanent damage
+db.eval("f1(10)");
+assert.throws(function() { db.eval("f1(100000000)"); } );
+db.eval("f1(10)");
+assert.throws(function() { db.eval("f1(1000000000)"); } );
+db.eval("f1(1000000)");
+db.eval("f1(1000000)");
+db.eval("f1(1000000)");
+assert.throws(function() { db.eval("f1(100000000)"); } );
+db.eval("f1(10)");
+db.eval("f1(1000000)");
+db.eval("f1(1000000)");
+db.eval("f1(1000000)");
+
+// also test $where
+col.drop();
+col.insert({a: 1});
+col.findOne({$where: "var arr = []; for (var i = 0; i < 1000000; ++i) {arr.push(0);}"});
+assert.throws(function() { col.findOne({$where: "var arr = []; for (var i = 0; i < 1000000000; ++i) {arr.push(0);}"}); });
+col.findOne({$where: "var arr = []; for (var i = 0; i < 1000000; ++i) {arr.push(0);}"});
+
diff --git a/jstests/slow1/replsets_priority1.js b/jstests/slow1/replsets_priority1.js
new file mode 100644
index 00000000000..622331b311a
--- /dev/null
+++ b/jstests/slow1/replsets_priority1.js
@@ -0,0 +1,196 @@
+// Come up with random priorities and make sure that the right member gets
+// elected. Then kill that member and make sure the next one gets elected.
+
+print("\n\n\nreplsets_priority1.js BEGIN\n");
+
+load("jstests/replsets/rslib.js");
+
+var rs = new ReplSetTest( {name: 'testSet', nodes: 3} );
+var nodes = rs.startSet();
+rs.initiate();
+
+var master = rs.getMaster();
+
+var everyoneOkSoon = function() {
+ var status;
+ assert.soon(function() {
+ var ok = true;
+ status = master.adminCommand({replSetGetStatus : 1});
+
+ if (!status.members) {
+ return false;
+ }
+
+ for (var i in status.members) {
+ if (status.members[i].health == 0) {
+ continue;
+ }
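+ // States 1 and 2 are PRIMARY and SECONDARY, respectively.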
+ ok &= status.members[i].state == 1 || status.members[i].state == 2;
+ }
+ return ok;
+ }, tojson(status));
+};
+
+var checkPrimaryIs = function (node) {
+
+ print("nreplsets_priority1.js checkPrimaryIs(" + node + ")");
+
+ var status;
+
+ assert.soon(function () {
+ var ok = true;
+
+ try {
+ status = master.adminCommand({ replSetGetStatus: 1 });
+ }
+ catch (e) {
+ print(e);
+ print("nreplsets_priority1.js checkPrimaryIs reconnecting");
+ reconnect(master);
+ status = master.adminCommand({ replSetGetStatus: 1 });
+ }
+
+ var str = "goal: " + node.host + "==1 states: ";
+ if (!status || !status.members) {
+ return false;
+ }
+ status.members.forEach(function (m) {
+ str += m.name + ": " + m.state + " ";
+
+ if (m.name == node.host) {
+ ok &= m.state == 1;
+ }
+ else {
+ ok &= m.state != 1 || (m.state == 1 && m.health == 0);
+ }
+ });
+ print();
+ print(str);
+ print();
+
+ occasionally(function () {
+ print("\nstatus:");
+ printjson(status);
+ print();
+ }, 15);
+
+ return ok;
+ }, node.host + '==1', 60000, 1000);
+
+ everyoneOkSoon();
+};
+
+everyoneOkSoon();
+
+print("\n\nreplsets_priority1.js initial sync");
+
+// initial sync
+master.getDB("foo").bar.insert({x:1});
+rs.awaitReplication();
+
+print("\n\nreplsets_priority1.js starting loop");
+
+var n = 5;
+for (i=0; i<n; i++) {
+ print("Round "+i+": FIGHT!");
+
+ var max = null;
+ var second = null;
+ reconnect(master);
+ var config = master.getDB("local").system.replset.findOne();
+
+ var version = config.version;
+ config.version++;
+
+ for (var j=0; j<config.members.length; j++) {
+ var priority = Math.random() * 100;
+ print("random priority : " + priority);
+ config.members[j].priority = priority;
+
+ if (!max || priority > max.priority) {
+ max = config.members[j];
+ }
+ }
+
+ for (var j=0; j<config.members.length; j++) {
+ if (config.members[j] == max) {
+ continue;
+ }
+ if (!second || config.members[j].priority > second.priority) {
+ second = config.members[j];
+ }
+ }
+
+ print("\n\nreplsets_priority1.js max is " + max.host + " with priority " + max.priority + ", reconfiguring...");
+
+ var count = 0;
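+ // Retry the reconfig until the primary reports the bumped config version, giving up after 100 attempts.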
+ while (config.version != version && count < 100) {
+ reconnect(master);
+
+ occasionally(function() {
+ print("version is "+version+", trying to update to "+config.version);
+ });
+
+ try {
+ master.adminCommand({replSetReconfig : config});
+ master = rs.getMaster();
+ reconnect(master);
+
+ version = master.getDB("local").system.replset.findOne().version;
+ }
+ catch (e) {
+ print("nreplsets_priority1.js Caught exception: " + e);
+ }
+
+ count++;
+ }
+
+ print("\nreplsets_priority1.js wait for 2 slaves");
+
+ assert.soon(function() {
+ rs.getMaster();
+ return rs.liveNodes.slaves.length == 2;
+ }, "2 slaves");
+
+ print("\nreplsets_priority1.js wait for new config version " + config.version);
+
+ assert.soon(function() {
+ versions = [0,0];
+ rs.liveNodes.slaves[0].setSlaveOk();
+ versions[0] = rs.liveNodes.slaves[0].getDB("local").system.replset.findOne().version;
+ rs.liveNodes.slaves[1].setSlaveOk();
+ versions[1] = rs.liveNodes.slaves[1].getDB("local").system.replset.findOne().version;
+ return versions[0] == config.version && versions[1] == config.version;
+ });
+
+ print("replsets_priority1.js awaitReplication");
+
+ // the reconfiguration needs to be replicated! the hb sends it out
+ // separately from the repl
+ rs.awaitReplication();
+
+ print("reconfigured. Checking statuses.");
+
+ checkPrimaryIs(max);
+
+ print("rs.stop");
+
+ rs.stop(max._id);
+
+ var master = rs.getMaster();
+
+ print("\nkilled max primary. Checking statuses.");
+
+ print("second is "+second.host+" with priority "+second.priority);
+ checkPrimaryIs(second);
+
+ print("restart max " + max._id);
+
+ rs.restart(max._id);
+ master = rs.getMaster();
+
+ print("max restarted. Checking statuses.");
+ checkPrimaryIs(max);
+}
+
+print("\n\n\n\n\nreplsets_priority1.js SUCCESS!\n\n");
diff --git a/jstests/slow1/sharding_multiple_collections.js b/jstests/slow1/sharding_multiple_collections.js
new file mode 100644
index 00000000000..61d9911afca
--- /dev/null
+++ b/jstests/slow1/sharding_multiple_collections.js
@@ -0,0 +1,53 @@
+// sharding_multiple_collections.js
+
+s = new ShardingTest( "multcollections" , 2 , 1 , 1 , { chunksize : 1 } );
+
+s.adminCommand( { enablesharding : "test" } );
+
+db = s.getDB( "test" )
+
+N = 100000
+
+S = ""
+while ( S.length < 500 )
+ S += "123123312312";
+
+for ( i=0; i<N; i++ ){
+ db.foo.insert( { _id : i , s : S } )
+ db.bar.insert( { _id : i , s : S , s2 : S } )
+ db.getLastError()
+}
+
+db.printShardingStatus()
+
+function mytest( coll , i , loopNumber ){
+ x = coll.find( { _id : i } ).explain();
+ if ( x )
+ return;
+ throw "can't find " + i + " in " + coll.getName() + " on loopNumber: " + loopNumber + " explain: " + tojson( x );
+}
+
+loopNumber = 0
+while ( 1 ){
+ for ( i=0; i<N; i++ ){
+ mytest( db.foo , i , loopNumber );
+ mytest( db.bar , i , loopNumber );
+ if ( i % 1000 == 0 )
+ print( i )
+ }
+ db.printShardingStatus()
+ loopNumber++;
+
+ if ( loopNumber == 1 ){
+ s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+ s.adminCommand( { shardcollection : "test.bar" , key : { _id : 1 } } );
+ }
+
+ assert( loopNumber < 1000 , "taking too long" );
+
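+ // Stop once chunks are spread roughly evenly across the shards (chunkDiff is the max minus min chunk count per shard).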
+ if ( s.chunkDiff( "foo" ) < 12 && s.chunkDiff( "bar" ) < 12 )
+ break
+}
+
+s.stop()
+