diff options
| author | Mark Benvenuto <mark.benvenuto@mongodb.com> | 2014-04-09 17:41:25 -0400 |
| --- | --- | --- |
| committer | Mark Benvenuto <mark.benvenuto@mongodb.com> | 2014-04-11 11:34:40 -0400 |
| commit | 84022c1f5ac4023e56c52f5ca8e5a94eb59b4827 (patch) | |
| tree | 52796f98486a35afac6b781eee009d396adb66aa /jstests/slow2 | |
| parent | 6357353cab44c0a70cca1cb55f28a14ebdf7470a (diff) | |
| download | mongo-84022c1f5ac4023e56c52f5ca8e5a94eb59b4827.tar.gz | |
move tests from noPassthrough to slow2
Diffstat (limited to 'jstests/slow2')
| -rw-r--r-- | jstests/slow2/conc_update.js | 56 |
| -rw-r--r-- | jstests/slow2/replsets_prefetch_stress.js | 37 |
| -rw-r--r-- | jstests/slow2/rollback4.js | 55 |
3 files changed, 148 insertions, 0 deletions
diff --git a/jstests/slow2/conc_update.js b/jstests/slow2/conc_update.js
new file mode 100644
index 00000000000..0d778df047e
--- /dev/null
+++ b/jstests/slow2/conc_update.js
@@ -0,0 +1,56 @@
+load( "jstests/libs/slow_weekly_util.js" )
+test = new SlowWeeklyMongod( "conc_update" )
+db = test.getDB("concurrency")
+db.dropDatabase();
+
+NRECORDS=3*1024*1024
+
+print("loading "+NRECORDS+" documents (progress msg every 1024*1024 documents)")
+for (i=0; i<(NRECORDS); i++) {
+    db.conc.insert({x:i})
+    if ((i%(1024*1024))==0)
+        print("loaded " + i/(1024*1024) + " mibi-records")
+}
+
+print("making an index (this will take a while)")
+db.conc.ensureIndex({x:1})
+
+var c1=db.conc.count({x:{$lt:NRECORDS}})
+
+updater=startParallelShell("db=db.getSisterDB('concurrency');\
+                           db.concflag.insert( {inprog:true} );\
+                           sleep(20);\
+                           db.conc.update({}, {$inc:{x: "+NRECORDS+"}}, false, true);\
+                           e=db.getLastError();\
+                           print('update error: '+ e);\
+                           db.concflag.update({},{inprog:false});\
+                           assert.eq(e, null, 'update failed');");
+
+assert.soon( function(){ var x = db.concflag.findOne(); return x && x.inprog; } ,
+             "wait for fork" , 30000 , 1 );
+
+querycount=0;
+decrements=0;
+misses=0
+
+assert.soon(
+    function(){
+        c2=db.conc.count({x:{$lt:NRECORDS}})
+        print(c2)
+        querycount++;
+        if (c2<c1)
+            decrements++;
+        else
+            misses++;
+        c1 = c2;
+        return ! db.concflag.findOne().inprog;
+    } ,
+    "update never finished" , 2 * 60 * 60 * 1000 , 10 );
+
+print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
+
+assert.eq( NRECORDS , db.conc.count() , "AT END 1" )
+
+updater() // wait()
+
+test.stop();
diff --git a/jstests/slow2/replsets_prefetch_stress.js b/jstests/slow2/replsets_prefetch_stress.js
new file mode 100644
index 00000000000..dcd523dd3a4
--- /dev/null
+++ b/jstests/slow2/replsets_prefetch_stress.js
@@ -0,0 +1,37 @@
+// Replication prefetching stress test. Insert many documents, each with a large number of multikey
+// values on the same index. All multikey keys will be generated, but only the first will be
+// prefetched from the index.
+
+var replTest = new ReplSetTest( { name:'testSet', nodes:3 } );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+c = master.getDB( 'd' )[ 'c' ];
+
+c.insert( { _id:0 } );
+master.getDB( 'd' ).getLastError();
+replTest.awaitReplication();
+
+// Create a:1 index.
+c.ensureIndex( { a:1 } );
+
+// Create an array of multikey values.
+multikeyValues = [];
+for( i = 0; i < 10000; ++i ) {
+    multikeyValues.push( i );
+}
+
+// Insert documents with multikey values.
+for( i = 0; i < 1000; ++i ) {
+    c.insert( { a:multikeyValues } );
+}
+master.getDB( 'd' ).getLastError();
+replTest.awaitReplication(300000);
+
+// Check document counts on all nodes. On error a node might go down or fail to sync all data, see
+// SERVER-6538.
+assert.eq( 1001, c.count() );
+nodes.forEach( function( node ) {
+    node.setSlaveOk();
+    assert.eq( 1001, node.getDB( 'd' )[ 'c' ].count() );
+} );
diff --git a/jstests/slow2/rollback4.js b/jstests/slow2/rollback4.js
new file mode 100644
index 00000000000..f1708b8be6d
--- /dev/null
+++ b/jstests/slow2/rollback4.js
@@ -0,0 +1,55 @@
+// Test a large rollback SERVER-2737
+
+var replTest = new ReplSetTest({ name: 'unicomplex',
+                                 nodes: 3,
+                                 oplogSize: 2000 });
+var nodes = replTest.nodeList();
+
+var conns = replTest.startSet();
+var r = replTest.initiate({ "_id": "unicomplex",
+                            "members": [
+                                { "_id": 0, "host": nodes[0] },
+                                { "_id": 1, "host": nodes[1] },
+                                { "_id": 2, "host": nodes[2], arbiterOnly: true}]
+                          }, 'replSetInitiate', 600000);
+
+// Make sure we have a master
+var master = replTest.getMaster();
+b_conn = conns[1];
+b_conn.setSlaveOk();
+B = b_conn.getDB("admin");
+
+// Make sure we have an arbiter
+assert.soon(function () {
+    res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 });
+    return res.myState == 7;
+}, "Arbiter failed to initialize.");
+
+// Wait for initial replication
+replTest.awaitReplication();
+
+// Insert into master
+var big = { b:new Array( 1000 ).toString() };
+for( var i = 0; i < 1000000; ++i ) {
+    if ( i % 10000 == 0 ) {
+        print( i );
+    }
+    master.getDB( 'db' ).c.insert( big );
+}
+
+// Stop master
+replTest.stop( 0 );
+
+// Wait for slave to take over
+// This can take a while if the secondary has queued up many writes in its
+// buffer, since it needs to flush those out before it can assume the primaryship.
+assert.soon(function () { return B.isMaster().ismaster; }, "waiting for new primary", 60000);
+master = replTest.getMaster();
+
+// Save to new master, forcing rollback of old master
+master.getDB( 'db' ).c.save( big );
+
+// Restart old master
+replTest.restart( 0 );
+// Wait five minutes to ensure there is enough time for rollback
+replTest.awaitReplication(5*60*1000);