Diffstat (limited to 'jstests/slow2')
-rw-r--r--  jstests/slow2/32bit.js              |  72
-rw-r--r--  jstests/slow2/conc_update.js        |  47
-rw-r--r--  jstests/slow2/cursor_timeout.js     |  63
-rw-r--r--  jstests/slow2/mr_during_migrate.js  | 109
-rw-r--r--  jstests/slow2/remove_during_mr.js   |   5
-rw-r--r--  jstests/slow2/replsets_killop.js    |  55
6 files changed, 189 insertions, 162 deletions
diff --git a/jstests/slow2/32bit.js b/jstests/slow2/32bit.js
index f76b04e5c21..d8b2c5ff728 100644
--- a/jstests/slow2/32bit.js
+++ b/jstests/slow2/32bit.js
@@ -7,10 +7,9 @@
 var forceSeedToBe = null;
 
 if (forceSeedToBe) {
     print("\n32bit.js WARNING FORCING A SPECIFIC SEED");
-    print("seed="+ forceSeedToBe);
+    print("seed=" + forceSeedToBe);
     Random.srand(forceSeedToBe);
-}
-else {
+} else {
     Random.setRandomSeed();
 }
@@ -18,7 +17,7 @@ function f() {
     'use strict';
 
     var pass = 1;
-    var mydb = db.getSisterDB( "test_32bit" );
+    var mydb = db.getSisterDB("test_32bit");
     var t = mydb.colltest_32bit;
 
     mydb.dropDatabase();
@@ -30,16 +29,16 @@ function f() {
         print("32bit.js PASS #" + pass);
         pass++;
 
-        t.insert({x:1});
-        t.ensureIndex({a:1});
-        t.ensureIndex({b:1}, true);
-        t.ensureIndex({x:1});
+        t.insert({x: 1});
+        t.ensureIndex({a: 1});
+        t.ensureIndex({b: 1}, true);
+        t.ensureIndex({x: 1});
         if (Random.rand() < 0.3) {
-            t.ensureIndex({c:1});
+            t.ensureIndex({c: 1});
         }
-        t.ensureIndex({d:1});
-        t.ensureIndex({e:1});
-        t.ensureIndex({f:1});
+        t.ensureIndex({d: 1});
+        t.ensureIndex({e: 1});
+        t.ensureIndex({f: 1});
 
         // create 448 byte string
         var big = 'a b';
@@ -70,34 +69,43 @@ function f() {
                 cc = null;
             }
 
-            var res = t.insert({ a: a, b: b, c: cc, d: d, f: f });
+            var res = t.insert({a: a, b: b, c: cc, d: d, f: f});
             if (res.hasWriteError()) {
                 // Presumably we have mmap error on 32 bit. try a few more manipulations
                 // attempting to break things.
-                t.insert({a:33,b:44,c:55,d:66,f:66});
-                t.insert({a:33,b:44000,c:55,d:66});
-                t.insert({a:33,b:440000,c:55});
-                t.insert({a:33,b:4400000});
-                t.update({a:20},{'$set':{c:'abc'}});
-                t.update({a:21},{'$set':{c:'aadsfbc'}});
-                t.update({a:22},{'$set':{c:'c'}});
-                t.update({a:23},{'$set':{b:cc}});
-                t.remove({a:22});
+                t.insert({a: 33, b: 44, c: 55, d: 66, f: 66});
+                t.insert({a: 33, b: 44000, c: 55, d: 66});
+                t.insert({a: 33, b: 440000, c: 55});
+                t.insert({a: 33, b: 4400000});
+                t.update({a: 20}, {'$set': {c: 'abc'}});
+                t.update({a: 21}, {'$set': {c: 'aadsfbc'}});
+                t.update({a: 22}, {'$set': {c: 'c'}});
+                t.update({a: 23}, {'$set': {b: cc}});
+                t.remove({a: 22});
                 break;
             }
 
             if (Random.rand() < 0.01) {
-                t.remove({a:a});
-                t.remove({b:Random.rand()});
-                t.insert({e:1});
-                t.insert({f:'aaaaaaaaaa'});
-
-                if (Random.rand() < 0.00001) { print("remove cc"); t.remove({c:cc}); }
-                if (Random.rand() < 0.0001) { print("update cc"); t.update({c:cc},{'$set':{c:1}},false,true); }
-                if (Random.rand() < 0.00001) { print("remove e"); t.remove({e:1}); }
+                t.remove({a: a});
+                t.remove({b: Random.rand()});
+                t.insert({e: 1});
+                t.insert({f: 'aaaaaaaaaa'});
+
+                if (Random.rand() < 0.00001) {
+                    print("remove cc");
+                    t.remove({c: cc});
+                }
+                if (Random.rand() < 0.0001) {
+                    print("update cc");
+                    t.update({c: cc}, {'$set': {c: 1}}, false, true);
+                }
+                if (Random.rand() < 0.00001) {
+                    print("remove e");
+                    t.remove({e: 1});
+                }
             }
 
             if (a == 20000) {
-                var delta_ms = (new Date())-start;
+                var delta_ms = (new Date()) - start;
                 // 2MM / 20000 = 100.  1000ms/sec.
                 var eta_secs = delta_ms * (100 / 1000);
                 print("32bit.js eta_secs:" + eta_secs);
@@ -124,7 +132,7 @@ function f() {
             print("32bit.js FAIL validating");
             print(res.result);
             printjson(res);
-            //mydb.dropDatabase();
+            // mydb.dropDatabase();
             throw Error("fail validating 32bit.js");
         }
diff --git a/jstests/slow2/conc_update.js b/jstests/slow2/conc_update.js
index 5cde7489090..b7b8b836831 100644
--- a/jstests/slow2/conc_update.js
+++ b/jstests/slow2/conc_update.js
@@ -1,52 +1,57 @@
-load( "jstests/libs/slow_weekly_util.js" );
-test = new SlowWeeklyMongod( "conc_update" );
+load("jstests/libs/slow_weekly_util.js");
+test = new SlowWeeklyMongod("conc_update");
 db = test.getDB("concurrency");
 db.dropDatabase();
 
-NRECORDS=3*1024*1024;
+NRECORDS = 3 * 1024 * 1024;
 
-print("loading "+NRECORDS+" documents (progress msg every 1024*1024 documents)");
+print("loading " + NRECORDS + " documents (progress msg every 1024*1024 documents)");
 var bulk = db.conc.initializeUnorderedBulkOp();
 for (var i = 0; i < NRECORDS; i++) {
-    bulk.insert({ x: i });
+    bulk.insert({x: i});
 }
 assert.writeOK(bulk.execute());
 
 print("making an index (this will take a while)");
-db.conc.ensureIndex({x:1});
+db.conc.ensureIndex({x: 1});
 
-var c1=db.conc.count({x:{$lt:NRECORDS}});
+var c1 = db.conc.count({x: {$lt: NRECORDS}});
 
-updater = startParallelShell("db = db.getSisterDB('concurrency');\
+updater = startParallelShell(
+    "db = db.getSisterDB('concurrency');\
                db.concflag.insert({ inprog: true });\
                sleep(20);\
                assert.writeOK(db.conc.update({}, \
-                              { $inc: { x: " + NRECORDS + "}}, false, true)); \
+                              { $inc: { x: " +
+    NRECORDS +
+    "}}, false, true)); \
                assert.writeOK(db.concflag.update({}, { inprog: false }));");
 
-assert.soon( function(){ var x = db.concflag.findOne(); return x && x.inprog; } ,
-             "wait for fork" , 30000 , 1 );
+assert.soon(function() {
+    var x = db.concflag.findOne();
+    return x && x.inprog;
+}, "wait for fork", 30000, 1);
 
-querycount=0;
-decrements=0;
-misses=0;
+querycount = 0;
+decrements = 0;
+misses = 0;
 
-assert.soon(function(){
-    c2=db.conc.count({x:{$lt:NRECORDS}});
+assert.soon(function() {
+    c2 = db.conc.count({x: {$lt: NRECORDS}});
     print(c2);
     querycount++;
-    if (c2<c1)
+    if (c2 < c1)
         decrements++;
     else
         misses++;
     c1 = c2;
-    return ! db.concflag.findOne().inprog;
-}, "update never finished" , 2 * 60 * 60 * 1000 , 10 );
+    return !db.concflag.findOne().inprog;
+}, "update never finished", 2 * 60 * 60 * 1000, 10);
 
 print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
 
-assert.eq( NRECORDS , db.conc.count() , "AT END 1" );
+assert.eq(NRECORDS, db.conc.count(), "AT END 1");
 
-updater(); // wait()
+updater();  // wait()
 
 test.stop();
diff --git a/jstests/slow2/cursor_timeout.js b/jstests/slow2/cursor_timeout.js
index ed70471d918..f74521b9bc9 100644
--- a/jstests/slow2/cursor_timeout.js
+++ b/jstests/slow2/cursor_timeout.js
@@ -9,50 +9,58 @@
 // After a period of inactivity, the test asserts that cursors #1 and #2 are still alive, and that
 // #3 and #4 have been killed.
 
-var st =
-    new ShardingTest( { shards: 2,
-                        other: { chunkSize: 1,
-                                 shardOptions: { setParameter: "cursorTimeoutMillis=1000" },
-                                 mongosOptions: { setParameter: "cursorTimeoutMillis=1000" } } } );
+var st = new ShardingTest({
+    shards: 2,
+    other: {
+        chunkSize: 1,
+        shardOptions: {setParameter: "cursorTimeoutMillis=1000"},
+        mongosOptions: {setParameter: "cursorTimeoutMillis=1000"}
+    }
+});
 st.stopBalancer();
 
 var adminDB = st.admin;
 var configDB = st.config;
-var coll = st.s.getDB( 'test' ).user;
+var coll = st.s.getDB('test').user;
 
-adminDB.runCommand({ enableSharding: coll.getDB().getName() });
+adminDB.runCommand({enableSharding: coll.getDB().getName()});
 st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-adminDB.runCommand({ shardCollection: coll.getFullName(), key: { x: 1 }});
+adminDB.runCommand({shardCollection: coll.getFullName(), key: {x: 1}});
 
 var data = 'c';
-for( var x = 0; x < 18; x++ ){
+for (var x = 0; x < 18; x++) {
     data += data;
 }
 
-for( x = 0; x < 200; x++ ){
-    coll.insert({ x: x, v: data });
+for (x = 0; x < 200; x++) {
+    coll.insert({x: x, v: data});
 }
 
 var chunkDoc = configDB.chunks.findOne();
 var chunkOwner = chunkDoc.shard;
-var toShard = configDB.shards.findOne({ _id: { $ne: chunkOwner }})._id;
-var cmd = { moveChunk: coll.getFullName(), find: chunkDoc.min, to: toShard, _waitForDelete: true };
-var res = adminDB.runCommand( cmd );
+var toShard = configDB.shards.findOne({_id: {$ne: chunkOwner}})._id;
+var cmd = {
+    moveChunk: coll.getFullName(),
+    find: chunkDoc.min,
+    to: toShard,
+    _waitForDelete: true
+};
+var res = adminDB.runCommand(cmd);
 
-jsTest.log( 'move result: ' + tojson( res ));
+jsTest.log('move result: ' + tojson(res));
 
 var shardedCursorWithTimeout = coll.find();
 var shardedCursorWithNoTimeout = coll.find();
-shardedCursorWithNoTimeout.addOption( DBQuery.Option.noTimeout );
+shardedCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
 
 // Query directly to mongod
-var shardHost = configDB.shards.findOne({ _id: chunkOwner }).host;
-var mongod = new Mongo( shardHost );
-var shardColl = mongod.getCollection( coll.getFullName() );
+var shardHost = configDB.shards.findOne({_id: chunkOwner}).host;
+var mongod = new Mongo(shardHost);
+var shardColl = mongod.getCollection(coll.getFullName());
 
 var cursorWithTimeout = shardColl.find();
 var cursorWithNoTimeout = shardColl.find();
-cursorWithNoTimeout.addOption( DBQuery.Option.noTimeout );
+cursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
 
 shardedCursorWithTimeout.next();
 shardedCursorWithNoTimeout.next();
@@ -63,15 +71,18 @@ cursorWithNoTimeout.next();
 
 // Wait until the idle cursor background job has killed the cursors that do not have the "no
 // timeout" flag set. We use the "cursorTimeoutMillis" setParameter above to reduce the amount of
 // time we need to wait here.
-sleep( 5000 );
+sleep(5000);
 
-assert.throws( function(){ shardedCursorWithTimeout.itcount(); } );
-assert.throws( function(){ cursorWithTimeout.itcount(); } );
+assert.throws(function() {
+    shardedCursorWithTimeout.itcount();
+});
+assert.throws(function() {
+    cursorWithTimeout.itcount();
+});
 
 // +1 because we already advanced once
-assert.eq( coll.count(), shardedCursorWithNoTimeout.itcount() + 1 );
+assert.eq(coll.count(), shardedCursorWithNoTimeout.itcount() + 1);
 
-assert.eq( shardColl.count(), cursorWithNoTimeout.itcount() + 1 );
+assert.eq(shardColl.count(), cursorWithNoTimeout.itcount() + 1);
 
 st.stop();
-
diff --git a/jstests/slow2/mr_during_migrate.js b/jstests/slow2/mr_during_migrate.js
index 66962ff0e0f..cb439aeb241 100644
--- a/jstests/slow2/mr_during_migrate.js
+++ b/jstests/slow2/mr_during_migrate.js
@@ -1,52 +1,55 @@
 // Do parallel ops with migrates occurring
 
-var st = new ShardingTest({ shards : 10, mongos : 2, verbose : 2 });
+var st = new ShardingTest({shards: 10, mongos: 2, verbose: 2});
 
-jsTest.log( "Doing parallel operations..." );
+jsTest.log("Doing parallel operations...");
 
-//Stop balancer, since it'll just get in the way of these
+// Stop balancer, since it'll just get in the way of these
 st.stopBalancer();
 
 var mongos = st.s0;
 var admin = mongos.getDB("admin");
-var coll = st.s.getCollection( jsTest.name() + ".coll" );
+var coll = st.s.getCollection(jsTest.name() + ".coll");
 
 var numDocs = 1024 * 1024;
-var dataSize = 1024; // bytes, must be power of 2
+var dataSize = 1024;  // bytes, must be power of 2
 
 var data = "x";
-while( data.length < dataSize ) data += data;
+while (data.length < dataSize)
+    data += data;
 
 var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < numDocs; i++ ){
-    bulk.insert({ _id: i, data: data });
+for (var i = 0; i < numDocs; i++) {
+    bulk.insert({_id: i, data: data});
 }
 assert.writeOK(bulk.execute());
 
 // Make sure everything got inserted
-assert.eq( numDocs, coll.find().itcount() );
+assert.eq(numDocs, coll.find().itcount());
 
-
-jsTest.log( "Inserted " + sh._dataFormat( dataSize * numDocs ) + " of data." );
+jsTest.log("Inserted " + sh._dataFormat(dataSize * numDocs) + " of data.");
 
 // Shard collection
-st.shardColl( coll, { _id : 1 }, false );
+st.shardColl(coll, {_id: 1}, false);
 
 st.printShardingStatus();
 
-jsTest.log( "Sharded collection now initialized, starting migrations..." );
+jsTest.log("Sharded collection now initialized, starting migrations...");
 
-var checkMigrate = function(){ print( "Result of migrate : " ); printjson( this ); };
+var checkMigrate = function() {
+    print("Result of migrate : ");
+    printjson(this);
+};
 
 // Creates a number of migrations of random chunks to diff shard servers
 var ops = [];
-for(var i = 0; i < st._connections.length; i++) {
+for (var i = 0; i < st._connections.length; i++) {
    ops.push({
        op: "command",
        ns: "admin",
        command: {
            moveChunk: "" + coll,
-            find: { _id: { "#RAND_INT" : [ 0, numDocs ] }},
+            find: {_id: {"#RAND_INT": [0, numDocs]}},
            to: st._connections[i].shardName,
            _waitForDelete: true
        },
@@ -56,55 +59,55 @@ for(var i = 0; i < st._connections.length; i++) {
 
 // TODO: Also migrate output collection
 
-jsTest.log( "Starting migrations now..." );
+jsTest.log("Starting migrations now...");
 
-var bid = benchStart({ ops : ops,
-                       host : st.s.host,
-                       parallel : 1,
-                       handleErrors : false });
+var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
 
 //#######################
 // Tests during migration
 
 var numTests = 5;
 
-for( var t = 0; t < numTests; t++ ){
-
-    jsTest.log( "Test #" + t );
-
-    var mongos = st.s1; // use other mongos so we get stale shard versions
-    var coll = mongos.getCollection( coll + "" );
-    var outputColl = mongos.getCollection( coll + "_output" );
-
+for (var t = 0; t < numTests; t++) {
+    jsTest.log("Test #" + t);
+
+    var mongos = st.s1;  // use other mongos so we get stale shard versions
+    var coll = mongos.getCollection(coll + "");
+    var outputColl = mongos.getCollection(coll + "_output");
+
     var numTypes = 32;
-    var map = function(){ emit( this._id % 32 /* must be hardcoded */, { c : 1 } ); };
-    var reduce = function( k, vals ){
+    var map = function() {
+        emit(this._id % 32 /* must be hardcoded */, {c: 1});
+    };
+    var reduce = function(k, vals) {
         var total = 0;
-        for( var i = 0; i < vals.length; i++ ) total += vals[i].c;
-        return { c : total };
+        for (var i = 0; i < vals.length; i++)
+            total += vals[i].c;
+        return {
+            c: total
+        };
     };
-
-    printjson( coll.find({ _id : 0 }).itcount() );
-
-    jsTest.log( "Starting new mapReduce run #" + t );
-
-    //assert.eq( coll.find().itcount(), numDocs )
-
-    coll.getMongo().getDB("admin").runCommand({ setParameter : 1, traceExceptions : true });
-
-    printjson( coll.mapReduce( map, reduce, { out : { replace : outputColl.getName(), db : outputColl.getDB() + "" } }) );
-
-    jsTest.log( "MapReduce run #" + t + " finished." );
-
-    assert.eq( outputColl.find().itcount(), numTypes );
-
-    outputColl.find().forEach( function( x ){
-        assert.eq( x.value.c, numDocs / numTypes );
+
+    printjson(coll.find({_id: 0}).itcount());
+
+    jsTest.log("Starting new mapReduce run #" + t);
+
+    // assert.eq( coll.find().itcount(), numDocs )
+
+    coll.getMongo().getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
+
+    printjson(coll.mapReduce(
+        map, reduce, {out: {replace: outputColl.getName(), db: outputColl.getDB() + ""}}));
+
+    jsTest.log("MapReduce run #" + t + " finished.");
+
+    assert.eq(outputColl.find().itcount(), numTypes);
+
+    outputColl.find().forEach(function(x) {
+        assert.eq(x.value.c, numDocs / numTypes);
     });
-
 }
-
-printjson( benchFinish( bid ) );
+printjson(benchFinish(bid));
 
 st.stop();
diff --git a/jstests/slow2/remove_during_mr.js b/jstests/slow2/remove_during_mr.js
index 16374adeb24..9b632a11a56 100644
--- a/jstests/slow2/remove_during_mr.js
+++ b/jstests/slow2/remove_during_mr.js
@@ -5,13 +5,12 @@ load('jstests/libs/parallelTester.js');
 
 function client1() {
     Random.setRandomSeed();
-    for(var i = 0; i < 1000; i++) {
+    for (var i = 0; i < 1000; i++) {
         db.remove_during_mr.remove({rand: {$gte: Random.rand()}}, {justOne: true});
     }
 }
 
 function client2() {
-
     function mapper() {
         emit(this.key, 1);
     }
@@ -20,7 +19,7 @@ function client2() {
         return {};
     }
 
-    for(var i = 0; i < 1000; i++) {
+    for (var i = 0; i < 1000; i++) {
         var options = {
             out: {replace: 'bar'},
             sort: {_id: -1}
diff --git a/jstests/slow2/replsets_killop.js b/jstests/slow2/replsets_killop.js
index 603e1f9c63e..ea1cd560b91 100644
--- a/jstests/slow2/replsets_killop.js
+++ b/jstests/slow2/replsets_killop.js
@@ -4,66 +4,67 @@ numDocs = 1e5;
 
 // Set up a replica set.
-replTest = new ReplSetTest( { name:'test', nodes:3 } );
+replTest = new ReplSetTest({name: 'test', nodes: 3});
 nodes = replTest.startSet();
 replTest.initiate();
 primary = replTest.getPrimary();
 secondary = replTest.getSecondary();
-db = primary.getDB( 'test' );
-db.test.save( { a:0 } );
+db = primary.getDB('test');
+db.test.save({a: 0});
 replTest.awaitReplication();
-assert.soon( function() { return secondary.getDB( 'test' ).test.count() == 1; } );
+assert.soon(function() {
+    return secondary.getDB('test').test.count() == 1;
+});
 
 // Start a parallel shell to insert new documents on the primary.
-inserter = startParallelShell(
-    'var bulk = db.test.initializeUnorderedBulkOp(); \
-     for( i = 1; i < ' + numDocs + '; ++i ) { \
+inserter = startParallelShell('var bulk = db.test.initializeUnorderedBulkOp(); \
+    for( i = 1; i < ' + numDocs +
+                              '; ++i ) { \
         bulk.insert({ a: i }); \
    } \
-     bulk.execute();'
-);
+    bulk.execute();');
 
 // Periodically kill replication get mores.
-for( i = 0; i < 1e3; ++i ) {
+for (i = 0; i < 1e3; ++i) {
    allOps = db.currentOp();
-    for( j in allOps.inprog ) {
-        op = allOps.inprog[ j ];
-        if ( op.ns == 'local.oplog.rs' && op.op == 'getmore' ) {
-            db.killOp( op.opid );
+    for (j in allOps.inprog) {
+        op = allOps.inprog[j];
+        if (op.ns == 'local.oplog.rs' && op.op == 'getmore') {
+            db.killOp(op.opid);
        }
    }
-    sleep( 100 );
+    sleep(100);
 }
 
 // Wait for the inserter to finish.
 inserter();
 
-assert.eq( numDocs, db.test.count() );
+assert.eq(numDocs, db.test.count());
 
 // Return true when the correct number of documents are present on the secondary. Otherwise print
 // which documents are missing and return false.
 function allReplicated() {
-    count = secondary.getDB( 'test' ).test.count();
-    if ( count == numDocs ) {
+    count = secondary.getDB('test').test.count();
+    if (count == numDocs) {
        // Return true if the count is as expected.
        return true;
    }
-
+
    // Identify and print the missing a-values.
    foundSet = {};
-    c = secondary.getDB( 'test' ).test.find();
-    while( c.hasNext() ) {
-        foundSet[ '' + c.next().a ] = true;
+    c = secondary.getDB('test').test.find();
+    while (c.hasNext()) {
+        foundSet['' + c.next().a] = true;
    }
    missing = [];
-    for( i = 0; i < numDocs; ++i ) {
-        if ( !( ( '' + i ) in foundSet ) ) {
-            missing.push( i );
+    for (i = 0; i < numDocs; ++i) {
+        if (!(('' + i) in foundSet)) {
+            missing.push(i);
        }
    }
-    print( 'count: ' + count + ' missing: ' + missing );
+    print('count: ' + count + ' missing: ' + missing);
    return false;
 }
 
 // Wait for the correct number of (replicated) documents to be present on the secondary.
-assert.soon( allReplicated, "didn't replicate all docs", 5 * 60 * 1000 );
+assert.soon(allReplicated, "didn't replicate all docs", 5 * 60 * 1000);