Diffstat (limited to 'jstests/slow2')
-rwxr-xr-x  jstests/slow2/32bit.js                      | 118
-rw-r--r--  jstests/slow2/conc_update.js                |  48
-rw-r--r--  jstests/slow2/dur_big_atomic_update.js      |  21
-rw-r--r--  jstests/slow2/mr_during_migrate.js          |   6
-rw-r--r--  jstests/slow2/replsets_killop.js            |  10
-rw-r--r--  jstests/slow2/replsets_prefetch_stress.js   |   8
-rw-r--r--  jstests/slow2/rollback4.js                  |   7
7 files changed, 109 insertions, 109 deletions
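
The hunks below all swap between the same two mongo shell write patterns: the removed (-) side batches writes through the unordered bulk API and asserts on the combined result, while the added (+) side issues individual writes and then consults getLastError. A minimal sketch of the two patterns for comparison; the database name, collection name, and document count are illustrative only and not taken from any single test:

// Sketch only: 'test_sketch', 'example', and the count of 1000 are made-up values.
var coll = db.getSisterDB('test_sketch').example;

// Bulk pattern (the '-' side of the hunks): queue writes in an unordered
// bulk operation, execute once, and assert that the whole batch succeeded.
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; i++) {
    bulk.insert({ x: i });
}
assert.writeOK(bulk.execute());

// Legacy pattern (the '+' side of the hunks): write one document at a time,
// then check the outcome of the last write with getLastError.
for (var i = 0; i < 1000; i++) {
    coll.insert({ x: i });
}
assert.eq(null, coll.getDB().getLastError());
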
diff --git a/jstests/slow2/32bit.js b/jstests/slow2/32bit.js
index a149ea3e8dd..d80cc7821c3 100755
--- a/jstests/slow2/32bit.js
+++ b/jstests/slow2/32bit.js
@@ -7,97 +7,97 @@ if (forceSeedToBe)
function f() {
seed = forceSeedToBe || Math.random();
-
+
pass = 1;
var mydb = db.getSisterDB( "test_32bit" );
mydb.dropDatabase();
while( 1 ) {
- if( pass >= 2 )
- break;
+ if( pass >= 2 )
+ break;
print("32bit.js PASS #" + pass);
pass++;
-
+
t = mydb.colltest_32bit;
print("seed=" + seed);
-
+
t.insert({x:1});
t.ensureIndex({a:1});
t.ensureIndex({b:1}, true);
t.ensureIndex({x:1});
if( Math.random() < 0.3 )
- t.ensureIndex({c:1});
+ t.ensureIndex({c:1});
t.ensureIndex({d:1});
t.ensureIndex({e:1});
t.ensureIndex({f:1});
-
+
big = 'a b';
big = big + big;
k = big;
big = big + big;
big = big + big;
big = big + big;
-
+
a = 0;
c = 'kkk';
var start = new Date();
- while( 1 ) {
- b = Math.random(seed);
- d = c + -a;
+ while( 1 ) {
+ b = Math.random(seed);
+ d = c + -a;
f = Math.random(seed) + a;
a++;
- cc = big;
- if( Math.random(seed) < .1 )
- cc = null;
- var res = t.insert({ a: a, b: b, c: cc, d: d, f: f });
- if( Math.random(seed) < 0.01 ) {
- if (res.hasWriteError()) {
- // Presumably we have mmap error on 32 bit. try a few more manipulations
- // attempting to break things.
- t.insert({a:33,b:44,c:55,d:66,f:66});
- t.insert({a:33,b:44000,c:55,d:66});
- t.insert({a:33,b:440000,c:55});
- t.insert({a:33,b:4400000});
- t.update({a:20},{'$set':{c:'abc'}});
- t.update({a:21},{'$set':{c:'aadsfbc'}});
- t.update({a:22},{'$set':{c:'c'}});
- t.update({a:23},{'$set':{b:cc}});
- t.remove({a:22});
- break;
- }
-
- t.remove({a:a});
- t.remove({b:Math.random(seed)});
- t.insert({e:1});
- t.insert({f:'aaaaaaaaaa'});
-
+ cc = big;
+ if( Math.random(seed) < .1 )
+ cc = null;
+ t.insert({a:a,b:b,c:cc,d:d,f:f});
+ if( Math.random(seed) < 0.01 ) {
+
+ if( mydb.getLastError() ) {
+ /* presumably we have mmap error on 32 bit. try a few more manipulations attempting to break things */
+ t.insert({a:33,b:44,c:55,d:66,f:66});
+ t.insert({a:33,b:44000,c:55,d:66});
+ t.insert({a:33,b:440000,c:55});
+ t.insert({a:33,b:4400000});
+ t.update({a:20},{'$set':{c:'abc'}});
+ t.update({a:21},{'$set':{c:'aadsfbc'}});
+ t.update({a:22},{'$set':{c:'c'}});
+ t.update({a:23},{'$set':{b:cc}});
+ t.remove({a:22});
+ break;
+ }
+
+ t.remove({a:a});
+ t.remove({b:Math.random(seed)});
+ t.insert({e:1});
+ t.insert({f:'aaaaaaaaaa'});
+
if( Math.random() < 0.00001 ) { print("remove cc"); t.remove({c:cc}); }
if( Math.random() < 0.0001 ) { print("update cc"); t.update({c:cc},{'$set':{c:1}},false,true); }
if( Math.random() < 0.00001 ) { print("remove e"); t.remove({e:1}); }
- }
- if (a == 20000 ) {
- var delta_ms = (new Date())-start;
- // 2MM / 20000 = 100. 1000ms/sec.
- var eta_secs = delta_ms * (100 / 1000);
- print("32bit.js eta_secs:" + eta_secs);
- if( eta_secs > 1000 ) {
- print("32bit.js machine is slow, stopping early. a:" + a);
- mydb.dropDatabase();
- return;
- }
- }
- if( a % 100000 == 0 ) {
- print(a);
- // on 64 bit we won't error out, so artificially stop. on 32 bit we will hit
- // mmap limit ~1.6MM but may vary by a factor of 2x by platform
- if( a >= 2200000 ) {
+ }
+ if (a == 20000 ) {
+ var delta_ms = (new Date())-start;
+ // 2MM / 20000 = 100. 1000ms/sec.
+ var eta_secs = delta_ms * (100 / 1000);
+ print("32bit.js eta_secs:" + eta_secs);
+ if( eta_secs > 1000 ) {
+ print("32bit.js machine is slow, stopping early. a:" + a);
+ mydb.dropDatabase();
+ return;
+ }
+ }
+ if( a % 100000 == 0 ) {
+ print(a);
+ // on 64 bit we won't error out, so artificially stop. on 32 bit we will hit mmap limit ~1.6MM but may
+ // vary by a factor of 2x by platform
+ if( a >= 2200000 ) {
mydb.dropDatabase();
- return;
- }
+ return;
+ }
}
- }
+ }
print("count: " + t.count());
var res = t.validate();
@@ -105,11 +105,11 @@ function f() {
print("32bit.js FAIL validating");
print(res.result);
printjson(res);
- //mydb.dropDatabase();
- throw "fail validating 32bit.js";
+ //mydb.dropDatabase();
+ throw "fail validating 32bit.js";
}
- mydb.dropDatabase();
+ mydb.dropDatabase();
}
print("32bit.js SUCCESS");
diff --git a/jstests/slow2/conc_update.js b/jstests/slow2/conc_update.js
index 4ee5bd22ca7..0d778df047e 100644
--- a/jstests/slow2/conc_update.js
+++ b/jstests/slow2/conc_update.js
@@ -6,42 +6,46 @@ db.dropDatabase();
NRECORDS=3*1024*1024
print("loading "+NRECORDS+" documents (progress msg every 1024*1024 documents)")
-var bulk = db.conc.initializeUnorderedBulkOp();
-for (var i = 0; i < NRECORDS; i++) {
-    bulk.insert({ x: i });
+for (i=0; i<(NRECORDS); i++) {
+    db.conc.insert({x:i})
+    if ((i%(1024*1024))==0)
+        print("loaded " + i/(1024*1024) + " mibi-records")
}
-assert.writeOK(bulk.execute());
print("making an index (this will take a while)")
db.conc.ensureIndex({x:1})
var c1=db.conc.count({x:{$lt:NRECORDS}})
-updater = startParallelShell("db = db.getSisterDB('concurrency');\
-                              db.concflag.insert({ inprog: true });\
-                              sleep(20);\
-                              assert.writeOK(db.conc.update({}, \
-                              { $inc: { x: " + NRECORDS + "}}, false, true)); \
-                              assert.writeOK(db.concflag.update({}, { inprog: false }));");
+updater=startParallelShell("db=db.getSisterDB('concurrency');\
+                            db.concflag.insert( {inprog:true} );\
+                            sleep(20);\
+                            db.conc.update({}, {$inc:{x: "+NRECORDS+"}}, false, true);\
+                            e=db.getLastError();\
+                            print('update error: '+ e);\
+                            db.concflag.update({},{inprog:false});\
+                            assert.eq(e, null, 'update failed');");
-assert.soon( function(){ var x = db.concflag.findOne(); return x && x.inprog; } ,
+assert.soon( function(){ var x = db.concflag.findOne(); return x && x.inprog; } ,
             "wait for fork" , 30000 , 1 );
querycount=0;
decrements=0;
misses=0
-assert.soon(function(){
-    c2=db.conc.count({x:{$lt:NRECORDS}})
-    print(c2)
-    querycount++;
-    if (c2<c1)
-        decrements++;
-    else
-        misses++;
-    c1 = c2;
-    return ! db.concflag.findOne().inprog;
-}, "update never finished" , 2 * 60 * 60 * 1000 , 10 );
+assert.soon(
+    function(){
+        c2=db.conc.count({x:{$lt:NRECORDS}})
+        print(c2)
+        querycount++;
+        if (c2<c1)
+            decrements++;
+        else
+            misses++;
+        c1 = c2;
+        return ! db.concflag.findOne().inprog;
+    } ,
+    "update never finished" , 2 * 60 * 60 * 1000 , 10 );
print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
diff --git a/jstests/slow2/dur_big_atomic_update.js b/jstests/slow2/dur_big_atomic_update.js
index b8d3ba60be1..18a7c4a68f2 100644
--- a/jstests/slow2/dur_big_atomic_update.js
+++ b/jstests/slow2/dur_big_atomic_update.js
@@ -8,29 +8,26 @@ conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur", "--durOption
d = conn.getDB("test");
d.foo.drop();
-var bulk = d.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < 1024; i++){
-    bulk.insert({ _id: i });
+for (var i=0; i<1024; i++){
+    d.foo.insert({_id:i});
}
-assert.writeOK(bulk.execute());
big_string = 'xxxxxxxxxxxxxxxx';
while (big_string.length < 1024*1024) {
    big_string += big_string;
}
-var res = assert.writeOK(d.foo.update({ $atomic: 1 },
-                                      { $set: { big_string: big_string }},
-                                      false, true /* multi */ ));
-assert.eq(1024, res.nModified);
+d.foo.update({$atomic:1}, {$set: {big_string: big_string}}, false, /*multi*/true);
+err = d.getLastErrorObj();
+
+assert(err.err == null);
+assert(err.n == 1024);
d.dropDatabase();
-bulk = d.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < 1024; i++){
-    bulk.insert({ _id: i });
+for (var i=0; i<1024; i++){
+    d.foo.insert({_id:i});
}
-assert.writeOK(bulk.execute());
// Do it again but in a db.eval
d.eval(
diff --git a/jstests/slow2/mr_during_migrate.js b/jstests/slow2/mr_during_migrate.js
index 6ab9754c4f6..53ffd10fbec 100644
--- a/jstests/slow2/mr_during_migrate.js
+++ b/jstests/slow2/mr_during_migrate.js
@@ -17,13 +17,11 @@ var dataSize = 1024 // bytes, must be power of 2
var data = "x"
while( data.length < dataSize ) data += data
-var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < numDocs; i++ ){
-    bulk.insert({ _id: i, data: data });
+    coll.insert({ _id : i, data : data })
}
-assert.writeOK(bulk.execute());
-
// Make sure everything got inserted
+assert.eq( null, coll.getDB().getLastError() )
assert.eq( numDocs, coll.find().itcount() )
diff --git a/jstests/slow2/replsets_killop.js b/jstests/slow2/replsets_killop.js
index e035ae3cd40..3d3ee51f709 100644
--- a/jstests/slow2/replsets_killop.js
+++ b/jstests/slow2/replsets_killop.js
@@ -16,11 +16,11 @@ assert.soon( function() { return secondary.getDB( 'test' ).test.count() == 1; }
// Start a parallel shell to insert new documents on the primary.
inserter = startParallelShell(
-    'var bulk = db.test.initializeUnorderedBulkOp(); \
-    for( i = 1; i < ' + numDocs + '; ++i ) { \
-        bulk.insert({ a: i }); \
-    } \
-    bulk.execute();'
+    'for( i = 1; i < ' + numDocs + '; ++i ) { \
+        db.test.save( { a:i } ); \
+        sleep( 1 ); \
+    } \
+    db.getLastError();'
    );
// Periodically kill replication get mores.
diff --git a/jstests/slow2/replsets_prefetch_stress.js b/jstests/slow2/replsets_prefetch_stress.js
index 4273cb594fd..dcd523dd3a4 100644
--- a/jstests/slow2/replsets_prefetch_stress.js
+++ b/jstests/slow2/replsets_prefetch_stress.js
@@ -8,7 +8,8 @@ replTest.initiate();
var master = replTest.getMaster();
c = master.getDB( 'd' )[ 'c' ];
-assert.writeOK(c.insert({ _id: 0 }));
+c.insert( { _id:0 } );
+master.getDB( 'd' ).getLastError();
replTest.awaitReplication();
// Create a:1 index.
@@ -21,11 +22,10 @@ for( i = 0; i < 10000; ++i ) {
}
// Insert documents with multikey values.
-var bulk = c.initializeUnorderedBulkOp();
for( i = 0; i < 1000; ++i ) {
-    bulk.insert({ a: multikeyValues });
+    c.insert( { a:multikeyValues } );
}
-assert.writeOK(bulk.execute());
+master.getDB( 'd' ).getLastError();
replTest.awaitReplication(300000);
// Check document counts on all nodes. On error a node might go down or fail to sync all data, see
diff --git a/jstests/slow2/rollback4.js b/jstests/slow2/rollback4.js
index 7d511568ed9..4929e3777ff 100644
--- a/jstests/slow2/rollback4.js
+++ b/jstests/slow2/rollback4.js
@@ -31,11 +31,12 @@ replTest.awaitReplication();
// Insert into master
var big = { b:new Array( 1000 ).toString() };
-var bulk = master.getDB('db').c.initializeUnorderedBulkOp();
for( var i = 0; i < 1000000; ++i ) {
-    bulk.insert( big );
+    if ( i % 10000 == 0 ) {
+        print( i );
+    }
+    master.getDB( 'db' ).c.insert( big );
}
-assert.writeOK(bulk.execute());
// Stop master
replTest.stop( 0 );