author    David Storch <david.storch@10gen.com>  2014-05-06 19:00:56 -0400
committer David Storch <david.storch@10gen.com>  2014-05-06 19:00:56 -0400
commit    72380726608df663a85bee24d69a20ed2ca8287d (patch)
tree      735b7724ddc814fdf385d754bd7921975b5de491 /jstests/disk
parent    3061ab54eb2cc642a279becfca0b93f5e17db117 (diff)
Revert "SERVER-13741 Migrate remaining tests to use write commands"
This reverts commit 87dc3ae516e1d12a632dc604710661e38ed7b3dd.
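
In concrete terms, the revert moves these tests from the write-command assertion helpers back to explicit getLastError checks after each write. A minimal sketch of the two styles for context, assuming a shell connection `db`; the collection name and documents are illustrative, not taken from the tests below:

    // Write-command style (added by SERVER-13741): insert() returns a result
    // object, and the shell helpers assert on it directly.
    var coll = db.getCollection("example");   // illustrative collection name
    var res = coll.insert({ a: 1 });
    assert.writeOK(res);                      // throws if the write reported an error
    // assert.writeError(res) is the converse, used where the test expects the write to fail.

    // Legacy getLastError style (restored by this revert): save() reports nothing
    // by itself, so the test asks the server for the last error afterwards.
    coll.save({ a: 1 });
    assert(!db.getLastError());               // empty error string means the write succeeded
    // A failing write leaves a non-empty error string:
    //   coll.save(docThatFails); assert(db.getLastError().length);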
Diffstat (limited to 'jstests/disk')
-rw-r--r--  jstests/disk/diskfull.js                    10
-rw-r--r--  jstests/disk/killall.js                      3
-rw-r--r--  jstests/disk/preallocate_directoryperdb.js  16
-rw-r--r--  jstests/disk/quota.js                       10
-rw-r--r--  jstests/disk/quota2.js                      16
5 files changed, 28 insertions, 27 deletions
diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js
index f236a9d4bc7..deaab67a969 100644
--- a/jstests/disk/diskfull.js
+++ b/jstests/disk/diskfull.js
@@ -22,16 +22,16 @@ if ( doIt ) {
m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1", '--nojournal' );
d = m.getDB( "diskfulltest" );
c = d.getCollection( "diskfulltest" );
- assert.writeError(c.insert( { a: 6 } ));
-
+ c.save( { a: 6 } );
+ assert(d.getLastError().length );
+ printjson( d.getLastErrorObj() );
assert.soon(
function() { c.save( { a : 6 } );
return rawMongoProgramOutput().match( /file allocation failure/ );
},
"didn't see 'file allocation failure'" );
- res = assert.writeError(c.insert({ a: 6 }));
- var errmsg = res.getWriteError().errmsg;
- assert.eq(errmsg, "Can't take a write lock while out of disk space"); // every following fail
+ c.save( { a: 6 } );
+ assert.eq(d.getLastError(), "Can't take a write lock while out of disk space"); // every following fail
sleep( 3000 );
diff --git a/jstests/disk/killall.js b/jstests/disk/killall.js
index 3be9f530780..a46a3588241 100644
--- a/jstests/disk/killall.js
+++ b/jstests/disk/killall.js
@@ -21,7 +21,8 @@ var mongod = startMongod( "--port", port, "--dbpath", dbpath, "--nohttpinterface
var db = mongod.getDB( "test" );
var collection = db.getCollection( baseName );
-assert.writeOK(collection.insert({}));
+collection.save( {} );
+assert( ! db.getLastError() );
s1 = startParallelShell( "db." + baseName + ".count( { $where: function() { while( 1 ) { ; } } } )", port );
// HACK(schwerin): startParallelShell's return value should allow you to block until the command has
diff --git a/jstests/disk/preallocate_directoryperdb.js b/jstests/disk/preallocate_directoryperdb.js
index 38f55866f60..8b3d551b5d4 100644
--- a/jstests/disk/preallocate_directoryperdb.js
+++ b/jstests/disk/preallocate_directoryperdb.js
@@ -22,15 +22,14 @@ function checkDb2DirAbsent() {
var m = startMongod( "--smallfiles", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
db = m.getDB( baseName );
db2 = m.getDB( baseName2 );
-var bulk = db[ baseName ].initializeUnorderedBulkOp();
-var bulk2 = db2[ baseName2 ].initializeUnorderedBulkOp();
-var big = new Array( 5000 ).toString();
+c = db[ baseName ];
+c2 = db2[ baseName2 ];
+big = new Array( 5000 ).toString();
for( var i = 0; i < 3000; ++i ) {
- bulk.insert({ b:big });
- bulk2.insert({ b:big });
+ c.save( { b:big } );
+ c2.save( { b:big } );
+ db.getLastError();
}
-assert.writeOK(bulk.execute());
-assert.writeOK(bulk2.execute());
// Due to our write pattern, we expect db2's .3 file to be queued up in the file
// allocator behind db's .3 file at the time db2 is dropped. This will
@@ -44,7 +43,8 @@ db.dropDatabase();
// Try writing a new database, to ensure file allocator is still working.
db3 = m.getDB( baseName3 );
c3 = db[ baseName3 ];
-assert.writeOK(c3.insert( {} ));
+c3.save( {} );
+assert( !db3.getLastError() );
assert.eq( 1, c3.count() );
checkDb2DirAbsent();
diff --git a/jstests/disk/quota.js b/jstests/disk/quota.js
index e8476072ff1..aa8963c6263 100644
--- a/jstests/disk/quota.js
+++ b/jstests/disk/quota.js
@@ -11,11 +11,10 @@ db = m.getDB( baseName );
big = new Array( 10000 ).toString();
// Insert documents until quota is exhausted.
-var coll = db[ baseName ];
-var res = coll.insert({ b: big });
-while( !res.hasWriteError() ) {
- res = coll.insert({ b: big });
+while( !db.getLastError() ) {
+ db[ baseName ].save( {b:big} );
}
+printjson( db.getLastError() );
dotTwoDataFile = baseName + ".2";
files = listFiles( dbpath );
@@ -28,7 +27,8 @@ dotTwoDataFile = "local" + ".2";
// Check that quota does not apply to local db, and a .2 file can be created.
l = m.getDB( "local" )[ baseName ];
for( i = 0; i < 10000; ++i ) {
- assert.writeOK(l.insert({ b: big }));
+ l.save( {b:big} );
+ assert( !db.getLastError() );
dotTwoFound = false;
if ( i % 100 != 0 ) {
continue;
diff --git a/jstests/disk/quota2.js b/jstests/disk/quota2.js
index cdeda1a2a23..a5f07abec8a 100644
--- a/jstests/disk/quota2.js
+++ b/jstests/disk/quota2.js
@@ -13,12 +13,12 @@ db = m.getDB( baseName );
big = new Array( 10000 ).toString();
// Insert documents until quota is exhausted.
-var coll = db[ baseName ];
-var res = coll.insert({ b: big });
-while( !res.hasWriteError() ) {
- res = coll.insert({ b: big });
+while( !db.getLastError() ) {
+ db[ baseName ].save( {b:big} );
}
+db.resetError();
+
// Trigger allocation of an additional file for a 'special' namespace.
for( n = 0; !db.getLastError(); ++n ) {
db.createCollection( '' + n );
@@ -27,10 +27,10 @@ for( n = 0; !db.getLastError(); ++n ) {
// Check that new docs are saved in the .0 file.
for( i = 0; i < n; ++i ) {
c = db[ ''+i ];
- res = c.insert({ b: big });
- if( !res.hasWriteError() ) {
- assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file );
+ c.save( {b:big} );
+ if( !db.getLastError() ) {
+ assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file );
}
}
-}
+}
\ No newline at end of file