summary refs log tree commit diff
path: root/jstests/disk
diff options
context:
space:
mode:
author Randolph Tan <randolph@10gen.com> 2014-05-14 14:11:11 -0400
committer Randolph Tan <randolph@10gen.com> 2014-05-15 14:04:11 -0400
commit 0dabee8227d445a18fa5e8e49b2be60ba2a0beef (patch)
tree 5cd9058a4483202c2f7e2ab6f5c44079a74ac157 /jstests/disk
parent 4de88387eec6c0bb08b10d0ba1574a656f56232d (diff)
download mongo-0dabee8227d445a18fa5e8e49b2be60ba2a0beef.tar.gz
SERVER-13741 Migrate remaining tests to use write commands
Diffstat (limited to 'jstests/disk')
-rw-r--r--jstests/disk/diskfull.js10
-rw-r--r--jstests/disk/killall.js3
-rw-r--r--jstests/disk/preallocate_directoryperdb.js16
-rw-r--r--jstests/disk/quota.js10
-rw-r--r--jstests/disk/quota2.js16
5 files changed, 27 insertions, 28 deletions
diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js
index deaab67a969..f236a9d4bc7 100644
--- a/jstests/disk/diskfull.js
+++ b/jstests/disk/diskfull.js
@@ -22,16 +22,16 @@ if ( doIt ) {
m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1", '--nojournal' );
d = m.getDB( "diskfulltest" );
c = d.getCollection( "diskfulltest" );
- c.save( { a: 6 } );
- assert(d.getLastError().length );
- printjson( d.getLastErrorObj() );
+ assert.writeError(c.insert( { a: 6 } ));
+
assert.soon(
function() { c.save( { a : 6 } );
return rawMongoProgramOutput().match( /file allocation failure/ );
},
"didn't see 'file allocation failure'" );
- c.save( { a: 6 } );
- assert.eq(d.getLastError(), "Can't take a write lock while out of disk space"); // every following fail
+ res = assert.writeError(c.insert({ a: 6 }));
+ var errmsg = res.getWriteError().errmsg;
+ assert.eq(errmsg, "Can't take a write lock while out of disk space"); // every following fail
sleep( 3000 );
diff --git a/jstests/disk/killall.js b/jstests/disk/killall.js
index a46a3588241..3be9f530780 100644
--- a/jstests/disk/killall.js
+++ b/jstests/disk/killall.js
@@ -21,8 +21,7 @@ var mongod = startMongod( "--port", port, "--dbpath", dbpath, "--nohttpinterface
var db = mongod.getDB( "test" );
var collection = db.getCollection( baseName );
-collection.save( {} );
-assert( ! db.getLastError() );
+assert.writeOK(collection.insert({}));
s1 = startParallelShell( "db." + baseName + ".count( { $where: function() { while( 1 ) { ; } } } )", port );
// HACK(schwerin): startParallelShell's return value should allow you to block until the command has
diff --git a/jstests/disk/preallocate_directoryperdb.js b/jstests/disk/preallocate_directoryperdb.js
index 8b3d551b5d4..38f55866f60 100644
--- a/jstests/disk/preallocate_directoryperdb.js
+++ b/jstests/disk/preallocate_directoryperdb.js
@@ -22,14 +22,15 @@ function checkDb2DirAbsent() {
var m = startMongod( "--smallfiles", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
db = m.getDB( baseName );
db2 = m.getDB( baseName2 );
-c = db[ baseName ];
-c2 = db2[ baseName2 ];
-big = new Array( 5000 ).toString();
+var bulk = db[ baseName ].initializeUnorderedBulkOp();
+var bulk2 = db2[ baseName2 ].initializeUnorderedBulkOp();
+var big = new Array( 5000 ).toString();
for( var i = 0; i < 3000; ++i ) {
- c.save( { b:big } );
- c2.save( { b:big } );
- db.getLastError();
+ bulk.insert({ b:big });
+ bulk2.insert({ b:big });
}
+assert.writeOK(bulk.execute());
+assert.writeOK(bulk2.execute());
// Due to our write pattern, we expect db2's .3 file to be queued up in the file
// allocator behind db's .3 file at the time db2 is dropped. This will
@@ -43,8 +44,7 @@ db.dropDatabase();
// Try writing a new database, to ensure file allocator is still working.
db3 = m.getDB( baseName3 );
c3 = db[ baseName3 ];
-c3.save( {} );
-assert( !db3.getLastError() );
+assert.writeOK(c3.insert( {} ));
assert.eq( 1, c3.count() );
checkDb2DirAbsent();
diff --git a/jstests/disk/quota.js b/jstests/disk/quota.js
index aa8963c6263..e8476072ff1 100644
--- a/jstests/disk/quota.js
+++ b/jstests/disk/quota.js
@@ -11,10 +11,11 @@ db = m.getDB( baseName );
big = new Array( 10000 ).toString();
// Insert documents until quota is exhausted.
-while( !db.getLastError() ) {
- db[ baseName ].save( {b:big} );
+var coll = db[ baseName ];
+var res = coll.insert({ b: big });
+while( !res.hasWriteError() ) {
+ res = coll.insert({ b: big });
}
-printjson( db.getLastError() );
dotTwoDataFile = baseName + ".2";
files = listFiles( dbpath );
@@ -27,8 +28,7 @@ dotTwoDataFile = "local" + ".2";
// Check that quota does not apply to local db, and a .2 file can be created.
l = m.getDB( "local" )[ baseName ];
for( i = 0; i < 10000; ++i ) {
- l.save( {b:big} );
- assert( !db.getLastError() );
+ assert.writeOK(l.insert({ b: big }));
dotTwoFound = false;
if ( i % 100 != 0 ) {
continue;
diff --git a/jstests/disk/quota2.js b/jstests/disk/quota2.js
index a5f07abec8a..cdeda1a2a23 100644
--- a/jstests/disk/quota2.js
+++ b/jstests/disk/quota2.js
@@ -13,12 +13,12 @@ db = m.getDB( baseName );
big = new Array( 10000 ).toString();
// Insert documents until quota is exhausted.
-while( !db.getLastError() ) {
- db[ baseName ].save( {b:big} );
+var coll = db[ baseName ];
+var res = coll.insert({ b: big });
+while( !res.hasWriteError() ) {
+ res = coll.insert({ b: big });
}
-db.resetError();
-
// Trigger allocation of an additional file for a 'special' namespace.
for( n = 0; !db.getLastError(); ++n ) {
db.createCollection( '' + n );
@@ -27,10 +27,10 @@ for( n = 0; !db.getLastError(); ++n ) {
// Check that new docs are saved in the .0 file.
for( i = 0; i < n; ++i ) {
c = db[ ''+i ];
- c.save( {b:big} );
- if( !db.getLastError() ) {
- assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file );
+ res = c.insert({ b: big });
+ if( !res.hasWriteError() ) {
+ assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file );
}
}
-} \ No newline at end of file
+}