author    Randolph Tan <randolph@10gen.com>  2014-04-25 14:04:36 -0400
committer Randolph Tan <randolph@10gen.com>  2014-05-06 16:32:44 -0400
commit    87dc3ae516e1d12a632dc604710661e38ed7b3dd (patch)
tree      3a483a3d0c38ce00a7f4d7dba0e9cba7f7eba5f3 /jstests/noPassthrough
parent    6b945ec15c61f6bd4bfbaf382624d886ec8441d2 (diff)
download  mongo-87dc3ae516e1d12a632dc604710661e38ed7b3dd.tar.gz
SERVER-13741 Migrate remaining tests to use write commands
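The recurring pattern in this commit: legacy fire-and-forget inserts checked afterwards via getLastError become batched writes through the bulk API, whose execute() result is checked with assert.writeOK(). A minimal sketch of the before/after shape (collection t and count N are placeholders, not names from the commit):

    // Before: one fire-and-forget insert per document; errors only
    // surface if the test remembers to call getLastError afterwards.
    for (var i = 0; i < N; i++)
        t.insert({ _id: i });
    assert(!db.getLastError());

    // After: inserts are batched into a single unordered bulk operation;
    // execute() returns a result object that assert.writeOK() inspects
    // for write errors.
    var bulk = t.initializeUnorderedBulkOp();
    for (var i = 0; i < N; i++) {
        bulk.insert({ _id: i });
    }
    assert.writeOK(bulk.execute());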
Diffstat (limited to 'jstests/noPassthrough')
-rw-r--r--  jstests/noPassthrough/disk_reuse1.js                                |  27
-rw-r--r--  jstests/noPassthrough/geo_full.js                                   | 739
-rw-r--r--  jstests/noPassthrough/geo_mnypts_plus_fields.js                     |  10
-rw-r--r--  jstests/noPassthrough/gle_after_split_failure_during_migration.js   | 136
-rw-r--r--  jstests/noPassthrough/indexbg1.js                                   |  30
-rw-r--r--  jstests/noPassthrough/indexbg2.js                                   |   2
-rw-r--r--  jstests/noPassthrough/query_yield1.js                               |   8
-rw-r--r--  jstests/noPassthrough/query_yield2.js                               |   8
-rw-r--r--  jstests/noPassthrough/repair2.js                                    |  19
-rw-r--r--  jstests/noPassthrough/sync1.js                                      |  49
-rw-r--r--  jstests/noPassthrough/sync4.js                                      |  19
-rw-r--r--  jstests/noPassthrough/sync8.js                                      |  13
-rw-r--r--  jstests/noPassthrough/update_server-5552.js                         |  10
-rw-r--r--  jstests/noPassthrough/update_yield1.js                              |   7
14 files changed, 417 insertions(+), 660 deletions(-)
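Updates and removes migrate the same way: the affected-document count moves from getLastErrorObj().n to the WriteResult the shell now returns. A sketch of that pattern (query and expected are placeholders for illustration):

    // Before: the n field of the GLE object reported how many
    // documents the preceding write touched.
    t.update(query, { $set: { padding: "x" } }, false, true);
    assert.eq(expected, t.getDB().getLastErrorObj().n);

    // After: update() and remove() return a WriteResult carrying
    // the counts directly, with no extra round trip.
    var res = t.update(query, { $set: { padding: "x" } }, false, true);
    assert.eq(expected, res.nModified);
    res = t.remove(query);
    assert.eq(expected, res.nRemoved);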
diff --git a/jstests/noPassthrough/disk_reuse1.js b/jstests/noPassthrough/disk_reuse1.js
index 249985edd1a..c208dcefb1f 100644
--- a/jstests/noPassthrough/disk_reuse1.js
+++ b/jstests/noPassthrough/disk_reuse1.js
@@ -16,31 +16,36 @@ while ( s.length < 1024 )
state = {}
-for ( i=0; i<N; i++ )
- t.insert( { _id : i , s : s } );
+var bulk = t.initializeUnorderedBulkOp();
+for (var i = 0; i < N; i++) {
+ bulk.insert({ _id: i, s: s });
+}
+assert.writeOK(bulk.execute());
orig = t.stats();
t.remove({});
-for ( i=0; i<N; i++ )
- t.insert( { _id : i , s : s } );
+bulk = t.initializeUnorderedBulkOp();
+for (i = 0; i < N; i++) {
+ bulk.insert({ _id: i, s: s });
+}
+assert.writeOK(bulk.execute());
assert.eq( orig.storageSize , t.stats().storageSize , "A" )
-for ( j=0; j<100; j++ ){
- for ( i=0; i<N; i++ ){
+for (j = 0; j < 100; j++){
+ for (i = 0; i < N; i++){
+ bulk = t.initializeUnorderedBulkOp();
var r = Math.random();
if ( r > .5 )
- t.remove( { _id : i } )
+ bulk.find({ _id: i }).remove();
else
- t.insert( { _id : i , s : s } )
+ bulk.find({ _id: i }).upsert().updateOne({ _id: i, s: s });
}
- //printjson( t.stats() );
-
+ assert.writeOK(bulk.execute());
assert.eq( orig.storageSize , t.stats().storageSize , "B" + j )
}
-
test.stop();
diff --git a/jstests/noPassthrough/geo_full.js b/jstests/noPassthrough/geo_full.js
index ffeb26a2606..9d9203242d3 100644
--- a/jstests/noPassthrough/geo_full.js
+++ b/jstests/noPassthrough/geo_full.js
@@ -22,96 +22,89 @@ testServer = new SlowWeeklyMongod( "geo_full" )
db = testServer.getDB( "test" );
var randEnvironment = function(){
-
- // Normal earth environment
- if( Random.rand() < 0.5 ){
- return { max : 180,
- min : -180,
- bits : Math.floor( Random.rand() * 32 ) + 1,
- earth : true,
- bucketSize : 360 / ( 4 * 1024 * 1024 * 1024 ) }
- }
-
- var scales = [ 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000 ]
- var scale = scales[ Math.floor( Random.rand() * scales.length ) ]
- var offset = Random.rand() * scale
-
+
+ // Normal earth environment
+ if( Random.rand() < 0.5 ){
+ return { max : 180,
+ min : -180,
+ bits : Math.floor( Random.rand() * 32 ) + 1,
+ earth : true,
+ bucketSize : 360 / ( 4 * 1024 * 1024 * 1024 ) };
+ }
+
+ var scales = [ 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000 ]
+ var scale = scales[ Math.floor( Random.rand() * scales.length ) ]
+ var offset = Random.rand() * scale
+
var max = Random.rand() * scale + offset
- var min = - Random.rand() * scale + offset
- var bits = Math.floor( Random.rand() * 32 ) + 1
- var bits = Math.floor( Random.rand() * 32 ) + 1
- var range = max - min
+ var min = - Random.rand() * scale + offset
+ var bits = Math.floor( Random.rand() * 32 ) + 1
+ var bits = Math.floor( Random.rand() * 32 ) + 1
+ var range = max - min
var bucketSize = range / ( 4 * 1024 * 1024 * 1024 )
-
- return { max : max,
- min : min,
- bits : bits,
- earth : false,
- bucketSize : bucketSize }
-
-}
+
+ return { max : max,
+ min : min,
+ bits : bits,
+ earth : false,
+ bucketSize : bucketSize }
+};
var randPoint = function( env, query ) {
-
- if( query && Random.rand() > 0.5 )
- return query.exact
-
- if( env.earth )
- return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ]
-
- var range = env.max - env.min
- return [ Random.rand() * range + env.min, Random.rand() * range + env.min ];
+
+ if( query && Random.rand() > 0.5 )
+ return query.exact
+
+ if( env.earth )
+ return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ]
+
+ var range = env.max - env.min
+ return [ Random.rand() * range + env.min, Random.rand() * range + env.min ];
}
var randLocType = function( loc, wrapIn ){
- return randLocTypes( [ loc ], wrapIn )[0]
+ return randLocTypes( [ loc ], wrapIn )[0]
}
var randLocTypes = function( locs, wrapIn ) {
-
- var rLocs = []
-
- for( var i = 0; i < locs.length; i++ ){
+
+ var rLocs = []
+
+ for( var i = 0; i < locs.length; i++ ){
rLocs.push( locs[i] )
- // {x:1, y:1} \ne [1,1].
- //if( Random.rand() < 0.5 )
- //rLocs.push( { x : locs[i][0], y : locs[i][1] } )
- //else
- }
-
- if( wrapIn ){
- var wrappedLocs = []
- for( var i = 0; i < rLocs.length; i++ ){
- var wrapper = {}
- wrapper[wrapIn] = rLocs[i]
- wrappedLocs.push( wrapper )
- }
-
- return wrappedLocs
- }
-
- return rLocs
-
-}
+ }
+
+ if( wrapIn ){
+ var wrappedLocs = []
+ for( var i = 0; i < rLocs.length; i++ ){
+ var wrapper = {}
+ wrapper[wrapIn] = rLocs[i]
+ wrappedLocs.push( wrapper )
+ }
+
+ return wrappedLocs
+ }
+
+ return rLocs
+};
var randDataType = function() {
- var scales = [ 1, 10, 100, 1000, 10000 ]
- var docScale = scales[ Math.floor( Random.rand() * scales.length ) ]
- var locScale = scales[ Math.floor( Random.rand() * scales.length ) ]
-
- var numDocs = 40000
- var maxLocs = 40000
- // Make sure we don't blow past our test resources
- while( numDocs * maxLocs > 40000 ){
- numDocs = Math.floor( Random.rand() * docScale ) + 1
- maxLocs = Math.floor( Random.rand() * locScale ) + 1
- }
-
- return { numDocs : numDocs,
- maxLocs : maxLocs }
-
-}
+ var scales = [ 1, 10, 100, 1000, 10000 ]
+ var docScale = scales[ Math.floor( Random.rand() * scales.length ) ]
+ var locScale = scales[ Math.floor( Random.rand() * scales.length ) ]
+
+ var numDocs = 40000
+ var maxLocs = 40000
+ // Make sure we don't blow past our test resources
+ while( numDocs * maxLocs > 40000 ){
+ numDocs = Math.floor( Random.rand() * docScale ) + 1
+ maxLocs = Math.floor( Random.rand() * locScale ) + 1
+ }
+
+ return { numDocs : numDocs,
+ maxLocs : maxLocs }
+};
function deg2rad(arg) { return arg * Math.PI / 180.0; }
function rad2deg(arg) { return arg * 180.0 / Math.PI; }
@@ -140,194 +133,181 @@ function pointIsOK(startPoint, radius, env) {
}
var randQuery = function( env ) {
-
- var center = randPoint( env )
-
- var sphereRadius = -1
- var sphereCenter = null
- if( env.earth ){
- // Get a start point that doesn't require wrapping
- // TODO: Are we a bit too aggressive with wrapping issues?
- var i
- for( i = 0; i < 5; i++ ){
+ var center = randPoint( env )
+
+ var sphereRadius = -1
+ var sphereCenter = null
+ if( env.earth ){
+ // Get a start point that doesn't require wrapping
+ // TODO: Are we a bit too aggressive with wrapping issues?
+ var i
+ for( i = 0; i < 5; i++ ){
sphereRadius = Random.rand() * 45 * Math.PI / 180
sphereCenter = randPoint( env )
if (pointIsOK(sphereCenter, sphereRadius, env)) { break; }
- /*
- var t = db.testSphere; t.drop(); t.ensureIndex({ loc : "2d" }, env )
- try{ t.find({ loc : { $within : { $centerSphere : [ sphereCenter, sphereRadius ] } } } ).count(); var err; if( err = db.getLastError() ) throw err; }
- catch(e) { print( e ); continue }
- print( " Radius " + sphereRadius + " and center " + sphereCenter + " ok ! ")
- break;
- */
- }
- if( i == 5 ) sphereRadius = -1;
-
- }
-
- var box = [ randPoint( env ), randPoint( env ) ]
-
- var boxPoly = [[ box[0][0], box[0][1] ],
- [ box[0][0], box[1][1] ],
- [ box[1][0], box[1][1] ],
- [ box[1][0], box[0][1] ] ]
-
- if( box[0][0] > box[1][0] ){
- var swap = box[0][0]
- box[0][0] = box[1][0]
- box[1][0] = swap
- }
-
- if( box[0][1] > box[1][1] ){
- var swap = box[0][1]
- box[0][1] = box[1][1]
- box[1][1] = swap
- }
-
- return { center : center,
- radius : box[1][0] - box[0][0],
- exact : randPoint( env ),
- sphereCenter : sphereCenter,
- sphereRadius : sphereRadius,
- box : box,
- boxPoly : boxPoly }
-
-}
+ }
+ if( i == 5 ) sphereRadius = -1;
+
+ }
+
+ var box = [ randPoint( env ), randPoint( env ) ]
+
+ var boxPoly = [[ box[0][0], box[0][1] ],
+ [ box[0][0], box[1][1] ],
+ [ box[1][0], box[1][1] ],
+ [ box[1][0], box[0][1] ] ]
+
+ if( box[0][0] > box[1][0] ){
+ var swap = box[0][0]
+ box[0][0] = box[1][0]
+ box[1][0] = swap
+ }
+
+ if( box[0][1] > box[1][1] ){
+ var swap = box[0][1]
+ box[0][1] = box[1][1]
+ box[1][1] = swap
+ }
+ return { center : center,
+ radius : box[1][0] - box[0][0],
+ exact : randPoint( env ),
+ sphereCenter : sphereCenter,
+ sphereRadius : sphereRadius,
+ box : box,
+ boxPoly : boxPoly }
+};
var resultTypes = {
"exact" : function( loc ){
- return query.exact[0] == loc[0] && query.exact[1] == loc[1]
+ return query.exact[0] == loc[0] && query.exact[1] == loc[1]
},
"center" : function( loc ){
- return Geo.distance( query.center, loc ) <= query.radius
+ return Geo.distance( query.center, loc ) <= query.radius
},
"box" : function( loc ){
- return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
- loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1]
-
-},
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1]
+
+},
"sphere" : function( loc ){
- return ( query.sphereRadius >= 0 ? ( Geo.sphereDistance( query.sphereCenter, loc ) <= query.sphereRadius ) : false )
-},
+ return ( query.sphereRadius >= 0 ?
+ ( Geo.sphereDistance( query.sphereCenter, loc ) <= query.sphereRadius ) : false );
+},
"poly" : function( loc ){
- return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
- loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1]
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
}}
var queryResults = function( locs, query, results ){
-
- if( ! results["center"] ){
- for( var type in resultTypes ){
- results[type] = {
- docsIn : 0,
- docsOut : 0,
- locsIn : 0,
- locsOut : 0
- }
- }
- }
-
- var indResults = {}
- for( var type in resultTypes ){
- indResults[type] = {
- docIn : false,
- locsIn : 0,
- locsOut : 0
- }
- }
-
- for( var type in resultTypes ){
-
- var docIn = false
- for( var i = 0; i < locs.length; i++ ){
- if( resultTypes[type]( locs[i] ) ){
- results[type].locsIn++
- indResults[type].locsIn++
- indResults[type].docIn = true
- }
- else{
- results[type].locsOut++
- indResults[type].locsOut++
- }
- }
- if( indResults[type].docIn ) results[type].docsIn++
- else results[type].docsOut++
-
- }
-
- return indResults
-
+
+ if( ! results["center"] ){
+ for( var type in resultTypes ){
+ results[type] = {
+ docsIn : 0,
+ docsOut : 0,
+ locsIn : 0,
+ locsOut : 0
+ }
+ }
+ }
+
+ var indResults = {}
+ for( var type in resultTypes ){
+ indResults[type] = {
+ docIn : false,
+ locsIn : 0,
+ locsOut : 0
+ }
+ }
+
+ for( var type in resultTypes ){
+
+ var docIn = false
+ for( var i = 0; i < locs.length; i++ ){
+ if( resultTypes[type]( locs[i] ) ){
+ results[type].locsIn++
+ indResults[type].locsIn++
+ indResults[type].docIn = true
+ }
+ else{
+ results[type].locsOut++
+ indResults[type].locsOut++
+ }
+ }
+ if( indResults[type].docIn ) results[type].docsIn++
+ else results[type].docsOut++
+
+ }
+
+ return indResults
}
var randQueryAdditions = function( doc, indResults ){
-
- for( var type in resultTypes ){
- var choice = Random.rand()
- if( Random.rand() < 0.25 )
- doc[type] = ( indResults[type].docIn ? { docIn : "yes" } : { docIn : "no" } )
- else if( Random.rand() < 0.5 )
- doc[type] = ( indResults[type].docIn ? { docIn : [ "yes" ] } : { docIn : [ "no" ] } )
- else if( Random.rand() < 0.75 )
- doc[type] = ( indResults[type].docIn ? [ { docIn : "yes" } ] : [ { docIn : "no" } ] )
- else
- doc[type] = ( indResults[type].docIn ? [ { docIn : [ "yes" ] } ] : [ { docIn : [ "no" ] } ] )
- }
-
+
+ for( var type in resultTypes ){
+ var choice = Random.rand()
+ if( Random.rand() < 0.25 )
+ doc[type] = ( indResults[type].docIn ? { docIn : "yes" } : { docIn : "no" } )
+ else if( Random.rand() < 0.5 )
+ doc[type] = ( indResults[type].docIn ? { docIn : [ "yes" ] } : { docIn : [ "no" ] } )
+ else if( Random.rand() < 0.75 )
+ doc[type] = ( indResults[type].docIn ? [ { docIn : "yes" } ] : [ { docIn : "no" } ] )
+ else
+ doc[type] = ( indResults[type].docIn ? [{ docIn: [ "yes" ] }] : [{ docIn: [ "no" ] }]);
+ }
}
var randIndexAdditions = function( indexDoc ){
-
- for( var type in resultTypes ){
-
- if( Random.rand() < 0.5 ) continue;
-
- var choice = Random.rand()
- if( Random.rand() < 0.5 )
- indexDoc[type] = 1
- else
- indexDoc[type + ".docIn"] = 1
-
- }
-
-}
+
+ for( var type in resultTypes ){
+
+ if( Random.rand() < 0.5 ) continue;
+
+ var choice = Random.rand()
+ if( Random.rand() < 0.5 )
+ indexDoc[type] = 1
+ else
+ indexDoc[type + ".docIn"] = 1;
+ }
+};
var randYesQuery = function(){
-
- var choice = Math.floor( Random.rand() * 7 )
- if( choice == 0 )
- return { $ne : "no" }
- else if( choice == 1 )
- return "yes"
- else if( choice == 2 )
- return /^yes/
- else if( choice == 3 )
- return { $in : [ "good", "yes", "ok" ] }
- else if( choice == 4 )
- return { $exists : true }
- else if( choice == 5 )
- return { $nin : [ "bad", "no", "not ok" ] }
- else if( choice == 6 )
- return { $not : /^no/ }
+
+ var choice = Math.floor( Random.rand() * 7 )
+ if( choice == 0 )
+ return { $ne : "no" }
+ else if( choice == 1 )
+ return "yes"
+ else if( choice == 2 )
+ return /^yes/
+ else if( choice == 3 )
+ return { $in : [ "good", "yes", "ok" ] }
+ else if( choice == 4 )
+ return { $exists : true }
+ else if( choice == 5 )
+ return { $nin : [ "bad", "no", "not ok" ] }
+ else if( choice == 6 )
+ return { $not : /^no/ }
}
var locArray = function( loc ){
- if( loc.x ) return [ loc.x, loc.y ]
- if( ! loc.length ) return [ loc[0], loc[1] ]
- return loc
+ if( loc.x ) return [ loc.x, loc.y ]
+ if( ! loc.length ) return [ loc[0], loc[1] ]
+ return loc
}
var locsArray = function( locs ){
- if( locs.loc ){
- arr = []
- for( var i = 0; i < locs.loc.length; i++ ) arr.push( locArray( locs.loc[i] ) )
- return arr
- }
- else{
- arr = []
- for( var i = 0; i < locs.length; i++ ) arr.push( locArray( locs[i].loc ) )
- return arr
- }
+ if( locs.loc ){
+ arr = []
+ for( var i = 0; i < locs.loc.length; i++ ) arr.push( locArray( locs.loc[i] ) )
+ return arr
+ }
+ else{
+ arr = []
+ for( var i = 0; i < locs.length; i++ ) arr.push( locArray( locs[i].loc ) )
+ return arr
+ }
}
var minBoxSize = function( env, box ){
@@ -335,16 +315,16 @@ var minBoxSize = function( env, box ){
}
var minBucketScale = function( env, box ){
-
+
if( box.length && box[0].length )
box = [ box[0][0] - box[1][0], box[0][1] - box[1][1] ]
-
+
if( box.length )
box = Math.max( box[0], box[1] )
-
+
print( box )
print( env.bucketSize )
-
+
return Math.ceil( Math.log( box / env.bucketSize ) / Math.log( 2 ) )
}
@@ -352,119 +332,114 @@ var minBucketScale = function( env, box ){
// TODO: Add spherical $uniqueDocs tests
var numTests = 100
-// Our seed will change every time this is run, but
+// Our seed will change every time this is run, but
// each individual test will be reproducible given
// that seed and test number
var seed = new Date().getTime()
//seed = 175 + 288 + 12
for ( var test = 0; test < numTests; test++ ) {
-
- Random.srand( seed + test );
- //Random.srand( 42240 )
- //Random.srand( 7344 )
- var t = db.testAllGeo
- t.drop()
-
- print( "Generating test environment #" + test )
- var env = randEnvironment()
- //env.bits = 11
- var query = randQuery( env )
- var data = randDataType()
- //data.numDocs = 5; data.maxLocs = 1;
- var paddingSize = Math.floor( Random.rand() * 10 + 1 )
- var results = {}
- var totalPoints = 0
- print( "Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs + " locs " )
-
- // Index after a random number of docs added
- var indexIt = Math.floor( Random.rand() * data.numDocs )
-
- for ( var i = 0; i < data.numDocs; i++ ) {
-
- if( indexIt == i ){
- var indexDoc = { "locs.loc" : "2d" }
- randIndexAdditions( indexDoc )
-
- // printjson( indexDoc )
-
- t.ensureIndex( indexDoc, env )
- assert.isnull( db.getLastError() )
- }
-
- var numLocs = Math.floor( Random.rand() * data.maxLocs + 1 )
- totalPoints += numLocs
-
- var multiPoint = []
- for ( var p = 0; p < numLocs; p++ ) {
- var point = randPoint( env, query )
- multiPoint.push( point )
- }
-
- var indResults = queryResults( multiPoint, query, results )
-
- var doc
- // Nest the keys differently
- if( Random.rand() < 0.5 )
- doc = { locs : { loc : randLocTypes( multiPoint ) } }
- else
- doc = { locs : randLocTypes( multiPoint, "loc" ) }
-
- randQueryAdditions( doc, indResults )
-
- //printjson( doc )
- doc._id = i
- t.insert( doc )
-
- }
-
- var padding = "x"
- for( var i = 0; i < paddingSize; i++ ) padding = padding + padding
-
- print( padding )
-
- printjson( { seed : seed,
- test: test,
- env : env,
- query : query,
- data : data,
- results : results,
- paddingSize : paddingSize } )
-
- // exact
- print( "Exact query..." )
- assert.eq( results.exact.docsIn, t.find( { "locs.loc" : randLocType( query.exact ), "exact.docIn" : randYesQuery() } ).count() )
-
- // $center
- print( "Center query..." )
- print( "Min box : " + minBoxSize( env, query.radius ) )
- assert.eq( results.center.docsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).count() )
-
- print( "Center query update..." )
- // printjson( t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).toArray() )
- t.update( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : true } }, "center.docIn" : randYesQuery() }, { $set : { "centerPaddingA" : padding } }, false, true )
- assert.eq( results.center.docsIn, t.getDB().getLastErrorObj().n )
-
- if( query.sphereRadius >= 0 ){
-
- print( "Center sphere query...")
- // $centerSphere
- assert.eq( results.sphere.docsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ] } }, "sphere.docIn" : randYesQuery() } ).count() )
-
- print( "Center sphere query update..." )
- // printjson( t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).toArray() )
- t.update( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ], $uniqueDocs : true } }, "sphere.docIn" : randYesQuery() }, { $set : { "spherePaddingA" : padding } }, false, true )
- assert.eq( results.sphere.docsIn, t.getDB().getLastErrorObj().n )
-
- }
-
- // $box
- print( "Box query..." )
- assert.eq( results.box.docsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : true } }, "box.docIn" : randYesQuery() } ).count() )
-
- // $polygon
- print( "Polygon query..." )
- assert.eq( results.poly.docsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ).count() )
+
+ Random.srand( seed + test );
+ //Random.srand( 42240 )
+ //Random.srand( 7344 )
+ var t = db.testAllGeo
+ t.drop()
+
+ print( "Generating test environment #" + test )
+ var env = randEnvironment()
+ //env.bits = 11
+ var query = randQuery( env )
+ var data = randDataType()
+ //data.numDocs = 5; data.maxLocs = 1;
+ var paddingSize = Math.floor( Random.rand() * 10 + 1 )
+ var results = {}
+ var totalPoints = 0
+ print( "Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs + " locs " )
+
+ var bulk = t.initializeUnorderedBulkOp();
+ for ( var i = 0; i < data.numDocs; i++ ) {
+ var numLocs = Math.floor( Random.rand() * data.maxLocs + 1 )
+ totalPoints += numLocs
+
+ var multiPoint = []
+ for ( var p = 0; p < numLocs; p++ ) {
+ var point = randPoint( env, query )
+ multiPoint.push( point )
+ }
+
+ var indResults = queryResults( multiPoint, query, results )
+
+ var doc
+ // Nest the keys differently
+ if( Random.rand() < 0.5 )
+ doc = { locs : { loc : randLocTypes( multiPoint ) } }
+ else
+ doc = { locs : randLocTypes( multiPoint, "loc" ) }
+
+ randQueryAdditions( doc, indResults )
+
+ doc._id = i
+ bulk.insert( doc );
+ }
+ assert.writeOK(bulk.execute());
+
+ var indexDoc = { "locs.loc" : "2d" };
+ randIndexAdditions( indexDoc );
+ t.ensureIndex( indexDoc, env );
+ assert.isnull( db.getLastError() );
+
+ var padding = "x"
+ for( var i = 0; i < paddingSize; i++ ) padding = padding + padding
+
+ print( padding )
+
+ printjson( { seed : seed,
+ test: test,
+ env : env,
+ query : query,
+ data : data,
+ results : results,
+ paddingSize : paddingSize } )
+
+ // exact
+ print( "Exact query..." )
+ assert.eq( results.exact.docsIn, t.find( { "locs.loc" : randLocType( query.exact ), "exact.docIn" : randYesQuery() } ).count() )
+
+ // $center
+ print( "Center query..." )
+ print( "Min box : " + minBoxSize( env, query.radius ) )
+ assert.eq( results.center.docsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).count() )
+
+ print( "Center query update..." )
+ var res = t.update({ "locs.loc": { $within: { $center: [ query.center, query.radius ],
+ $uniqueDocs: true }},
+ "center.docIn": randYesQuery() },
+ { $set: { centerPaddingA: padding }}, false, true);
+ assert.eq( results.center.docsIn, res.nModified );
+
+ if( query.sphereRadius >= 0 ){
+
+ print( "Center sphere query...")
+ // $centerSphere
+ assert.eq( results.sphere.docsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ] } }, "sphere.docIn" : randYesQuery() } ).count() )
+
+ print( "Center sphere query update..." )
+ res = t.update({ "locs.loc": { $within: {
+ $centerSphere: [ query.sphereCenter, query.sphereRadius ],
+ $uniqueDocs: true } },
+ "sphere.docIn" : randYesQuery() },
+ { $set: { spherePaddingA: padding } }, false, true);
+ assert.eq( results.sphere.docsIn, res.nModified );
+ }
+
+ // $box
+ print( "Box query..." )
+ assert.eq( results.box.docsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : true } }, "box.docIn" : randYesQuery() } ).count() )
+
+ // $polygon
+ print( "Polygon query..." )
+ assert.eq( results.poly.docsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ).count() )
// $near, $nearSphere and geoNear results have a default document limit of 100.
var defaultDocLimit = 100;
@@ -506,12 +481,12 @@ for ( var test = 0; test < numTests; test++ ) {
var num = Math.min( 2* defaultDocLimit, 2 * results.center.docsIn);
- var output = db.runCommand( {
- geoNear : "testAllGeo",
- near : query.center,
- maxDistance : query.radius ,
- includeLocs : true,
- num : num } ).results
+ var output = db.runCommand( {
+ geoNear : "testAllGeo",
+ near : query.center,
+ maxDistance : query.radius ,
+ includeLocs : true,
+ num : num } ).results
assert.eq( Math.min( num, results.center.docsIn ),
output.length,
@@ -520,40 +495,36 @@ for ( var test = 0; test < numTests; test++ ) {
"; radius: " + query.radius +
"; docs: " + results.center.docsIn + "; locs: " + results.center.locsIn )
- var distance = 0;
- for ( var i = 0; i < output.length; i++ ) {
- var retDistance = output[i].dis
- var retLoc = locArray( output[i].loc )
-
- // print( "Dist from : " + results[i].loc + " to " + startPoint + " is "
- // + retDistance + " vs " + radius )
-
- var arrLocs = locsArray( output[i].obj.locs )
-
- assert.contains( retLoc, arrLocs )
-
- // printjson( arrLocs )
-
- var distInObj = false
- for ( var j = 0; j < arrLocs.length && distInObj == false; j++ ) {
- var newDistance = Geo.distance( locArray( query.center ) , arrLocs[j] )
- distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 )
- }
-
- assert( distInObj )
- assert.between( retDistance - 0.0001 , Geo.distance( locArray( query.center ), retLoc ), retDistance + 0.0001 )
- assert.lte( retDistance, query.radius )
- assert.gte( retDistance, distance )
- distance = retDistance
- }
-
- }
-
- // $polygon
+ var distance = 0;
+ for ( var i = 0; i < output.length; i++ ) {
+ var retDistance = output[i].dis
+ var retLoc = locArray( output[i].loc )
+
+ var arrLocs = locsArray( output[i].obj.locs )
+
+ assert.contains( retLoc, arrLocs )
+
+ var distInObj = false
+ for ( var j = 0; j < arrLocs.length && distInObj == false; j++ ) {
+ var newDistance = Geo.distance( locArray( query.center ) , arrLocs[j] )
+ distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 )
+ }
+
+ assert( distInObj )
+ assert.between( retDistance - 0.0001 , Geo.distance( locArray( query.center ), retLoc ), retDistance + 0.0001 )
+ assert.lte( retDistance, query.radius )
+ assert.gte( retDistance, distance )
+ distance = retDistance
+ }
+
+ }
+
+ // $polygon
print( "Polygon remove..." )
- t.remove( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } )
- assert.eq( results.poly.docsIn, t.getDB().getLastErrorObj().n )
-
+ res = t.remove({ "locs.loc": { $within: { $polygon: query.boxPoly }},
+ "poly.docIn": randYesQuery() });
+ assert.eq( results.poly.docsIn, res.nRemoved );
+
}
diff --git a/jstests/noPassthrough/geo_mnypts_plus_fields.js b/jstests/noPassthrough/geo_mnypts_plus_fields.js
index 53d33da4f29..7c5e23d4b97 100644
--- a/jstests/noPassthrough/geo_mnypts_plus_fields.js
+++ b/jstests/noPassthrough/geo_mnypts_plus_fields.js
@@ -12,7 +12,8 @@ for( var fields = 1; fields < maxFields; fields++ ){
coll.drop()
var totalPts = 500 * 1000
-
+
+ var bulk = coll.initializeUnorderedBulkOp();
// Add points in a 100x100 grid
for( var i = 0; i < totalPts; i++ ){
var ii = i % 10000
@@ -37,10 +38,11 @@ for( var fields = 1; fields < maxFields; fields++ ){
doc[ "field" + j ] = field
}
-
- coll.insert( doc )
+
+ bulk.insert( doc );
}
-
+ assert.writeOK(bulk.execute());
+
// Create the query for the additional fields
queryFields = {}
for( var j = 0; j < fields; j++ ){
diff --git a/jstests/noPassthrough/gle_after_split_failure_during_migration.js b/jstests/noPassthrough/gle_after_split_failure_during_migration.js
deleted file mode 100644
index 9d0a6a9ca2a..00000000000
--- a/jstests/noPassthrough/gle_after_split_failure_during_migration.js
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * SERVER-4987 This test tries to check the getLastError call will still use
- * the same connection even if a split chunk triggered while doing inserts
- * failed (cause by StaleConfigException).
- *
- * TODO: SERVER-5175
- * This test relies on the corresponding delays inside (1) WriteBackListener::run
- * and (2) ShardStrategy::_insert and (3) receivedInsert from instance.cpp
- * to make the bug easier to manifest.
- *
- * The purpose of (1) is to make the writebacks slower so the failed inserts won't
- * be reapplied on time.
- *
- * The purpose of (2) is to make it easier for the moveChunk command from the other
- * mongos to interleave in between the moment the insert has set its shard version and
- * when in tries to autosplit (Note: it should be long enough to allow the moveChunk
- * to actually complete before it tries to proceed to autosplit).
- *
- * The purpose of (3) is to make sure that the insert won't get applied to the
- * shard right away so when a different connection is used to do the getLastError,
- * the write will still not be applied.
- */
-function testGleAfterSplitDuringMigration(){
- var st = new ShardingTest({ shards: 2, verbose: 2, mongos: 2,
- other: { chunksize: 1 }});
-
- // Stop the balancer to prevent it from contending with the distributed lock.
- st.stopBalancer();
-
- var DB_NAME = jsTest.name();
- var COLL_NAME = "coll";
-
- var mongos = st.s0;
- var confDB = mongos.getDB( "config" );
- var coll = mongos.getCollection( DB_NAME + "." + COLL_NAME );
-
- var shardConn = st.d0;
- var shardColl = shardConn.getCollection( coll.getFullName() );
-
- var data = "x";
- var dataSize = 1024 * 256; // bytes, must be power of 2
- while( data.length < dataSize ) data += data;
-
- // Shard collection
- st.shardColl( coll, { _id : 1 }, false );
-
- var docID = 0;
-
- /**
- * @return {Mongo} the connection object given the name of the shard.
- */
- var getShardConn = function( shardName ) {
- var shardLoc = confDB.shards.findOne({ _id: shardName }).host;
- return new Mongo( shardLoc );
- };
-
- /**
- * Inserts documents using a direct shard connection to the max key chunk
- * enough to make sure that it will trigger the auto split.
- *
- * variables from outer scope: docID, coll, confDB, data
- */
- var primeForSplitting = function() {
- var topChunk = confDB.chunks.find().sort({ max: -1 }).limit( 1 ).next();
- var shardLoc = getShardConn( topChunk.shard );
- var testColl = shardLoc.getCollection( coll.getFullName() );
-
- var superSaturatedChunkSize = 1024 * 1024 * 10; // 10MB
- var docsToSaturateChunkSize = superSaturatedChunkSize / dataSize;
-
- for ( var i = 0; i < docsToSaturateChunkSize; i++ ) {
- testColl.insert({ _id: docID++, val: data });
- }
-
- assert.eq( null, testColl.getDB().getLastError() );
- };
-
- /**
- * Moves a random chunk to a new shard using a different mongos.
- *
- * @param tries {Number} number of retry attempts when the moveChunk command
- * fails.
- *
- * variables from outer scope: coll, st
- */
- var moveRandomChunk = function( tries ) {
- var otherConfDB = st.s1.getDB( "config" );
- var chunksCursor = otherConfDB.chunks.find().sort({ max: 1 });
- var chunkCount = chunksCursor.count();
-
- var randIdx = Math.floor( Math.random() * chunkCount );
- // Don't get the chunk with max/min key
- randIdx = ( randIdx == chunkCount )? randIdx - 1 : randIdx;
- randIdx = ( randIdx == 0 )? randIdx + 1 : randIdx;
-
- var chunk = chunksCursor.arrayAccess( randIdx );
- var chunkOwner = chunk.shard;
- var newOwner = otherConfDB.shards.findOne({ _id: { $ne: chunkOwner }})._id;
-
- var result = otherConfDB.adminCommand({ moveChunk: coll.getFullName(),
- find: { _id: chunk.min._id },
- to: newOwner });
-
- jsTest.log( "moveChunk result: " + tojson( result ));
- if( !result.ok && tries > 1 ) {
- moveRandomChunk( tries - 1 );
- }
- };
-
- var chunks = 0;
- do {
- coll.insert({ _id: docID++, val: data });
- chunks = mongos.getDB( "config" ).chunks.find().count();
- } while ( chunks < 5 );
-
- primeForSplitting();
-
- jsTest.log( "Starting the insert that should trigger auto-split." );
-
- // TODO: SERVER-5175 Trigger delays here
- coll.insert({ _id: docID++, val: data });
- moveRandomChunk( 3 );
-
- // getLastError should wait for all writes to this connection.
- var errObj = coll.getDB().getLastErrorObj();
- jsTest.log( "Last Error Object: " + tojson( errObj ));
-
- assert.eq( docID, coll.find().itcount(), "Count does not match!" );
-
- jsTest.log( "Finished counting." );
-
- st.stop();
-}
-
-testGleAfterSplitDuringMigration();
-
diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js
index d2c3ccac7e4..1a7ba4b3f4c 100644
--- a/jstests/noPassthrough/indexbg1.js
+++ b/jstests/noPassthrough/indexbg1.js
@@ -35,10 +35,11 @@ while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ m
t = db[ baseName ];
t.drop();
+ var bulk = db.jstests_indexbg1.initializeUnorderedBulkOp();
for( i = 0; i < size; ++i ) {
- db.jstests_indexbg1.save( {i:i} );
+ bulk.insert({ i: i });
}
- db.getLastError();
+ assert.writeOK(bulk.execute());
assert.eq( size, t.count() );
doParallel( fullName + ".ensureIndex( {i:1}, {background:true} )" );
@@ -62,25 +63,16 @@ while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ m
assert( ex.nscanned < 1000 , "took too long to find 100: " + tojson( ex ) );
- t.remove( {i:40}, true ); // table scan
- assert( !db.getLastError() );
-
- t.update( {i:10}, {i:-10} ); // should scan 10
- assert( !db.getLastError() );
+ assert.writeOK(t.remove({ i: 40 }, true )); // table scan
+ assert.writeOK(t.update({ i: 10 }, { i :-10 })); // should scan 10
id = t.find().hint( {$natural:-1} ).next()._id;
- t.update( {_id:id}, {i:-2} );
- assert( !db.getLastError() );
-
- t.save( {i:-50} );
- assert( !db.getLastError() );
-
- t.save( {i:size+2} );
- assert( !db.getLastError() );
+ assert.writeOK(t.update({ _id: id }, { i: -2 } ));
+ assert.writeOK(t.save({ i: -50 }));
+ assert.writeOK(t.save({ i: size + 2 }));
assert.eq( size + 1, t.count() );
- assert( !db.getLastError() );
print( "finished with checks" );
} catch( e ) {
@@ -113,10 +105,10 @@ assert.eq( 1, t.count( {i:-2} ) );
assert.eq( 1, t.count( {i:-50} ) );
assert.eq( 1, t.count( {i:size+2} ) );
assert.eq( 0, t.count( {i:40} ) );
-assert( !db.getLastError() );
print("about to drop index");
t.dropIndex( {i:1} );
-printjson( db.getLastError() );
-assert( !db.getLastError() );
+var gle = db.getLastError();
+printjson( gle );
+assert( !gle );
testServer.stop();
diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js
index 0b5edc79aa2..fcdac89956d 100644
--- a/jstests/noPassthrough/indexbg2.js
+++ b/jstests/noPassthrough/indexbg2.js
@@ -49,9 +49,7 @@ doTest = function(dropDups) {
// wait for indexing to start
assert.soon(function() { return 2 == db.system.indexes.count({ ns: "test." + baseName }) }, "no index created", 30000, 50);
t.save({ i: 0, n: true });
- //printjson(db.getLastError());
t.save({ i: size - 1, n: true });
- //printjson(db.getLastError());
} catch (e) {
// only a failure if we're still indexing
// wait for parallel status to update to reflect indexing status
diff --git a/jstests/noPassthrough/query_yield1.js b/jstests/noPassthrough/query_yield1.js
index 624215f8c45..7c168c1e208 100644
--- a/jstests/noPassthrough/query_yield1.js
+++ b/jstests/noPassthrough/query_yield1.js
@@ -14,9 +14,11 @@ q = function(){ var x=this.n; for ( var i=0; i<250; i++ ){ x = x * 2; } return f
while ( true ){
function fill(){
+ var bulk = t.initializeUnorderedBulkOp();
for ( ; i<N; i++ ){
- t.insert( { _id : i , n : 1 } )
+ bulk.insert({ _id: i, n: 1 });
}
+ assert.writeOK(bulk.execute());
}
function timeQuery(){
@@ -58,7 +60,7 @@ num = 0;
start = new Date();
biggestMe = 0;
while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
- var me = Date.timeFunc( function(){ t.insert( { x : 1 } ); db.getLastError(); } )
+ var me = Date.timeFunc( function(){ t.insert( { x : 1 } ); });
var x = db.currentOp()
if ( num++ == 0 ){
@@ -84,4 +86,4 @@ assert.eq( 0 , x.inprog.length , "weird 2" );
testServer.stop();
-}
\ No newline at end of file
+}
diff --git a/jstests/noPassthrough/query_yield2.js b/jstests/noPassthrough/query_yield2.js
index 71ce4535aa6..b2262d6b357 100644
--- a/jstests/noPassthrough/query_yield2.js
+++ b/jstests/noPassthrough/query_yield2.js
@@ -26,9 +26,11 @@ print( "Shell ==== Creating test.query_yield2 collection ..." );
print( "Shell ==== Adding documents until a time-wasting query takes over 2 seconds to complete" );
while ( true ){
function fill() {
+ var bulk = t.initializeUnorderedBulkOp();
for ( ; i < N; ++i ) {
- t.insert( { _id : i , n : 1 } )
+ bulk.insert({ _id: i , n: 1 });
}
+ assert.writeOK(bulk.execute());
}
function timeQuery() {
return Date.timeFunc(
@@ -100,7 +102,7 @@ while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ) {
if ( num == 0 ) {
print( "Shell ==== Starting loop " + num + ", inserting 1 document" );
}
- insertTime = Date.timeFunc( function() { t.insert( { x : 1 } ); db.getLastError(); } );
+ insertTime = Date.timeFunc( function() { t.insert({ x: 1 } ); });
currentOp = db.currentOp();
len = currentOp.inprog.length;
print( "Shell ==== Time to insert document " + num + " was " + insertTime + " ms, db.currentOp().inprog.length is " + len );
@@ -133,4 +135,4 @@ if ( len != 0 ) {
print( "Shell ==== Test completed successfully, shutting down server" );
testServer.stop();
-}
\ No newline at end of file
+}
diff --git a/jstests/noPassthrough/repair2.js b/jstests/noPassthrough/repair2.js
index 6f57ac0d45f..e80a3edf02b 100644
--- a/jstests/noPassthrough/repair2.js
+++ b/jstests/noPassthrough/repair2.js
@@ -9,8 +9,8 @@ t = testServer.getDB( baseName )[ baseName ];
t.drop();
function protect( f ) {
- try {
- f();
+ try {
+ f();
} catch( e ) {
printjson( e );
}
@@ -19,16 +19,17 @@ function protect( f ) {
s = startParallelShell( "db = db.getSisterDB( '" + baseName + "'); for( i = 0; i < 10; ++i ) { db.repairDatabase(); sleep( 5000 ); }" );
for( i = 0; i < 30; ++i ) {
-
- for( j = 0; j < 5000; ++j ) {
- protect( function() { t.insert( {_id:j} ); } );
+ var bulk = t.initializeUnorderedBulkOp();
+ for( j = 0; j < 5000; ++j ) {
+ bulk.insert({ _id: j } );
}
- for( j = 0; j < 5000; ++j ) {
- protect( function() { t.remove( {_id:j} ); } );
+ for( j = 0; j < 5000; ++j ) {
+ bulk.find({ _id: j }).remove();
}
-
- assert.eq( 0, t.count() );
+
+ assert.writeOK(bulk.execute());
+ assert.eq( 0, t.count() );
}
diff --git a/jstests/noPassthrough/sync1.js b/jstests/noPassthrough/sync1.js
deleted file mode 100644
index 490d2a53c5a..00000000000
--- a/jstests/noPassthrough/sync1.js
+++ /dev/null
@@ -1,49 +0,0 @@
-
-test = new SyncCCTest( "sync1" )
-
-db = test.conn.getDB( "test" )
-t = db.sync1
-t.save( { x : 1 } )
-assert.eq( 1 , t.find().itcount() , "A1" );
-assert.eq( 1 , t.find().count() , "A2" );
-t.save( { x : 2 } )
-assert.eq( 2 , t.find().itcount() , "A3" );
-assert.eq( 2 , t.find().count() , "A4" );
-
-test.checkHashes( "test" , "A3" );
-
-test.tempKill();
-assert.throws( function(){ t.save( { x : 3 } ); } , null , "B1" );
-// It's ok even for some of the mongod to be unreachable for read-only cmd
-assert.eq( 2, t.find({}).count() );
-// It's NOT ok for some of the mongod to be unreachable for write cmd
-assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); });
-assert.eq( 2 , t.find().itcount() , "B2" );
-test.tempStart();
-test.checkHashes( "test" , "B3" );
-
-// Trying killing the second mongod
-test.tempKill( 1 );
-assert.throws( function(){ t.save( { x : 3 } ); } );
-// It's ok even for some of the mongod to be unreachable for read-only cmd
-assert.eq( 2, t.find({}).count() );
-// It's NOT ok for some of the mongod to be unreachable for write cmd
-assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); });
-assert.eq( 2 , t.find().itcount() );
-test.tempStart( 1 );
-
-assert.eq( 2 , t.find().itcount() , "C1" );
-assert.soon( function(){
- try {
- t.remove( { x : 1 } )
- return true;
- }
- catch ( e ){
- print( e );
- }
- return false;
-} )
-t.find().forEach( printjson )
-assert.eq( 1 , t.find().itcount() , "C2" );
-
-test.stop();
diff --git a/jstests/noPassthrough/sync4.js b/jstests/noPassthrough/sync4.js
deleted file mode 100644
index 6733f07089d..00000000000
--- a/jstests/noPassthrough/sync4.js
+++ /dev/null
@@ -1,19 +0,0 @@
-
-test = new SyncCCTest( "sync4" )
-
-db = test.conn.getDB( "test" )
-t = db.sync4
-
-for ( i=0; i<1000; i++ ){
- t.insert( { _id : i , x : "asdasdsdasdas" } )
-}
-db.getLastError();
-
-test.checkHashes( "test" , "A0" );
-assert.eq( 1000 , t.find().count() , "A1" )
-assert.eq( 1000 , t.find().itcount() , "A2" )
-assert.eq( 1000 , t.find().snapshot().batchSize(10).itcount() , "A2" )
-
-
-
-test.stop();
diff --git a/jstests/noPassthrough/sync8.js b/jstests/noPassthrough/sync8.js
deleted file mode 100644
index 241ad655569..00000000000
--- a/jstests/noPassthrough/sync8.js
+++ /dev/null
@@ -1,13 +0,0 @@
-// Test for SERVER-11492 - make sure that upserting a new document reports n:1 in GLE
-
-var test = new SyncCCTest( "sync1" );
-
-var db = test.conn.getDB( "test" );
-var t = db.sync8;
-t.remove({});
-
-t.update({_id:1}, {$set:{a:1}}, true);
-var le = db.getLastErrorObj();
-assert.eq(1, le.n);
-
-test.stop();
diff --git a/jstests/noPassthrough/update_server-5552.js b/jstests/noPassthrough/update_server-5552.js
index d1f1bcb518b..c164ba67694 100644
--- a/jstests/noPassthrough/update_server-5552.js
+++ b/jstests/noPassthrough/update_server-5552.js
@@ -1,5 +1,3 @@
-
-
load( "jstests/libs/slow_weekly_util.js" )
testServer = new SlowWeeklyMongod( "update_server-5552" )
db = testServer.getDB( "test" );
@@ -9,9 +7,11 @@ t.drop()
N = 10000;
-for ( i=0; i<N; i++ )
- t.insert( { _id : i , x : 1 } )
-db.getLastError();
+var bulk = t.initializeUnorderedBulkOp();
+for ( i=0; i<N; i++ ) {
+ bulk.insert({ _id: i, x: 1 });
+}
+assert.writeOK(bulk.execute());
join = startParallelShell( "while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );" )
diff --git a/jstests/noPassthrough/update_yield1.js b/jstests/noPassthrough/update_yield1.js
index db684a6d6eb..98437414600 100644
--- a/jstests/noPassthrough/update_yield1.js
+++ b/jstests/noPassthrough/update_yield1.js
@@ -12,16 +12,17 @@ var i = 0;
while ( true ){
var fill = function() {
+ var bulk = t.initializeUnorderedBulkOp();
for ( ; i<N; i++ ){
- t.insert( { _id : i , n : 1 } );
+ bulk.insert({ _id: i, n: 1 });
}
+ assert.writeOK(bulk.execute());
};
var timeUpdate = function() {
return Date.timeFunc(
function(){
t.update( {} , { $inc : { n : 1 } } , false , true );
- var r = db.getLastErrorObj();
}
);
};
@@ -48,7 +49,7 @@ function haveInProgressUpdate() {
// --- test 1
-var join = startParallelShell( "db.update_yield1.update( {} , { $inc : { n : 1 } } , false , true ); db.getLastError()" );
+var join = startParallelShell( "db.update_yield1.update( {}, { $inc: { n: 1 }}, false, true );" );
assert.soon(haveInProgressUpdate, "never doing update");
var num = 0;