author     Romans Kasperovics <romans.kasperovics@mongodb.com>  2022-04-12 16:43:51 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>     2022-04-12 22:54:31 +0000
commit     f285f0af3d8d1448db1abafcaf4506a96af9e511 (patch)
tree       4b72f79e7899358c8a882588ccf4017ff41354d7 /jstests/sharding
parent     fd75059c83cfa4be0225bd03b9c96a21aea39887 (diff)
download   mongo-f285f0af3d8d1448db1abafcaf4506a96af9e511.tar.gz
SERVER-63208 Make allowDiskUse opt-out rather than opt-in
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/in_memory_sort_limit.js                  | 16
-rw-r--r--  jstests/sharding/query/javascript_heap_limit.js           |  9
-rw-r--r--  jstests/sharding/query/sharded_graph_lookup_execution.js  | 12
-rw-r--r--  jstests/sharding/query/sharded_lookup_execution.js        | 70
4 files changed, 59 insertions, 48 deletions
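
Note on the behaviour the diff below exercises: with SERVER-63208, allowDiskUse becomes opt-out, so blocking sorts and pipelines may spill to disk by default, and tests that expect a memory-limit error now have to disable spilling explicitly. A minimal sketch of that pattern, using the shell helpers that appear in the patch (allowDiskUse(false), assert.throwsWithCode, ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed); the collection name and sort key are illustrative only, and the final assertion presumes the collection is large enough to exceed the in-memory sort limit:

    // Assumed example collection; not part of the patch.
    const coll = db.example_sort_coll;

    // Post-SERVER-63208 default: disk use is permitted, so a blocking sort that
    // overflows the in-memory limit may spill and still complete.
    coll.find().sort({x: 1}).itcount();

    // Explicit opt-out: once the in-memory limit is exceeded the query fails instead,
    // which is the error the updated jstests assert on.
    assert.throwsWithCode(
        () => coll.find().sort({x: 1}).allowDiskUse(false).itcount(),
        ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
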
diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js
index edc5d1ac7c0..6be5d38f013 100644
--- a/jstests/sharding/in_memory_sort_limit.js
+++ b/jstests/sharding/in_memory_sort_limit.js
@@ -35,21 +35,21 @@ var failLimit = 4000;
 
 // Test on MongoD
 jsTestLog("Test no error with limit of " + passLimit + " on mongod");
-assert.eq(passLimit, shardCol.find().sort({x: 1}).limit(passLimit).itcount());
+assert.eq(passLimit, shardCol.find().sort({x: 1}).allowDiskUse(false).limit(passLimit).itcount());
 
 jsTestLog("Test error with limit of " + failLimit + " on mongod");
-assert.throws(function() {
-    shardCol.find().sort({x: 1}).limit(failLimit).itcount();
-});
+assert.throwsWithCode(
+    () => shardCol.find().sort({x: 1}).allowDiskUse(false).limit(failLimit).itcount(),
+    ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
 
 // Test on MongoS
 jsTestLog("Test no error with limit of " + passLimit + " on mongos");
-assert.eq(passLimit, mongosCol.find().sort({x: 1}).limit(passLimit).itcount());
+assert.eq(passLimit, mongosCol.find().sort({x: 1}).allowDiskUse(false).limit(passLimit).itcount());
 
 jsTestLog("Test error with limit of " + failLimit + " on mongos");
-assert.throws(function() {
-    mongosCol.find().sort({x: 1}).limit(failLimit).itcount();
-});
+assert.throwsWithCode(
+    () => mongosCol.find().sort({x: 1}).allowDiskUse(false).limit(failLimit).itcount(),
+    ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
 
 st.stop();
 })();
diff --git a/jstests/sharding/query/javascript_heap_limit.js b/jstests/sharding/query/javascript_heap_limit.js
index 32762653f8d..6d2ae116201 100644
--- a/jstests/sharding/query/javascript_heap_limit.js
+++ b/jstests/sharding/query/javascript_heap_limit.js
@@ -46,7 +46,8 @@ const aggregateWithJSFunction = {
     pipeline: [
         {$group: {_id: "$x"}},
         {$project: {y: {"$function": {args: [], body: allocateLargeString, lang: "js"}}}}
-    ]
+    ],
+    allowDiskUse: false
 };
 const aggregateWithInternalJsReduce = {
     aggregate: "coll",
@@ -61,7 +62,8 @@ const aggregateWithInternalJsReduce = {
                 }
             }
         }
-    }]
+    }],
+    allowDiskUse: false
 };
 const aggregateWithUserDefinedAccumulator = {
     aggregate: "coll",
@@ -79,7 +81,8 @@ const aggregateWithUserDefinedAccumulator = {
                 }
             }
         }
-    }]
+    }],
+    allowDiskUse: false
 };
 const findWithJavaScriptFunction = {
     find: "coll",
diff --git a/jstests/sharding/query/sharded_graph_lookup_execution.js b/jstests/sharding/query/sharded_graph_lookup_execution.js
index 4ed51bbc2b0..51e18a36e88 100644
--- a/jstests/sharding/query/sharded_graph_lookup_execution.js
+++ b/jstests/sharding/query/sharded_graph_lookup_execution.js
@@ -481,7 +481,7 @@ st.shardColl(
 st.shardColl(
     airfieldsColl, {airfield: 1}, {airfield: "LHR"}, {airfield: "LHR"}, mongosDB.getName());
 
-assert.commandWorked(mongosDB.createView("airportsView", airportsColl.getName(), 
+assert.commandWorked(mongosDB.createView("airportsView", airportsColl.getName(),
     [{$lookup: {
         from: "airfields",
         localField: "airport",
@@ -579,7 +579,10 @@ expectedRes = [
     }
 ];
 assertGraphLookupExecution(
-    pipeline, {comment: "sharded_to_sharded_on_mongos_targeted"}, expectedRes, [{
+    pipeline,
+    {comment: "sharded_to_sharded_on_mongos_targeted", allowDiskUse: false},
+    expectedRes,
+    [{
         // Because the $graphLookup is after a $group that requires merging, it is executed on
         // mongos.
         toplevelExec: [0, 0],
@@ -595,7 +598,10 @@ st.shardColl(airportsColl, {_id: 1}, {_id: 1}, {_id: 1}, mongosDB.getName());
 st.shardColl(
     travelersColl, {firstName: 1}, {firstName: "Bob"}, {firstName: "Bob"}, mongosDB.getName());
 assertGraphLookupExecution(
-    pipeline, {comment: "sharded_to_sharded_on_mongos_untargeted"}, expectedRes, [{
+    pipeline,
+    {comment: "sharded_to_sharded_on_mongos_untargeted", allowDiskUse: false},
+    expectedRes,
+    [{
         // Because the $graphLookup is after a $group that requires merging, it is executed on
         // mongos.
         toplevelExec: [0, 0],
diff --git a/jstests/sharding/query/sharded_lookup_execution.js b/jstests/sharding/query/sharded_lookup_execution.js
index 7ffc854be78..293229f65f7 100644
--- a/jstests/sharding/query/sharded_lookup_execution.js
+++ b/jstests/sharding/query/sharded_lookup_execution.js
@@ -418,7 +418,7 @@ st.shardColl(updatesColl, {original_review_id: 1}, mongosDB.getName());
 
-assert.commandWorked(mongosDB.createView("reviewsView", reviewsColl.getName(), 
+assert.commandWorked(mongosDB.createView("reviewsView", reviewsColl.getName(),
     [{$lookup: {
         from: "updates",
        let: {review_id: "$_id"},
@@ -491,32 +491,34 @@ expectedRes = [
     {_id: "shirt", reviews: [{comment: "meh"}]}
 ];
 
-assertLookupExecution(pipeline, {comment: "sharded_to_sharded_on_mongos_targeted"}, {
-    results: expectedRes,
-    // Because the $lookup is after a $group that requires merging, the $lookup stage is executed on
-    // mongos.
-    toplevelExec: [0, 0],
-    mongosMerger: true,
-    // For every document that flows through the $lookup stage, the mongos executing the $lookup
-    // will target the shard that holds the relevant data for the sharded foreign collection.
-    subpipelineExec: [0, 2]
-});
+assertLookupExecution(
+    pipeline, {comment: "sharded_to_sharded_on_mongos_targeted", allowDiskUse: false}, {
+        results: expectedRes,
+        // Because the $lookup is after a $group that requires merging, the $lookup stage is
+        // executed on mongos.
+        toplevelExec: [0, 0],
+        mongosMerger: true,
+        // For every document that flows through the $lookup stage, the mongos executing the $lookup
+        // will target the shard that holds the relevant data for the sharded foreign collection.
+        subpipelineExec: [0, 2]
+    });
 
 // Test that an untargeted $lookup on a sharded collection can execute correctly on mongos.
 st.shardColl(ordersColl, {_id: 1}, {_id: 1}, {_id: 1}, mongosDB.getName());
 st.shardColl(reviewsColl, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
-assertLookupExecution(pipeline, {comment: "sharded_to_sharded_on_mongos_untargeted"}, {
-    results: expectedRes,
-    // Because the $lookup is after a $group that requires merging, the $lookup stage is executed on
-    // mongos.
-    toplevelExec: [0, 0],
-    mongosMerger: true,
-    // For every document that flows through the $lookup stage, the mongos executing the $lookup
-    // will perform a scatter-gather query and open a cursor on every shard that contains the
-    // foreign collection.
-    subpipelineExec: [2, 2]
-});
+assertLookupExecution(
+    pipeline, {comment: "sharded_to_sharded_on_mongos_untargeted", allowDiskUse: false}, {
+        results: expectedRes,
+        // Because the $lookup is after a $group that requires merging, the $lookup stage is
+        // executed on mongos.
+        toplevelExec: [0, 0],
+        mongosMerger: true,
+        // For every document that flows through the $lookup stage, the mongos executing the $lookup
+        // will perform a scatter-gather query and open a cursor on every shard that contains the
+        // foreign collection.
+        subpipelineExec: [2, 2]
+    });
 
 // Test that a targeted $lookup on a sharded collection can execute correctly when mongos delegates
 // to a merging shard.
@@ -548,7 +550,7 @@ assertLookupExecution(
     randomlyDelegatedMerger: true,
     // For every document that flows through the $lookup stage, the node executing the $lookup
     // will perform a scatter-gather query and open a cursor on every shard that contains the
-    // foreign collection. 
+    // foreign collection.
     subpipelineExec: [2, 2]
 });
 
@@ -609,19 +611,19 @@ pipeline = [
     // To make sure that there is a non-correlated pipeline prefix, we will match on "name" instead
     // of _id to prevent the $match stage from being optimized before the $group.
     {$lookup: {
-        from: "reviews", 
-        let: {customer_product_name: "$products._id"}, 
+        from: "reviews",
+        let: {customer_product_name: "$products._id"},
         pipeline: [
-            {$group: 
+            {$group:
                 {_id: "$product_id", avg_stars: {$avg: "$stars"}, name: {$first: "$product_id"}}
             },
             {$match: {$expr: {$eq: ["$name", "$$customer_product_name"]}}},
         ],
         as: "avg_review"}},
     {$unwind: {path: "$avg_review", preserveNullAndEmptyArrays: true}},
-    {$group: 
+    {$group:
         {
-            _id: "$_id", 
+            _id: "$_id",
             products: {$push: {_id: "$products._id", avg_review: "$avg_review.avg_stars"}}
         }
     }
@@ -651,18 +653,18 @@ pipeline = [
     // To make sure that there is a non-correlated pipeline prefix, we will match on "name" instead
     // of _id to prevent the $match stage from being optimized before the $group.
     {$lookup: {
-        from: "reviews", 
-        let: {customer_product_name: "$products._id"}, 
+        from: "reviews",
+        let: {customer_product_name: "$products._id"},
         pipeline: [
-            {$group: 
-                {_id: "$product_id", avg_stars: {$avg: "$stars"}, name: {$first: "$product_id"}}},
+            {$group:
+                {_id: "$product_id", avg_stars: {$avg: "$stars"}, name: {$first: "$product_id"}}},
             {$match: {$expr: {$eq: ["$name", "$$customer_product_name"]}}},
         ],
         as: "avg_review"}},
     {$unwind: {path: "$avg_review", preserveNullAndEmptyArrays: true}},
-    {$group: 
+    {$group:
         {
-            _id: "$_id", 
+            _id: "$_id",
             products: {$push: {_id: "$products._id", avg_review: "$avg_review.avg_stars"}}
         }
     }
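
For command-style callers (as in javascript_heap_limit.js above), the opt-out is the top-level allowDiskUse field of the aggregate command rather than the cursor method. A minimal sketch under the same assumptions, with a hypothetical collection and pipeline that are not taken from the patch:

    // Aggregate command form of the opt-out; "coll" and the $group key are illustrative.
    const aggregateNoDiskUse = {
        aggregate: "coll",
        pipeline: [{$group: {_id: "$x"}}],
        cursor: {},
        allowDiskUse: false
    };
    assert.commandWorked(db.runCommand(aggregateNoDiskUse));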