author     Romans Kasperovics <romans.kasperovics@mongodb.com>  2022-04-12 16:43:51 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-04-12 22:54:31 +0000
commit     f285f0af3d8d1448db1abafcaf4506a96af9e511 (patch)
tree       4b72f79e7899358c8a882588ccf4017ff41354d7
parent     fd75059c83cfa4be0225bd03b9c96a21aea39887 (diff)
download   mongo-f285f0af3d8d1448db1abafcaf4506a96af9e511.tar.gz
SERVER-63208 Make allowDiskUse opt-out rather than opt-in
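
With this change, queries that exceed their memory budget spill to disk by default. Clients opt out per query with allowDiskUse: false, or server-wide through the new allowDiskUseByDefault parameter. A minimal shell sketch of the new semantics (assuming a hypothetical collection largeColl whose blocking sort exceeds the configured sort memory limit):

// Spilling is now permitted unless explicitly disabled.
db.largeColl.aggregate([{$sort: {x: -1}}]);  // succeeds, may spill to disk
// An explicit opt-out restores the old failure mode.
assert.commandFailedWithCode(
    db.runCommand(
        {aggregate: "largeColl", pipeline: [{$sort: {x: -1}}], cursor: {}, allowDiskUse: false}),
    ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
// The server-wide default can be flipped back to the old opt-in behavior.
assert.commandWorked(db.adminCommand({setParameter: 1, allowDiskUseByDefault: false}));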
-rw-r--r--  jstests/aggregation/accumulators/accumulator_js_size_limits.js | 45
-rw-r--r--  jstests/aggregation/sources/lookup/lookup_collation.js | 10
-rw-r--r--  jstests/aggregation/sources/setWindowFields/memory_limit.js | 25
-rw-r--r--  jstests/aggregation/spill_to_disk.js | 12
-rw-r--r--  jstests/core/explain_execution_error.js | 19
-rw-r--r--  jstests/core/sortb.js | 13
-rw-r--r--  jstests/core/sortg.js | 8
-rw-r--r--  jstests/core/sortj.js | 6
-rw-r--r--  jstests/core/views/views_aggregation.js | 10
-rw-r--r--  jstests/noPassthrough/external_sort_find.js | 26
-rw-r--r--  jstests/noPassthrough/lookup_pushdown.js | 8
-rw-r--r--  jstests/noPassthrough/mr_disk_use.js | 43
-rw-r--r--  jstests/noPassthrough/plan_cache_list_failed_plans.js | 1
-rw-r--r--  jstests/noPassthrough/plan_cache_replan_group_lookup.js | 3
-rw-r--r--  jstests/noPassthrough/plan_cache_replan_sort.js | 2
-rw-r--r--  jstests/noPassthrough/query_knobs_validation.js | 4
-rw-r--r--  jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js | 1
-rw-r--r--  jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js | 6
-rw-r--r--  jstests/noPassthrough/views_count_distinct_disk_use.js | 45
-rw-r--r--  jstests/noPassthroughWithMongod/find_and_modify_server16469.js | 2
-rw-r--r--  jstests/sharding/in_memory_sort_limit.js | 16
-rw-r--r--  jstests/sharding/query/javascript_heap_limit.js | 9
-rw-r--r--  jstests/sharding/query/sharded_graph_lookup_execution.js | 12
-rw-r--r--  jstests/sharding/query/sharded_lookup_execution.js | 70
-rw-r--r--  src/mongo/db/SConscript | 1
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 3
-rw-r--r--  src/mongo/db/commands/map_reduce_agg.cpp | 7
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp | 2
-rw-r--r--  src/mongo/db/exec/disk_use_options.idl | 43
-rw-r--r--  src/mongo/db/query/query_request_helper.cpp | 2
-rw-r--r--  src/mongo/db/query/query_request_test.cpp | 3
-rw-r--r--  src/mongo/shell/assert.js | 1
-rw-r--r--  src/mongo/shell/query.js | 4
33 files changed, 319 insertions(+), 143 deletions(-)
diff --git a/jstests/aggregation/accumulators/accumulator_js_size_limits.js b/jstests/aggregation/accumulators/accumulator_js_size_limits.js
index 10c6686deba..84e31cf218a 100644
--- a/jstests/aggregation/accumulators/accumulator_js_size_limits.js
+++ b/jstests/aggregation/accumulators/accumulator_js_size_limits.js
@@ -5,8 +5,8 @@
const coll = db.accumulator_js_size_limits;
-function runExample(groupKey, accumulatorSpec) {
- return coll.runCommand({
+function runExample(groupKey, accumulatorSpec, aggregateOptions = {}) {
+ const aggregateCmd = {
aggregate: coll.getName(),
cursor: {},
pipeline: [{
@@ -15,7 +15,8 @@ function runExample(groupKey, accumulatorSpec) {
accumulatedField: {$accumulator: accumulatorSpec},
}
}]
- });
+ };
+ return coll.runCommand(Object.assign(aggregateCmd, aggregateOptions));
}
// Accumulator tries to create too long a String; it can't be serialized to BSON.
@@ -90,24 +91,26 @@ assert(coll.drop());
assert.commandWorked(coll.insert(Array.from({length: 200}, (_, i) => ({_id: i}))));
// By grouping on _id, each group contains only 1 document. This means it creates many
// AccumulatorState instances.
-res = runExample("$_id", {
- init: function() {
- // Each accumulator state is big enough to be expensive, but not big enough to hit the BSON
- // size limit.
- return "a".repeat(1 * 1024 * 1024);
- },
- accumulate: function(state) {
- return state;
- },
- accumulateArgs: [1],
- merge: function(state1, state2) {
- return state1;
- },
- finalize: function(state) {
- return state.length;
- },
- lang: 'js',
-});
+res = runExample("$_id",
+ {
+ init: function() {
+ // Each accumulator state is big enough to be expensive, but not big enough
+ // to hit the BSON size limit.
+ return "a".repeat(1 * 1024 * 1024);
+ },
+ accumulate: function(state) {
+ return state;
+ },
+ accumulateArgs: [1],
+ merge: function(state1, state2) {
+ return state1;
+ },
+ finalize: function(state) {
+ return state.length;
+ },
+ lang: 'js',
+ },
+ {allowDiskUse: false});
assert.commandFailedWithCode(res, [ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed]);
// Verify that having large number of documents doesn't cause the $accumulator to run out of memory.
diff --git a/jstests/aggregation/sources/lookup/lookup_collation.js b/jstests/aggregation/sources/lookup/lookup_collation.js
index 3b55a32f91a..4af61a41234 100644
--- a/jstests/aggregation/sources/lookup/lookup_collation.js
+++ b/jstests/aggregation/sources/lookup/lookup_collation.js
@@ -99,7 +99,7 @@ let explain;
extraErrorMsg: " Default collation on local, running: " + tojson(lookupInto)
});
- results = collAA.aggregate([lookupInto(collAa)]).toArray();
+ results = collAA.aggregate([lookupInto(collAa)], {allowDiskUse: false}).toArray();
assertArrayEq({
actual: results,
expected: resultCaseInsensitive,
@@ -209,10 +209,10 @@ let explain;
collAa.explain().aggregate([lookupInto(collAa_indexed)], {collation: caseInsensitive});
assertIndexJoinStrategy(explain);
- // If no index is compatible with the requested collation, nested loop join will be chosen
- // instead.
- explain =
- collAa.explain().aggregate([lookupInto(collAa_indexed)], {collation: {locale: "fr"}});
+ // If no index is compatible with the requested collation and disk use is not allowed,
+ // nested loop join will be chosen instead.
+ explain = collAa.explain().aggregate([lookupInto(collAa_indexed)],
+ {collation: {locale: "fr"}, allowDiskUse: false});
assertNestedLoopJoinStrategy(explain);
// Stage-level collation overrides collection-level and command-level collations.
diff --git a/jstests/aggregation/sources/setWindowFields/memory_limit.js b/jstests/aggregation/sources/setWindowFields/memory_limit.js
index 437aa37d654..e64781d3eca 100644
--- a/jstests/aggregation/sources/setWindowFields/memory_limit.js
+++ b/jstests/aggregation/sources/setWindowFields/memory_limit.js
@@ -14,9 +14,8 @@ const coll = db[jsTestName()];
coll.drop();
// Test that we can set the memory limit.
-setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
- "internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
- 1200);
+const nonConfigNodes = DiscoverTopology.findNonConfigNodes(db.getMongo());
+setParameterOnAllHosts(nonConfigNodes, "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", 1200);
// Create a collection with enough documents in a single partition to go over the memory limit.
const docsPerPartition = 10;
@@ -27,21 +26,24 @@ for (let i = 0; i < docsPerPartition; i++) {
assert.commandFailedWithCode(coll.runCommand({
aggregate: coll.getName(),
pipeline: [{$setWindowFields: {sortBy: {partitionKey: 1}, output: {val: {$sum: "$_id"}}}}],
- cursor: {}
+ cursor: {},
+ allowDiskUse: false
}),
5643011);
// The same query passes with a higher memory limit. Note that the amount of memory consumed by the
// stage is roughly double the size of the documents since each document has an internal cache.
const perDocSize = 1200;
-setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
+setParameterOnAllHosts(nonConfigNodes,
"internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
(perDocSize * docsPerPartition * 3) + 1024);
assert.commandWorked(coll.runCommand({
aggregate: coll.getName(),
pipeline: [{$setWindowFields: {sortBy: {partitionKey: 1}, output: {val: {$sum: "$largeStr"}}}}],
- cursor: {}
+ cursor: {},
+ allowDiskUse: false
}));
+
// The query passes with multiple partitions of the same size.
for (let i = docsPerPartition; i < docsPerPartition * 2; i++) {
assert.commandWorked(coll.insert({_id: i, partitionKey: 2, largeStr: Array(1025).toString()}));
@@ -55,7 +57,8 @@ assert.commandWorked(coll.runCommand({
output: {val: {$sum: "$largeStr"}}
}
}],
- cursor: {}
+ cursor: {},
+ allowDiskUse: false
}));
// Test that the query fails with a window function that stores documents.
@@ -68,11 +71,11 @@ assert.commandFailedWithCode(coll.runCommand({
output: {val: {$max: "$largeStr", window: {documents: [-9, 9]}}}
}
}],
- cursor: {}
+ cursor: {},
+ allowDiskUse: false
}),
5414201);
// Reset limit for other tests.
-setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
- "internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
- 100 * 1024 * 1024);
+setParameterOnAllHosts(
+ nonConfigNodes, "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", 100 * 1024 * 1024);
})();
diff --git a/jstests/aggregation/spill_to_disk.js b/jstests/aggregation/spill_to_disk.js
index c9be71aadc0..e89a1a8ef31 100644
--- a/jstests/aggregation/spill_to_disk.js
+++ b/jstests/aggregation/spill_to_disk.js
@@ -36,23 +36,15 @@ for (let i = 0; i < memoryLimitMB + 1; i++)
assert.gt(coll.stats().size, memoryLimitMB * 1024 * 1024);
function test({pipeline, expectedCodes, canSpillToDisk}) {
- // Test that by default we error out if exceeding memory limit.
- assert.commandFailedWithCode(
- db.runCommand({aggregate: coll.getName(), pipeline: pipeline, cursor: {}}), expectedCodes);
-
// Test that 'allowDiskUse: false' does indeed prevent spilling to disk.
assert.commandFailedWithCode(
db.runCommand(
{aggregate: coll.getName(), pipeline: pipeline, cursor: {}, allowDiskUse: false}),
expectedCodes);
- // Test that allowDiskUse only supports bool. In particular, numbers aren't allowed.
- assert.commandFailed(db.runCommand(
- {aggregate: coll.getName(), pipeline: pipeline, cursor: {}, allowDiskUse: 1}));
-
// If this command supports spilling to disk, ensure that it will succeed when disk use is
// allowed.
- let res = db.runCommand(
+ const res = db.runCommand(
{aggregate: coll.getName(), pipeline: pipeline, cursor: {}, allowDiskUse: true});
if (canSpillToDisk) {
assert.eq(new DBCommandCursor(coll.getDB(), res).itcount(),
@@ -158,6 +150,7 @@ test({
expectedCodes: ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed,
canSpillToDisk: true
});
+
test({
pipeline: [{$sort: {random: 1}}, {$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}],
expectedCodes: ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed,
@@ -172,6 +165,7 @@ test({
[ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed, ErrorCodes.ExceededMemoryLimit],
canSpillToDisk: false
});
+
test({
pipeline:
[{$group: {_id: null, bigArray: {$addToSet: {$concat: ['$bigStr', {$toString: "$_id"}]}}}}],
diff --git a/jstests/core/explain_execution_error.js b/jstests/core/explain_execution_error.js
index 43ea960e8d3..6f11a555ba3 100644
--- a/jstests/core/explain_execution_error.js
+++ b/jstests/core/explain_execution_error.js
@@ -80,13 +80,12 @@ for (var i = 0; i < 120 * numShards; i++) {
// A query which sorts the whole collection by "b" should throw an error due to hitting the
// memory limit for sort.
-assert.throws(function() {
- t.find({a: {$exists: true}}).sort({b: 1}).itcount();
-});
+assert.throwsWithCode(() => t.find({a: {$exists: true}}).sort({b: 1}).allowDiskUse(false).itcount(),
+ ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
// Explain of this query should succeed at query planner verbosity.
result = db.runCommand({
- explain: {find: t.getName(), filter: {a: {$exists: true}}, sort: {b: 1}},
+ explain: {find: t.getName(), filter: {a: {$exists: true}}, sort: {b: 1}, allowDiskUse: false},
verbosity: "queryPlanner"
});
assert.commandWorked(result);
@@ -95,7 +94,7 @@ assert("queryPlanner" in result);
// Explaining the same query at execution stats verbosity should succeed, but indicate that the
// underlying operation failed.
result = db.runCommand({
- explain: {find: t.getName(), filter: {a: {$exists: true}}, sort: {b: 1}},
+ explain: {find: t.getName(), filter: {a: {$exists: true}}, sort: {b: 1}, allowDiskUse: false},
verbosity: "executionStats"
});
assert.commandWorked(result);
@@ -105,7 +104,7 @@ assertExecError(result);
// The underlying operation should also report a failure at allPlansExecution verbosity.
result = db.runCommand({
- explain: {find: t.getName(), filter: {a: {$exists: true}}, sort: {b: 1}},
+ explain: {find: t.getName(), filter: {a: {$exists: true}}, sort: {b: 1}, allowDiskUse: false},
verbosity: "allPlansExecution"
});
assert.commandWorked(result);
@@ -121,19 +120,19 @@ t.createIndex({c: 1});
// The query should no longer fail with a memory limit error because the planner can obtain
// the sort by scanning an index.
-assert.eq(40, t.find({c: {$lt: 40}}).sort({b: 1}).itcount());
+assert.eq(40, t.find({c: {$lt: 40}}).sort({b: 1}).allowDiskUse(false).itcount());
// The explain should succeed at all verbosity levels because the query itself succeeds.
// First test "queryPlanner" verbosity.
result = db.runCommand({
- explain: {find: t.getName(), filter: {c: {$lt: 40}}, sort: {b: 1}},
+ explain: {find: t.getName(), filter: {c: {$lt: 40}}, sort: {b: 1}, allowDiskUse: false},
verbosity: "queryPlanner"
});
assert.commandWorked(result);
assert("queryPlanner" in result);
result = db.runCommand({
- explain: {find: t.getName(), filter: {c: {$lt: 40}}, sort: {b: 1}},
+ explain: {find: t.getName(), filter: {c: {$lt: 40}}, sort: {b: 1}, allowDiskUse: false},
verbosity: "executionStats"
});
assert.commandWorked(result);
@@ -143,7 +142,7 @@ assertExecSuccess(result);
// We expect allPlansExecution verbosity to show execution stats for both candidate plans.
result = db.runCommand({
- explain: {find: t.getName(), filter: {c: {$lt: 40}}, sort: {b: 1}},
+ explain: {find: t.getName(), filter: {c: {$lt: 40}}, sort: {b: 1}, allowDiskUse: false},
verbosity: "allPlansExecution"
});
assert.commandWorked(result);
diff --git a/jstests/core/sortb.js b/jstests/core/sortb.js
index 7f24592e84a..7c6abe340b4 100644
--- a/jstests/core/sortb.js
+++ b/jstests/core/sortb.js
@@ -31,11 +31,12 @@ for (; i < 200 + numLargeDocumentsToWrite; ++i) {
t.save({a: i, b: i});
}
-assert.throws(function() {
- t.find().sort({a: -1}).hint({b: 1}).limit(100).itcount();
-});
-assert.throws(function() {
- t.find().sort({a: -1}).hint({b: 1}).showDiskLoc().limit(100).itcount();
-});
+assert.throwsWithCode(
+ () => t.find().sort({a: -1}).allowDiskUse(false).hint({b: 1}).limit(100).itcount(),
+ ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
+assert.throwsWithCode(
+ () =>
+ t.find().sort({a: -1}).allowDiskUse(false).hint({b: 1}).showDiskLoc().limit(100).itcount(),
+ ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
t.drop();
})();
diff --git a/jstests/core/sortg.js b/jstests/core/sortg.js
index 81603a580dc..7e6186af097 100644
--- a/jstests/core/sortg.js
+++ b/jstests/core/sortg.js
@@ -25,15 +25,15 @@ for (i = 0; i < 110; ++i) {
function memoryException(sortSpec, querySpec) {
querySpec = querySpec || {};
- var ex = assert.throws(function() {
- t.find(querySpec).sort(sortSpec).batchSize(1000).itcount();
- });
+ var ex = assert.throwsWithCode(
+ () => t.find(querySpec).sort(sortSpec).allowDiskUse(false).batchSize(1000).itcount(),
+ ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
assert(ex.toString().match(/Sort/));
}
function noMemoryException(sortSpec, querySpec) {
querySpec = querySpec || {};
- t.find(querySpec).sort(sortSpec).batchSize(1000).itcount();
+ t.find(querySpec).sort(sortSpec).allowDiskUse(false).batchSize(1000).itcount();
}
// Unindexed sorts.
diff --git a/jstests/core/sortj.js b/jstests/core/sortj.js
index a710a74bc5c..43773b5ede6 100644
--- a/jstests/core/sortj.js
+++ b/jstests/core/sortj.js
@@ -17,8 +17,8 @@ for (let i = 0; i < 1200 * numShards; ++i) {
t.save({a: 1, b: big});
}
-assert.throws(function() {
- t.find({a: {$gte: 0}, c: null}).sort({d: 1}).itcount();
-});
+assert.throwsWithCode(
+ () => t.find({a: {$gte: 0}, c: null}).sort({d: 1}).allowDiskUse(false).itcount(),
+ ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
t.drop();
})();
diff --git a/jstests/core/views/views_aggregation.js b/jstests/core/views/views_aggregation.js
index fd1277da546..50b2edfd4a7 100644
--- a/jstests/core/views/views_aggregation.js
+++ b/jstests/core/views/views_aggregation.js
@@ -133,18 +133,24 @@ assert.commandWorked(viewsDB.runCommand({
assertErrorCode(viewsDB.largeColl,
[{$sort: {x: -1}}],
ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed,
- "Expected in-memory sort to fail due to excessive memory usage");
+ "Expected in-memory sort to fail due to excessive memory usage",
+ {allowDiskUse: false});
viewsDB.largeView.drop();
assert.commandWorked(viewsDB.createView("largeView", "largeColl", []));
assertErrorCode(viewsDB.largeView,
[{$sort: {x: -1}}],
ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed,
- "Expected in-memory sort to fail due to excessive memory usage");
+ "Expected in-memory sort to fail due to excessive memory usage",
+ {allowDiskUse: false});
assert.commandWorked(
viewsDB.runCommand(
{aggregate: "largeView", pipeline: [{$sort: {x: -1}}], cursor: {}, allowDiskUse: true}),
"Expected aggregate to succeed since 'allowDiskUse' was specified");
+
+ assert.commandWorked(
+ viewsDB.runCommand({aggregate: "largeView", pipeline: [{$sort: {x: -1}}], cursor: {}}),
+ "Expected aggregate to succeed since 'allowDiskUse' is true by default");
})();
// Test explain modes on a view.
diff --git a/jstests/noPassthrough/external_sort_find.js b/jstests/noPassthrough/external_sort_find.js
index d9e13603290..a1505f129a3 100644
--- a/jstests/noPassthrough/external_sort_find.js
+++ b/jstests/noPassthrough/external_sort_find.js
@@ -1,6 +1,5 @@
/**
- * Test that the find command can spill to disk while executing a blocking sort, if the client
- * explicitly allows disk usage.
+ * Test that the find command can spill to disk while executing a blocking sort.
*/
(function() {
"use strict";
@@ -15,7 +14,7 @@ const kNumDocsWithinMemLimit = 70;
const kNumDocsExceedingMemLimit = 100;
const options = {
- setParameter: "internalQueryMaxBlockingSortMemoryUsageBytes=" + kMaxMemoryUsageBytes
+ setParameter: {internalQueryMaxBlockingSortMemoryUsageBytes: kMaxMemoryUsageBytes}
};
const conn = MongoRunner.runMongod(options);
assert.neq(null, conn, "mongod was unable to start up with options: " + tojson(options));
@@ -37,15 +36,14 @@ for (let i = 0; i < kNumDocsWithinMemLimit; ++i) {
}
// We should be able to successfully sort the collection with or without disk use allowed.
-assert.eq(kNumDocsWithinMemLimit, collection.find().sort({sequenceNumber: -1}).itcount());
assert.eq(kNumDocsWithinMemLimit,
- collection.find().sort({sequenceNumber: -1}).allowDiskUse().itcount());
+ collection.find().sort({sequenceNumber: -1}).allowDiskUse(false).itcount());
+assert.eq(kNumDocsWithinMemLimit,
+ collection.find().sort({sequenceNumber: -1}).allowDiskUse(true).itcount());
function getFindSortStats(allowDiskUse) {
let cursor = collection.find().sort({sequenceNumber: -1});
- if (allowDiskUse) {
- cursor = cursor.allowDiskUse();
- }
+ cursor = cursor.allowDiskUse(allowDiskUse);
const stageName = isSBEEnabled ? "sort" : "SORT";
const explain = cursor.explain("executionStats");
return getPlanStage(explain.executionStats.executionStages, stageName);
@@ -89,7 +87,8 @@ for (let i = kNumDocsWithinMemLimit; i < kNumDocsExceedingMemLimit; ++i) {
// The sort should fail if disk use is not allowed, but succeed if disk use is allowed.
assert.commandFailedWithCode(
- testDb.runCommand({find: collection.getName(), sort: {sequenceNumber: -1}}),
+ testDb.runCommand(
+ {find: collection.getName(), sort: {sequenceNumber: -1}, allowDiskUse: false}),
ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
assert.eq(kNumDocsExceedingMemLimit,
collection.find().sort({sequenceNumber: -1}).allowDiskUse().itcount());
@@ -116,7 +115,11 @@ assert.eq(sortStats.usedDisk, true);
// If disk use is not allowed but there is a limit, we should be able to avoid exceeding the memory
// limit.
assert.eq(kNumDocsWithinMemLimit,
- collection.find().sort({sequenceNumber: -1}).limit(kNumDocsWithinMemLimit).itcount());
+ collection.find()
+ .sort({sequenceNumber: -1})
+ .allowDiskUse(false)
+ .limit(kNumDocsWithinMemLimit)
+ .itcount());
// Create a view on top of the collection. When a find command is run against the view without disk
// use allowed, the command should fail with the expected error code. When the find command allows
@@ -124,7 +127,8 @@ assert.eq(kNumDocsWithinMemLimit,
assert.commandWorked(testDb.createView("identityView", collection.getName(), []));
const identityView = testDb.identityView;
assert.commandFailedWithCode(
- testDb.runCommand({find: identityView.getName(), sort: {sequenceNumber: -1}}),
+ testDb.runCommand(
+ {find: identityView.getName(), sort: {sequenceNumber: -1}, allowDiskUse: false}),
ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
assert.eq(kNumDocsExceedingMemLimit,
identityView.find().sort({sequenceNumber: -1}).allowDiskUse().itcount());
diff --git a/jstests/noPassthrough/lookup_pushdown.js b/jstests/noPassthrough/lookup_pushdown.js
index a918b571543..4734d5ff033 100644
--- a/jstests/noPassthrough/lookup_pushdown.js
+++ b/jstests/noPassthrough/lookup_pushdown.js
@@ -17,7 +17,8 @@ const JoinAlgorithm = {
};
// Standalone cases.
-const conn = MongoRunner.runMongod({setParameter: "featureFlagSBELookupPushdown=true"});
+const conn = MongoRunner.runMongod(
+ {setParameter: {featureFlagSBELookupPushdown: true, allowDiskUseByDefault: false}});
assert.neq(null, conn, "mongod was unable to start up");
const name = "lookup_pushdown";
const foreignCollName = "foreign_lookup_pushdown";
@@ -674,7 +675,10 @@ MongoRunner.stopMongod(conn);
const st = new ShardingTest({
shards: 2,
mongos: 1,
- other: {shardOptions: {setParameter: "featureFlagSBELookupPushdown=true"}}
+ other: {
+ shardOptions:
+ {setParameter: {featureFlagSBELookupPushdown: true, allowDiskUseByDefault: false}}
+ }
});
db = st.s.getDB(name);
diff --git a/jstests/noPassthrough/mr_disk_use.js b/jstests/noPassthrough/mr_disk_use.js
new file mode 100644
index 00000000000..f2d178aa9c2
--- /dev/null
+++ b/jstests/noPassthrough/mr_disk_use.js
@@ -0,0 +1,43 @@
+// Test the mapReduce command with different values of the allowDiskUseByDefault parameter.
+
+(function() {
+"use strict";
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+
+const db = conn.getDB("test");
+const coll = db.getCollection(jsTestName());
+coll.drop();
+
+const memoryLimitMb = 1;
+const largeStr = "A".repeat(1024 * 1024); // 1MB string
+
+// Create a collection exceeding the memory limit.
+for (let i = 0; i < memoryLimitMb + 1; ++i)
+ assert.commandWorked(coll.insert({largeStr: largeStr}));
+
+const mapReduceCmd = {
+ mapReduce: coll.getName(),
+ map: function() {
+ emit("a", this.largeStr);
+ },
+ reduce: function(k, v) {
+ return 42;
+ },
+ out: {inline: 1}
+};
+
+assert.commandWorked(db.adminCommand(
+ {setParameter: 1, internalDocumentSourceGroupMaxMemoryBytes: memoryLimitMb * 1024 * 1024}));
+
+assert.commandWorked(db.adminCommand({setParameter: 1, allowDiskUseByDefault: false}));
+assert.commandFailedWithCode(db.runCommand(mapReduceCmd),
+ ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
+
+assert.commandWorked(db.adminCommand({setParameter: 1, allowDiskUseByDefault: true}));
+const res = assert.commandWorked(db.runCommand(mapReduceCmd));
+assert.eq(res.results[0], {_id: "a", value: 42}, res);
+
+MongoRunner.stopMongod(conn);
+})();
\ No newline at end of file
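
The setParameter toggles exercised above also work at startup. A sketch of that variant (the same pattern appears in plan_cache_replan_sort.js below):

// Start a mongod with spilling disabled by default; disk_use_options.idl in
// this commit declares the parameter with set_at: [startup, runtime].
const conn2 = MongoRunner.runMongod({setParameter: {allowDiskUseByDefault: false}});
assert.neq(null, conn2, "mongod was unable to start up");
MongoRunner.stopMongod(conn2);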
diff --git a/jstests/noPassthrough/plan_cache_list_failed_plans.js b/jstests/noPassthrough/plan_cache_list_failed_plans.js
index e1fb5161c71..696237a3248 100644
--- a/jstests/noPassthrough/plan_cache_list_failed_plans.js
+++ b/jstests/noPassthrough/plan_cache_list_failed_plans.js
@@ -23,6 +23,7 @@ const numDocs = 32;
const smallNumber = 10;
assert.commandWorked(testDB.adminCommand(
{setParameter: 1, internalQueryMaxBlockingSortMemoryUsageBytes: smallNumber}));
+assert.commandWorked(testDB.adminCommand({setParameter: 1, allowDiskUseByDefault: false}));
for (let i = 0; i < numDocs * 2; ++i)
assert.commandWorked(coll.insert({a: ((i >= (numDocs * 2) - smallNumber) ? 1 : 0), d: i}));
diff --git a/jstests/noPassthrough/plan_cache_replan_group_lookup.js b/jstests/noPassthrough/plan_cache_replan_group_lookup.js
index 149d8beaf2e..de6d9c1a42c 100644
--- a/jstests/noPassthrough/plan_cache_replan_group_lookup.js
+++ b/jstests/noPassthrough/plan_cache_replan_group_lookup.js
@@ -159,7 +159,8 @@ testFn(aLookup,
bLookup,
createLookupForeignColl,
dropLookupForeignColl,
- (pipeline) => verifyCorrectLookupAlgorithmUsed("NestedLoopJoin", pipeline));
+ (pipeline) =>
+ verifyCorrectLookupAlgorithmUsed("NestedLoopJoin", pipeline, {allowDiskUse: false}));
// INLJ.
testFn(aLookup,
diff --git a/jstests/noPassthrough/plan_cache_replan_sort.js b/jstests/noPassthrough/plan_cache_replan_sort.js
index 86ac3ca8bdc..55398c422ea 100644
--- a/jstests/noPassthrough/plan_cache_replan_sort.js
+++ b/jstests/noPassthrough/plan_cache_replan_sort.js
@@ -12,7 +12,7 @@ load("jstests/libs/analyze_plan.js");
load("jstests/libs/profiler.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const conn = MongoRunner.runMongod();
+const conn = MongoRunner.runMongod({setParameter: {allowDiskUseByDefault: false}});
const db = conn.getDB("test");
const coll = db.plan_cache_replan_sort;
coll.drop();
diff --git a/jstests/noPassthrough/query_knobs_validation.js b/jstests/noPassthrough/query_knobs_validation.js
index 511ce3251e4..216b6c97e32 100644
--- a/jstests/noPassthrough/query_knobs_validation.js
+++ b/jstests/noPassthrough/query_knobs_validation.js
@@ -57,6 +57,7 @@ const expectedParamDefaults = {
internalQueryMaxNumberOfFieldsToChooseFilteredColumnScan: 12,
internalQueryFLERewriteMemoryLimit: 14 * 1024 * 1024,
internalQueryDisableLookupExecutionUsingHashJoin: false,
+ allowDiskUseByDefault: true,
};
function assertDefaultParameterValues() {
@@ -239,6 +240,9 @@ assertSetParameterSucceeds("internalQueryMaxNumberOfFieldsToChooseFilteredColumn
assertSetParameterSucceeds("internalQueryMaxNumberOfFieldsToChooseFilteredColumnScan", 0);
assertSetParameterFails("internalQueryMaxNumberOfFieldsToChooseFilteredColumnScan", -1);
+assertSetParameterSucceeds("allowDiskUseByDefault", false);
+assertSetParameterSucceeds("allowDiskUseByDefault", true);
+
assertSetParameterSucceeds("internalQueryFLERewriteMemoryLimit", 14 * 1024 * 1024);
assertSetParameterFails("internalQueryFLERewriteMemoryLimit", 0);
diff --git a/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js b/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
index 6f148307a98..7142a4bd6ea 100644
--- a/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
+++ b/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
@@ -35,6 +35,7 @@ const paramList = [
{name: "internalQueryCollectionMaxDataSizeBytesToChooseHashJoin", value: 100},
{name: "internalQueryCollectionMaxStorageSizeBytesToChooseHashJoin", value: 100},
{name: "internalQueryDisableLookupExecutionUsingHashJoin", value: true},
+ {name: "allowDiskUseByDefault", value: false},
{name: "internalQueryMaxNumberOfFieldsToChooseUnfilteredColumnScan", value: 100},
{name: "internalQueryMaxNumberOfFieldsToChooseFilteredColumnScan", value: 100},
];
diff --git a/jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js b/jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js
index ffb9b948628..bd5d7e9bcca 100644
--- a/jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js
+++ b/jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js
@@ -72,7 +72,8 @@ function assertSorted(result) {
{$_internalInhibitOptimization: {}},
{$sort: {t: 1}},
],
- cursor: {}
+ cursor: {},
+ allowDiskUse: false
}),
ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
@@ -88,7 +89,8 @@ function assertSorted(result) {
}
},
],
- cursor: {}
+ cursor: {},
+ allowDiskUse: false
}),
ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
}
diff --git a/jstests/noPassthrough/views_count_distinct_disk_use.js b/jstests/noPassthrough/views_count_distinct_disk_use.js
new file mode 100644
index 00000000000..a8c5167c0b2
--- /dev/null
+++ b/jstests/noPassthrough/views_count_distinct_disk_use.js
@@ -0,0 +1,45 @@
+// Test that the count and distinct commands on views work with different values of the
+// allowDiskUseByDefault parameter.
+
+(function() {
+"use strict";
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+
+const viewsDB = conn.getDB(jsTestName());
+viewsDB.largeColl.drop();
+
+const memoryLimitMb = 1;
+const largeStr = "A".repeat(1024 * 1024); // 1MB string
+
+// Create a collection exceeding the memory limit.
+for (let i = 0; i < memoryLimitMb + 1; ++i)
+ assert.commandWorked(viewsDB.largeColl.insert({x: i, largeStr: largeStr}));
+
+viewsDB.largeView.drop();
+assert.commandWorked(viewsDB.createView("largeView", "largeColl", [{$sort: {x: -1}}]));
+
+function testDiskUse(cmd) {
+ assert.commandWorked(viewsDB.adminCommand({setParameter: 1, allowDiskUseByDefault: false}));
+ assert.commandFailedWithCode(viewsDB.runCommand(cmd),
+ ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
+
+ assert.commandWorked(viewsDB.adminCommand({setParameter: 1, allowDiskUseByDefault: true}));
+ assert.commandWorked(viewsDB.runCommand(cmd));
+}
+
+// The 'count' command executes the view definition pipeline containing the '$sort' stage. This
+// stage needs to spill to disk if the memory limit is reached.
+assert.commandWorked(viewsDB.adminCommand(
+ {setParameter: 1, internalQueryMaxBlockingSortMemoryUsageBytes: memoryLimitMb * 1024 * 1024}));
+testDiskUse({count: "largeView"});
+
+// The 'distinct' command involves a '$group' stage. This stage needs to spill to disk if the memory
+// limit is reached.
+assert.commandWorked(viewsDB.adminCommand(
+ {setParameter: 1, internalDocumentSourceGroupMaxMemoryBytes: memoryLimitMb * 1024 * 1024}));
+testDiskUse({distinct: "largeView", key: "largeStr"});
+
+MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthroughWithMongod/find_and_modify_server16469.js b/jstests/noPassthroughWithMongod/find_and_modify_server16469.js
index b1b7ceae613..40bd862e111 100644
--- a/jstests/noPassthroughWithMongod/find_and_modify_server16469.js
+++ b/jstests/noPassthroughWithMongod/find_and_modify_server16469.js
@@ -16,6 +16,7 @@ var oldSortLimit = result.internalQueryMaxBlockingSortMemoryUsageBytes;
var newSortLimit = 1024 * 1024;
assert.commandWorked(
db.adminCommand({setParameter: 1, internalQueryMaxBlockingSortMemoryUsageBytes: newSortLimit}));
+assert.commandWorked(db.adminCommand({setParameter: 1, allowDiskUseByDefault: false}));
try {
// Insert ~3MB of data.
@@ -41,6 +42,7 @@ try {
assert.eq(result.value.b, 0);
} finally {
// Restore the original sort memory limit.
+ assert.commandWorked(db.adminCommand({setParameter: 1, allowDiskUseByDefault: true}));
assert.commandWorked(db.adminCommand(
{setParameter: 1, internalQueryMaxBlockingSortMemoryUsageBytes: oldSortLimit}));
}
diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js
index edc5d1ac7c0..6be5d38f013 100644
--- a/jstests/sharding/in_memory_sort_limit.js
+++ b/jstests/sharding/in_memory_sort_limit.js
@@ -35,21 +35,21 @@ var failLimit = 4000;
// Test on MongoD
jsTestLog("Test no error with limit of " + passLimit + " on mongod");
-assert.eq(passLimit, shardCol.find().sort({x: 1}).limit(passLimit).itcount());
+assert.eq(passLimit, shardCol.find().sort({x: 1}).allowDiskUse(false).limit(passLimit).itcount());
jsTestLog("Test error with limit of " + failLimit + " on mongod");
-assert.throws(function() {
- shardCol.find().sort({x: 1}).limit(failLimit).itcount();
-});
+assert.throwsWithCode(
+ () => shardCol.find().sort({x: 1}).allowDiskUse(false).limit(failLimit).itcount(),
+ ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
// Test on MongoS
jsTestLog("Test no error with limit of " + passLimit + " on mongos");
-assert.eq(passLimit, mongosCol.find().sort({x: 1}).limit(passLimit).itcount());
+assert.eq(passLimit, mongosCol.find().sort({x: 1}).allowDiskUse(false).limit(passLimit).itcount());
jsTestLog("Test error with limit of " + failLimit + " on mongos");
-assert.throws(function() {
- mongosCol.find().sort({x: 1}).limit(failLimit).itcount();
-});
+assert.throwsWithCode(
+ () => mongosCol.find().sort({x: 1}).allowDiskUse(false).limit(failLimit).itcount(),
+ ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
st.stop();
})();
diff --git a/jstests/sharding/query/javascript_heap_limit.js b/jstests/sharding/query/javascript_heap_limit.js
index 32762653f8d..6d2ae116201 100644
--- a/jstests/sharding/query/javascript_heap_limit.js
+++ b/jstests/sharding/query/javascript_heap_limit.js
@@ -46,7 +46,8 @@ const aggregateWithJSFunction = {
pipeline: [
{$group: {_id: "$x"}},
{$project: {y: {"$function": {args: [], body: allocateLargeString, lang: "js"}}}}
- ]
+ ],
+ allowDiskUse: false
};
const aggregateWithInternalJsReduce = {
aggregate: "coll",
@@ -61,7 +62,8 @@ const aggregateWithInternalJsReduce = {
}
}
}
- }]
+ }],
+ allowDiskUse: false
};
const aggregateWithUserDefinedAccumulator = {
aggregate: "coll",
@@ -79,7 +81,8 @@ const aggregateWithUserDefinedAccumulator = {
}
}
}
- }]
+ }],
+ allowDiskUse: false
};
const findWithJavaScriptFunction = {
find: "coll",
diff --git a/jstests/sharding/query/sharded_graph_lookup_execution.js b/jstests/sharding/query/sharded_graph_lookup_execution.js
index 4ed51bbc2b0..51e18a36e88 100644
--- a/jstests/sharding/query/sharded_graph_lookup_execution.js
+++ b/jstests/sharding/query/sharded_graph_lookup_execution.js
@@ -481,7 +481,7 @@ st.shardColl(
st.shardColl(
airfieldsColl, {airfield: 1}, {airfield: "LHR"}, {airfield: "LHR"}, mongosDB.getName());
-assert.commandWorked(mongosDB.createView("airportsView", airportsColl.getName(),
+assert.commandWorked(mongosDB.createView("airportsView", airportsColl.getName(),
[{$lookup: {
from: "airfields",
localField: "airport",
@@ -579,7 +579,10 @@ expectedRes = [
}
];
assertGraphLookupExecution(
- pipeline, {comment: "sharded_to_sharded_on_mongos_targeted"}, expectedRes, [{
+ pipeline,
+ {comment: "sharded_to_sharded_on_mongos_targeted", allowDiskUse: false},
+ expectedRes,
+ [{
// Because the $graphLookup is after a $group that requires merging, it is executed on
// mongos.
toplevelExec: [0, 0],
@@ -595,7 +598,10 @@ st.shardColl(airportsColl, {_id: 1}, {_id: 1}, {_id: 1}, mongosDB.getName());
st.shardColl(
travelersColl, {firstName: 1}, {firstName: "Bob"}, {firstName: "Bob"}, mongosDB.getName());
assertGraphLookupExecution(
- pipeline, {comment: "sharded_to_sharded_on_mongos_untargeted"}, expectedRes, [{
+ pipeline,
+ {comment: "sharded_to_sharded_on_mongos_untargeted", allowDiskUse: false},
+ expectedRes,
+ [{
// Because the $graphLookup is after a $group that requires merging, it is executed on
// mongos.
toplevelExec: [0, 0],
diff --git a/jstests/sharding/query/sharded_lookup_execution.js b/jstests/sharding/query/sharded_lookup_execution.js
index 7ffc854be78..293229f65f7 100644
--- a/jstests/sharding/query/sharded_lookup_execution.js
+++ b/jstests/sharding/query/sharded_lookup_execution.js
@@ -418,7 +418,7 @@ st.shardColl(updatesColl,
{original_review_id: 1},
mongosDB.getName());
-assert.commandWorked(mongosDB.createView("reviewsView", reviewsColl.getName(),
+assert.commandWorked(mongosDB.createView("reviewsView", reviewsColl.getName(),
[{$lookup: {
from: "updates",
let: {review_id: "$_id"},
@@ -491,32 +491,34 @@ expectedRes = [
{_id: "shirt", reviews: [{comment: "meh"}]}
];
-assertLookupExecution(pipeline, {comment: "sharded_to_sharded_on_mongos_targeted"}, {
- results: expectedRes,
- // Because the $lookup is after a $group that requires merging, the $lookup stage is executed on
- // mongos.
- toplevelExec: [0, 0],
- mongosMerger: true,
- // For every document that flows through the $lookup stage, the mongos executing the $lookup
- // will target the shard that holds the relevant data for the sharded foreign collection.
- subpipelineExec: [0, 2]
-});
+assertLookupExecution(
+ pipeline, {comment: "sharded_to_sharded_on_mongos_targeted", allowDiskUse: false}, {
+ results: expectedRes,
+ // Because the $lookup is after a $group that requires merging, the $lookup stage is
+ // executed on mongos.
+ toplevelExec: [0, 0],
+ mongosMerger: true,
+ // For every document that flows through the $lookup stage, the mongos executing the $lookup
+ // will target the shard that holds the relevant data for the sharded foreign collection.
+ subpipelineExec: [0, 2]
+ });
// Test that an untargeted $lookup on a sharded collection can execute correctly on mongos.
st.shardColl(ordersColl, {_id: 1}, {_id: 1}, {_id: 1}, mongosDB.getName());
st.shardColl(reviewsColl, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName());
-assertLookupExecution(pipeline, {comment: "sharded_to_sharded_on_mongos_untargeted"}, {
- results: expectedRes,
- // Because the $lookup is after a $group that requires merging, the $lookup stage is executed on
- // mongos.
- toplevelExec: [0, 0],
- mongosMerger: true,
- // For every document that flows through the $lookup stage, the mongos executing the $lookup
- // will perform a scatter-gather query and open a cursor on every shard that contains the
- // foreign collection.
- subpipelineExec: [2, 2]
-});
+assertLookupExecution(
+ pipeline, {comment: "sharded_to_sharded_on_mongos_untargeted", allowDiskUse: false}, {
+ results: expectedRes,
+ // Because the $lookup is after a $group that requires merging, the $lookup stage is
+ // executed on mongos.
+ toplevelExec: [0, 0],
+ mongosMerger: true,
+ // For every document that flows through the $lookup stage, the mongos executing the $lookup
+ // will perform a scatter-gather query and open a cursor on every shard that contains the
+ // foreign collection.
+ subpipelineExec: [2, 2]
+ });
// Test that a targeted $lookup on a sharded collection can execute correctly when mongos delegates
// to a merging shard.
@@ -548,7 +550,7 @@ assertLookupExecution(
randomlyDelegatedMerger: true,
// For every document that flows through the $lookup stage, the node executing the $lookup
// will perform a scatter-gather query and open a cursor on every shard that contains the
- // foreign collection.
+ // foreign collection.
subpipelineExec: [2, 2]
});
@@ -609,19 +611,19 @@ pipeline = [
// To make sure that there is a non-correlated pipeline prefix, we will match on "name" instead
// of _id to prevent the $match stage from being optimized before the $group.
{$lookup: {
- from: "reviews",
- let: {customer_product_name: "$products._id"},
+ from: "reviews",
+ let: {customer_product_name: "$products._id"},
pipeline: [
- {$group:
+ {$group:
{_id: "$product_id", avg_stars: {$avg: "$stars"}, name: {$first: "$product_id"}}
},
{$match: {$expr: {$eq: ["$name", "$$customer_product_name"]}}},
],
as: "avg_review"}},
{$unwind: {path: "$avg_review", preserveNullAndEmptyArrays: true}},
- {$group:
+ {$group:
{
- _id: "$_id",
+ _id: "$_id",
products: {$push: {_id: "$products._id", avg_review: "$avg_review.avg_stars"}}
}
}
@@ -651,18 +653,18 @@ pipeline = [
// To make sure that there is a non-correlated pipeline prefix, we will match on "name" instead
// of _id to prevent the $match stage from being optimized before the $group.
{$lookup: {
- from: "reviews",
- let: {customer_product_name: "$products._id"},
+ from: "reviews",
+ let: {customer_product_name: "$products._id"},
pipeline: [
- {$group:
- {_id: "$product_id", avg_stars: {$avg: "$stars"}, name: {$first: "$product_id"}}},
+ {$group:
+ {_id: "$product_id", avg_stars: {$avg: "$stars"}, name: {$first: "$product_id"}}},
{$match: {$expr: {$eq: ["$name", "$$customer_product_name"]}}},
],
as: "avg_review"}},
{$unwind: {path: "$avg_review", preserveNullAndEmptyArrays: true}},
- {$group:
+ {$group:
{
- _id: "$_id",
+ _id: "$_id",
products: {$push: {_id: "$products._id", avg_review: "$avg_review.avg_stars"}}
}
}
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index c81942bc35f..d562fb7cbc1 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -1444,6 +1444,7 @@ env.Library(
'exec/count.cpp',
'exec/count_scan.cpp',
'exec/delete_stage.cpp',
+ 'exec/disk_use_options.idl',
'exec/distinct_scan.cpp',
'exec/eof.cpp',
'exec/fetch.cpp',
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 83f8e864648..fc81cb6904d 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/commands/test_commands_enabled.h"
#include "mongo/db/cursor_manager.h"
#include "mongo/db/db_raii.h"
+#include "mongo/db/exec/disk_use_options_gen.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/fle_crud.h"
#include "mongo/db/matcher/extensions_callback_real.h"
@@ -158,7 +159,7 @@ boost::intrusive_ptr<ExpressionContext> makeExpressionContext(
verbosity,
false, // fromMongos
false, // needsMerge
- findCommand.getAllowDiskUse(),
+ findCommand.getAllowDiskUse().value_or(allowDiskUseByDefault.load()),
false, // bypassDocumentValidation
false, // isMapReduceCommand
findCommand.getNamespaceOrUUID().nss().value_or(NamespaceString()),
diff --git a/src/mongo/db/commands/map_reduce_agg.cpp b/src/mongo/db/commands/map_reduce_agg.cpp
index b75a18a4a77..29db45b92f8 100644
--- a/src/mongo/db/commands/map_reduce_agg.cpp
+++ b/src/mongo/db/commands/map_reduce_agg.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/commands/mr_common.h"
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
+#include "mongo/db/exec/disk_use_options_gen.h"
#include "mongo/db/exec/document_value/value.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/pipeline/document_source_cursor.h"
@@ -90,9 +91,9 @@ auto makeExpressionContext(OperationContext* opCtx,
auto expCtx = make_intrusive<ExpressionContext>(
opCtx,
verbosity,
- false, // fromMongos
- false, // needsmerge
- true, // allowDiskUse
+ false, // fromMongos
+ false, // needsMerge
+ allowDiskUseByDefault.load(), // allowDiskUse
parsedMr.getBypassDocumentValidation().get_value_or(false),
true, // isMapReduceCommand
parsedMr.getNamespace(),
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 127898b875e..9cda7fc605e 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -46,6 +46,7 @@
#include "mongo/db/curop.h"
#include "mongo/db/cursor_manager.h"
#include "mongo/db/db_raii.h"
+#include "mongo/db/exec/disk_use_options_gen.h"
#include "mongo/db/exec/document_value/document.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/fle_crud.h"
@@ -441,6 +442,7 @@ boost::intrusive_ptr<ExpressionContext> makeExpressionContext(
expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
expCtx->collationMatchesDefault = collationMatchesDefault;
expCtx->forPerShardCursor = request.getPassthroughToShard().has_value();
+ expCtx->allowDiskUse = request.getAllowDiskUse().value_or(allowDiskUseByDefault.load());
// If the request specified v2 resume tokens for change streams, set this on the expCtx. On 6.0
// we only expect this to occur during testing.
diff --git a/src/mongo/db/exec/disk_use_options.idl b/src/mongo/db/exec/disk_use_options.idl
new file mode 100644
index 00000000000..3aaecab634f
--- /dev/null
+++ b/src/mongo/db/exec/disk_use_options.idl
@@ -0,0 +1,43 @@
+# Copyright (C) 2019-present MongoDB, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the Server Side Public License, version 1,
+# as published by MongoDB, Inc.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Server Side Public License for more details.
+#
+# You should have received a copy of the Server Side Public License
+# along with this program. If not, see
+# <http://www.mongodb.com/licensing/server-side-public-license>.
+#
+# As a special exception, the copyright holders give permission to link the
+# code of portions of this program with the OpenSSL library under certain
+# conditions as described in each individual source file and distribute
+# linked combinations including the program with the OpenSSL library. You
+# must comply with the Server Side Public License in all respects for
+# all of the code used other than as permitted herein. If you modify file(s)
+# with this exception, you may extend this exception to your version of the
+# file(s), but you are not obligated to do so. If you do not wish to do so,
+# delete this exception statement from your version. If you delete this
+# exception statement from all source files in the program, then also delete
+# it in the license file.
+#
+
+global:
+ cpp_namespace: "mongo"
+ cpp_includes:
+ - "mongo/db/query/sbe_plan_cache_on_parameter_change.h"
+
+server_parameters:
+
+ allowDiskUseByDefault:
+ description: "Allow queries which exceed their memory budget to spill to disk. This option can
+ be overridden at the per-query level."
+ set_at: [ startup, runtime ]
+ cpp_varname: "allowDiskUseByDefault"
+ cpp_vartype: AtomicWord<bool>
+ default: true
+ on_update: plan_cache_util::clearSbeCacheOnParameterChange
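
Because of the on_update hook above, flipping the parameter at runtime also clears the SBE plan cache (sbe_plan_cache_clear_on_param_change.js exercises this). A sketch of a runtime toggle with a read-back:

// Flip the knob at runtime; cached SBE plans are invalidated as a side effect.
assert.commandWorked(db.adminCommand({setParameter: 1, allowDiskUseByDefault: false}));
const param = assert.commandWorked(db.adminCommand({getParameter: 1, allowDiskUseByDefault: 1}));
assert.eq(false, param.allowDiskUseByDefault);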
diff --git a/src/mongo/db/query/query_request_helper.cpp b/src/mongo/db/query/query_request_helper.cpp
index 8203e897cb8..410c05fcaf0 100644
--- a/src/mongo/db/query/query_request_helper.cpp
+++ b/src/mongo/db/query/query_request_helper.cpp
@@ -371,7 +371,7 @@ StatusWith<BSONObj> asAggregationCommand(const FindCommandRequest& findCommand)
aggregationBuilder.append(FindCommandRequest::kUnwrappedReadPrefFieldName,
findCommand.getUnwrappedReadPref());
}
- if (findCommand.getAllowDiskUse()) {
+ if (findCommand.getAllowDiskUse().has_value()) {
aggregationBuilder.append(FindCommandRequest::kAllowDiskUseFieldName,
static_cast<bool>(findCommand.getAllowDiskUse()));
}
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index 20e618eff17..80ed0325802 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -1273,6 +1273,7 @@ TEST(QueryRequestTest, ConvertToAggregationSucceeds) {
auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd);
ASSERT_OK(ar.getStatus());
ASSERT(!ar.getValue().getExplain());
+ ASSERT(!ar.getValue().getAllowDiskUse().has_value());
ASSERT(ar.getValue().getPipeline().empty());
ASSERT_EQ(ar.getValue().getCursor().getBatchSize().value_or(
aggregation_request_helper::kDefaultBatchSize),
@@ -1515,6 +1516,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithAllowDiskUseTrueSucceeds) {
auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body;
auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd);
ASSERT_OK(ar.getStatus());
+ ASSERT(ar.getValue().getAllowDiskUse().has_value());
ASSERT_EQ(true, ar.getValue().getAllowDiskUse());
}
@@ -1527,6 +1529,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithAllowDiskUseFalseSucceeds) {
auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body;
auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd);
ASSERT_OK(ar.getStatus());
+ ASSERT(ar.getValue().getAllowDiskUse().has_value());
ASSERT_EQ(false, ar.getValue().getAllowDiskUse());
}
diff --git a/src/mongo/shell/assert.js b/src/mongo/shell/assert.js
index bf6be589aa2..ff168bd865d 100644
--- a/src/mongo/shell/assert.js
+++ b/src/mongo/shell/assert.js
@@ -596,6 +596,7 @@ assert = (function() {
msg,
"[" + tojson(error.code) + "] != [" + tojson(expectedCode) + "] are not equal"));
}
+ return error;
};
assert.doesNotThrow = function(func, params, msg) {
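
Judging from the call sites added above (e.g. sortg.js), the function gaining a return value here appears to be assert.throwsWithCode; returning the caught error lets tests inspect it after asserting on its code. A sketch of the enabled pattern:

// Capture the error thrown by an in-memory sort that is denied disk use.
const ex = assert.throwsWithCode(
    () => t.find().sort({a: 1}).allowDiskUse(false).batchSize(1000).itcount(),
    ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
assert(ex.toString().match(/Sort/));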
diff --git a/src/mongo/shell/query.js b/src/mongo/shell/query.js
index ac863469bd2..a5d920cb109 100644
--- a/src/mongo/shell/query.js
+++ b/src/mongo/shell/query.js
@@ -496,8 +496,8 @@ DBQuery.prototype.collation = function(collationSpec) {
return this._addSpecial("collation", collationSpec);
};
-DBQuery.prototype.allowDiskUse = function() {
- return this._addSpecial("allowDiskUse", true);
+DBQuery.prototype.allowDiskUse = function(value) {
+ return this._addSpecial("allowDiskUse", (value === undefined ? true : value));
};
/**
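
DBQuery.prototype.allowDiskUse now accepts an optional boolean, defaulting to true so the old zero-argument call sites keep working. A usage sketch (coll is a hypothetical collection handle):

coll.find().sort({x: 1}).allowDiskUse().itcount();       // same as allowDiskUse(true)
coll.find().sort({x: 1}).allowDiskUse(false).itcount();  // explicit opt-out, as in the tests above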