author     Misha Ivkov <misha.ivkov@10gen.com>   2019-07-23 13:34:08 -0400
committer  Misha Ivkov <misha.ivkov@10gen.com>   2019-07-26 17:02:22 -0400
commit     579aee2ca8affcecf38399e4831933dd2df7c67e (patch)
tree       349862c2043619ee2e665995eb014cdc310e6b7f
parent     630bf59ad61a2f1c9d7d568395da6bc76bb94a5e (diff)
download   mongo-579aee2ca8affcecf38399e4831933dd2df7c67e.tar.gz
SERVER-42179 Remove internalDocumentSourceSortMaxBlockingSortBytes in favor of
internalQueryExecMaxBlockingSortBytes and change default to 100 MB.
-rw-r--r--  buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml  8
-rw-r--r--  jstests/core/explain_execution_error.js  4
-rw-r--r--  jstests/core/sortb.js  66
-rw-r--r--  jstests/core/sortg.js  90
-rw-r--r--  jstests/core/sortj.js  29
-rw-r--r--  jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js  10
-rw-r--r--  jstests/noPassthrough/query_knobs_validation.js  7
-rw-r--r--  jstests/noPassthrough/use_disk.js  4
-rw-r--r--  jstests/sharding/in_memory_sort_limit.js  8
-rw-r--r--  jstests/sharding/sharding_balance2.js  10
-rw-r--r--  jstests/sharding/sharding_balance3.js  7
-rw-r--r--  src/mongo/db/pipeline/document_source_sort.cpp  6
-rw-r--r--  src/mongo/db/pipeline/document_source_sort.h  2
-rw-r--r--  src/mongo/db/query/query_knobs.idl  16
14 files changed, 149 insertions, 118 deletions
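
Note: the renamed knob keeps the same getParameter/setParameter interface already used by the
tests updated below. A minimal mongo shell sketch of reading and overriding it at runtime (the
32 MB override value is illustrative, matching the value the sharding suites below pin):

    // Read the current blocking-sort memory limit (100 MB by default after this change).
    const res = db.adminCommand({getParameter: 1, internalQueryExecMaxBlockingSortBytes: 1});
    assert.commandWorked(res);
    print(res.internalQueryExecMaxBlockingSortBytes);

    // Override it at runtime, e.g. to restore the previous 32 MB limit in a test.
    assert.commandWorked(db.adminCommand(
        {setParameter: 1, internalQueryExecMaxBlockingSortBytes: 32 * 1024 * 1024}));
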
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml
index cb7a98722a9..2d6c81f9075 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml
@@ -19,7 +19,13 @@ selector:
# The following tests create large oplog entries, which can cause the secondary to fall off the
# primary's oplog when run as a part of burn_in_tests.
- jstests/core/max_doc_size.js
- - jstests/core/mr_bigobjects.js
+ - jstests/core/mr_bigobject.js
+ # The following tests also create large oplog entries due to the maximum blocking sort size being
+ # 100 MB.
+ - jstests/core/explain_execution_error.js
+ - jstests/core/sortb.js
+ - jstests/core/sortg.js
+ - jstests/core/sortj.js
executor:
archive:
diff --git a/jstests/core/explain_execution_error.js b/jstests/core/explain_execution_error.js
index a59a4b7930f..f80af65b0c0 100644
--- a/jstests/core/explain_execution_error.js
+++ b/jstests/core/explain_execution_error.js
@@ -69,9 +69,9 @@ while (bigStr.length < (1024 * 1024)) {
bigStr += bigStr;
}
-// Make a collection that is about 40 MB * number of shards.
+// Make a collection that is about 120 MB * number of shards.
const numShards = FixtureHelpers.numberOfShardsForCollection(t);
-for (var i = 0; i < 40 * numShards; i++) {
+for (var i = 0; i < 120 * numShards; i++) {
assert.writeOK(t.insert({a: bigStr, b: 1, c: i}));
}
diff --git a/jstests/core/sortb.js b/jstests/core/sortb.js
index e4feea7ecfc..953062a05ec 100644
--- a/jstests/core/sortb.js
+++ b/jstests/core/sortb.js
@@ -1,31 +1,41 @@
// Test that the in memory sort capacity limit is checked for all "top N" sort candidates.
// SERVER-4716
+(function() {
+ "use strict";
-t = db.jstests_sortb;
-t.drop();
-
-t.ensureIndex({b: 1});
-
-for (i = 0; i < 100; ++i) {
- t.save({a: i, b: i});
-}
-
-// These large documents will not be part of the initial set of "top 100" matches, and they will
-// not be part of the final set of "top 100" matches returned to the client. However, they are an
-// intermediate set of "top 100" matches and should trigger an in memory sort capacity exception.
-big = new Array(1024 * 1024).toString();
-for (i = 100; i < 200; ++i) {
- t.save({a: i, b: i, big: big});
-}
-
-for (i = 200; i < 300; ++i) {
- t.save({a: i, b: i});
-}
-
-assert.throws(function() {
- t.find().sort({a: -1}).hint({b: 1}).limit(100).itcount();
-});
-assert.throws(function() {
- t.find().sort({a: -1}).hint({b: 1}).showDiskLoc().limit(100).itcount();
-});
-t.drop(); \ No newline at end of file
+ load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+ const t = db.jstests_sortb;
+ t.drop();
+
+ t.ensureIndex({b: 1});
+
+ let i;
+ for (i = 0; i < 100; ++i) {
+ t.save({a: i, b: i});
+ }
+
+ const numShards = FixtureHelpers.numberOfShardsForCollection(t);
+ const numLargeDocumentsToWrite = 120 * numShards;
+
+ // These large documents will not be part of the initial set of "top 100" matches, and they will
+ // not be part of the final set of "top 100" matches returned to the client. However, they are
+ // an intermediate set of "top 100" matches and should trigger an in memory sort capacity
+ // exception.
+ const big = new Array(1024 * 1024).toString();
+ for (; i < 100 + numLargeDocumentsToWrite; ++i) {
+ t.save({a: i, b: i, big: big});
+ }
+
+ for (; i < 200 + numLargeDocumentsToWrite; ++i) {
+ t.save({a: i, b: i});
+ }
+
+ assert.throws(function() {
+ t.find().sort({a: -1}).hint({b: 1}).limit(100).itcount();
+ });
+ assert.throws(function() {
+ t.find().sort({a: -1}).hint({b: 1}).showDiskLoc().limit(100).itcount();
+ });
+ t.drop();
+})();
diff --git a/jstests/core/sortg.js b/jstests/core/sortg.js
index 2315828ad65..3e3c7bf517d 100644
--- a/jstests/core/sortg.js
+++ b/jstests/core/sortg.js
@@ -3,61 +3,65 @@
// @tags: [assumes_no_implicit_index_creation, requires_getmore]
// Test that a memory exception is triggered for in memory sorts, but not for indexed sorts.
+(function() {
+ "use strict";
-t = db.jstests_sortg;
-t.drop();
+ const t = db.jstests_sortg;
+ t.drop();
-big = new Array(1000000).toString();
+ const big = new Array(1000000).toString();
-for (i = 0; i < 100; ++i) {
- t.save({b: 0});
-}
+ let i;
+ for (i = 0; i < 100; ++i) {
+ t.save({b: 0});
+ }
-for (i = 0; i < 40; ++i) {
- t.save({a: 0, x: big});
-}
+ for (i = 0; i < 110; ++i) {
+ t.save({a: 0, x: big});
+ }
-function memoryException(sortSpec, querySpec) {
- querySpec = querySpec || {};
- var ex = assert.throws(function() {
- t.find(querySpec).sort(sortSpec).batchSize(1000).itcount();
- });
- assert(ex.toString().match(/Sort/));
-}
+ function memoryException(sortSpec, querySpec) {
+ querySpec = querySpec || {};
+ var ex = assert.throws(function() {
+ t.find(querySpec).sort(sortSpec).batchSize(1000).itcount();
+ });
+ assert(ex.toString().match(/Sort/));
+ }
-function noMemoryException(sortSpec, querySpec) {
- querySpec = querySpec || {};
- t.find(querySpec).sort(sortSpec).batchSize(1000).itcount();
-}
+ function noMemoryException(sortSpec, querySpec) {
+ querySpec = querySpec || {};
+ t.find(querySpec).sort(sortSpec).batchSize(1000).itcount();
+ }
-// Unindexed sorts.
-memoryException({a: 1});
-memoryException({b: 1});
+ // Unindexed sorts.
+ memoryException({a: 1});
+ memoryException({b: 1});
-// Indexed sorts.
-noMemoryException({_id: 1});
-noMemoryException({$natural: 1});
+ // Indexed sorts.
+ noMemoryException({_id: 1});
+ noMemoryException({$natural: 1});
-assert.eq(1, t.getIndexes().length);
+ assert.eq(1, t.getIndexes().length);
-t.ensureIndex({a: 1});
-t.ensureIndex({b: 1});
-t.ensureIndex({c: 1});
+ t.ensureIndex({a: 1});
+ t.ensureIndex({b: 1});
+ t.ensureIndex({c: 1});
-assert.eq(4, t.getIndexes().length);
+ assert.eq(4, t.getIndexes().length);
-// These sorts are now indexed.
-noMemoryException({a: 1});
-noMemoryException({b: 1});
+ // These sorts are now indexed.
+ noMemoryException({a: 1});
+ noMemoryException({b: 1});
-// A memory exception is triggered for an unindexed sort involving multiple plans.
-memoryException({d: 1}, {b: null, c: null});
+ // A memory exception is triggered for an unindexed sort involving multiple plans.
+ memoryException({d: 1}, {b: null, c: null});
-// With an indexed plan on _id:1 and an unindexed plan on b:1, the indexed plan
-// should succeed even if the unindexed one would exhaust its memory limit.
-noMemoryException({_id: 1}, {b: null});
+ // With an indexed plan on _id:1 and an unindexed plan on b:1, the indexed plan should succeed
+ // even if the unindexed one would exhaust its memory limit.
+ noMemoryException({_id: 1}, {b: null});
-// With an unindexed plan on b:1 recorded for a query, the query should be
-// retried when the unindexed plan exhausts its memory limit.
-noMemoryException({_id: 1}, {b: null});
-t.drop();
+ // With an unindexed plan on b:1 recorded for a query, the query should be retried when the
+ // unindexed plan exhausts its memory limit.
+ noMemoryException({_id: 1}, {b: null});
+ t.drop();
+})();
diff --git a/jstests/core/sortj.js b/jstests/core/sortj.js
index 4d8baa47e8f..625f5aa6722 100644
--- a/jstests/core/sortj.js
+++ b/jstests/core/sortj.js
@@ -1,17 +1,24 @@
// Test an in memory sort memory assertion after a plan has "taken over" in the query optimizer
// cursor.
+(function() {
+ "use strict";
-t = db.jstests_sortj;
-t.drop();
+ load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-t.ensureIndex({a: 1});
+ const t = db.jstests_sortj;
+ t.drop();
-big = new Array(100000).toString();
-for (i = 0; i < 1000; ++i) {
- t.save({a: 1, b: big});
-}
+ t.ensureIndex({a: 1});
-assert.throws(function() {
- t.find({a: {$gte: 0}, c: null}).sort({d: 1}).itcount();
-});
-t.drop(); \ No newline at end of file
+ const numShards = FixtureHelpers.numberOfShardsForCollection(t);
+
+ const big = new Array(100000).toString();
+ for (let i = 0; i < 1200 * numShards; ++i) {
+ t.save({a: 1, b: big});
+ }
+
+ assert.throws(function() {
+ t.find({a: {$gte: 0}, c: null}).sort({d: 1}).itcount();
+ });
+ t.drop();
+})();
diff --git a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
index 27a06a1ecec..6f80c277990 100644
--- a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
+++ b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
@@ -323,19 +323,19 @@
},
{
test: function(db) {
- const originalSortBytes = db.adminCommand(
- {getParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 1});
+ const originalSortBytes =
+ db.adminCommand({getParameter: 1, internalQueryExecMaxBlockingSortBytes: 1});
assert.commandWorked(originalSortBytes);
assert.commandWorked(db.adminCommand(
- {setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 10}));
+ {setParameter: 1, internalQueryExecMaxBlockingSortBytes: 10}));
assert.eq(
coll.aggregate([{$match: {a: 1}}, {$sort: {a: 1}}], {allowDiskUse: true})
.itcount(),
1);
assert.commandWorked(db.adminCommand({
setParameter: 1,
- internalDocumentSourceSortMaxBlockingSortBytes:
- originalSortBytes.internalDocumentSourceSortMaxBlockingSortBytes
+ internalQueryExecMaxBlockingSortBytes:
+ originalSortBytes.internalQueryExecMaxBlockingSortBytes
}));
},
logFields:
diff --git a/jstests/noPassthrough/query_knobs_validation.js b/jstests/noPassthrough/query_knobs_validation.js
index 536f4d6f995..d553d49d750 100644
--- a/jstests/noPassthrough/query_knobs_validation.js
+++ b/jstests/noPassthrough/query_knobs_validation.js
@@ -28,13 +28,12 @@
internalQueryPlannerEnableHashIntersection: false,
internalQueryPlanOrChildrenIndependently: true,
internalQueryMaxScansToExplode: 200,
- internalQueryExecMaxBlockingSortBytes: 32 * 1024 * 1024,
+ internalQueryExecMaxBlockingSortBytes: 100 * 1024 * 1024,
internalQueryExecYieldIterations: 128,
internalQueryExecYieldPeriodMS: 10,
internalQueryFacetBufferSizeBytes: 100 * 1024 * 1024,
internalDocumentSourceCursorBatchSizeBytes: 4 * 1024 * 1024,
internalDocumentSourceLookupCacheSizeBytes: 100 * 1024 * 1024,
- internalDocumentSourceSortMaxBlockingSortBytes: 100 * 1024 * 1024,
internalLookupStageIntermediateDocumentMaxSizeBytes: 100 * 1024 * 1024,
internalDocumentSourceGroupMaxMemoryBytes: 100 * 1024 * 1024,
// Should be half the value of 'internalQueryExecYieldIterations' parameter.
@@ -137,10 +136,6 @@
assertSetParameterFails("internalQueryFacetBufferSizeBytes", 0);
assertSetParameterFails("internalQueryFacetBufferSizeBytes", -1);
- assertSetParameterSucceeds("internalDocumentSourceSortMaxBlockingSortBytes", 11);
- assertSetParameterFails("internalDocumentSourceSortMaxBlockingSortBytes", 0);
- assertSetParameterFails("internalDocumentSourceSortMaxBlockingSortBytes", -1);
-
assertSetParameterSucceeds("internalDocumentSourceGroupMaxMemoryBytes", 11);
assertSetParameterFails("internalDocumentSourceGroupMaxMemoryBytes", 0);
assertSetParameterFails("internalDocumentSourceGroupMaxMemoryBytes", -1);
diff --git a/jstests/noPassthrough/use_disk.js b/jstests/noPassthrough/use_disk.js
index ec7778d23da..4221de37ede 100644
--- a/jstests/noPassthrough/use_disk.js
+++ b/jstests/noPassthrough/use_disk.js
@@ -43,7 +43,7 @@
assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 10}));
+ testDB.adminCommand({setParameter: 1, internalQueryExecMaxBlockingSortBytes: 10}));
assert.eq(8,
coll.aggregate([{$match: {a: {$gte: 2}}}, {$sort: {a: 1}}], {allowDiskUse: true})
.itcount());
@@ -137,7 +137,7 @@
// Test that usedDisk is not set for a $lookup with a pipeline that does not use disk.
//
assert.commandWorked(testDB.adminCommand(
- {setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 100 * 1024 * 1024}));
+ {setParameter: 1, internalQueryExecMaxBlockingSortBytes: 100 * 1024 * 1024}));
resetCollection();
resetForeignCollection();
coll.aggregate(
diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js
index 328ae508f5a..e771212c7d2 100644
--- a/jstests/sharding/in_memory_sort_limit.js
+++ b/jstests/sharding/in_memory_sort_limit.js
@@ -4,7 +4,13 @@
(function() {
'use strict';
- var st = new ShardingTest({shards: 2});
+ var st = new ShardingTest({
+ shards: 2,
+ other: {
+ shardOptions:
+ {setParameter: {internalQueryExecMaxBlockingSortBytes: 32 * 1024 * 1024}}
+ }
+ });
assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
st.ensurePrimaryShard('test', st.shard0.shardName);
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index 7bba7e25bf3..1ba3e0af09f 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -6,7 +6,15 @@
var MaxSizeMB = 1;
- var s = new ShardingTest({shards: 2, other: {chunkSize: 1, manualAddShard: true}});
+ var s = new ShardingTest({
+ shards: 2,
+ other: {
+ chunkSize: 1,
+ manualAddShard: true,
+ shardOptions:
+ {setParameter: {internalQueryExecMaxBlockingSortBytes: 32 * 1024 * 1024}}
+ }
+ });
var db = s.getDB("test");
var names = s.getConnNames();
diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
index 155403e0b7c..7b8c483cc2e 100644
--- a/jstests/sharding/sharding_balance3.js
+++ b/jstests/sharding/sharding_balance3.js
@@ -6,7 +6,12 @@
name: "slow_sharding_balance3",
shards: 2,
mongos: 1,
- other: {chunkSize: 1, enableBalancer: true}
+ other: {
+ chunkSize: 1,
+ enableBalancer: true,
+ shardOptions:
+ {setParameter: {internalQueryExecMaxBlockingSortBytes: 32 * 1024 * 1024}}
+ }
});
s.adminCommand({enablesharding: "test"});
diff --git a/src/mongo/db/pipeline/document_source_sort.cpp b/src/mongo/db/pipeline/document_source_sort.cpp
index dda569e4350..fe32168c170 100644
--- a/src/mongo/db/pipeline/document_source_sort.cpp
+++ b/src/mongo/db/pipeline/document_source_sort.cpp
@@ -227,12 +227,10 @@ intrusive_ptr<DocumentSourceSort> DocumentSourceSort::create(
BSONObj sortOrder,
uint64_t limit,
boost::optional<uint64_t> maxMemoryUsageBytes) {
- auto resolvedMaxBytes = maxMemoryUsageBytes
- ? *maxMemoryUsageBytes
- : internalDocumentSourceSortMaxBlockingSortBytes.load();
+ auto resolvedMaxBytes =
+ maxMemoryUsageBytes ? *maxMemoryUsageBytes : internalQueryExecMaxBlockingSortBytes.load();
intrusive_ptr<DocumentSourceSort> pSort(
new DocumentSourceSort(pExpCtx, sortOrder.getOwned(), limit, resolvedMaxBytes));
-
return pSort;
}
diff --git a/src/mongo/db/pipeline/document_source_sort.h b/src/mongo/db/pipeline/document_source_sort.h
index 44797e3dcf3..114c8f7445b 100644
--- a/src/mongo/db/pipeline/document_source_sort.h
+++ b/src/mongo/db/pipeline/document_source_sort.h
@@ -95,7 +95,7 @@ public:
/**
* Convenience method for creating a $sort stage. If maxMemoryUsageBytes is boost::none,
- * then it will actually use the value of internalDocumentSourceSortMaxBlockingSortBytes.
+ * then it will actually use the value of internalQueryExecMaxBlockingSortBytes.
*/
static boost::intrusive_ptr<DocumentSourceSort> create(
const boost::intrusive_ptr<ExpressionContext>& pExpCtx,
diff --git a/src/mongo/db/query/query_knobs.idl b/src/mongo/db/query/query_knobs.idl
index 2ae8188c94d..b38e1072471 100644
--- a/src/mongo/db/query/query_knobs.idl
+++ b/src/mongo/db/query/query_knobs.idl
@@ -203,12 +203,14 @@ server_parameters:
# Query execution
#
internalQueryExecMaxBlockingSortBytes:
- description: "internal query execute maximum blocking sort in bytes."
+ description: "The maximum amount of memory a query (e.g. a find or aggregate command) is willing
+ to use to execute a blocking sort, measured in bytes. If disk use is allowed, then it may be
+ possible to sort more data, but this limit will still constrain the memory consumption."
set_at: [ startup, runtime ]
cpp_varname: "internalQueryExecMaxBlockingSortBytes"
cpp_vartype: AtomicWord<int>
default:
- expr: 32 * 1024 * 1024
+ expr: 100 * 1024 * 1024
validator:
gte: 0
@@ -238,16 +240,6 @@ server_parameters:
validator:
gt: 0
- internalDocumentSourceSortMaxBlockingSortBytes:
- description: "The maximum size of the dataset that we are prepared to sort in-memory."
- set_at: [ startup, runtime ]
- cpp_varname: "internalDocumentSourceSortMaxBlockingSortBytes"
- cpp_vartype: AtomicWord<long long>
- default:
- expr: 100 * 1024 * 1024
- validator:
- gt: 0
-
internalLookupStageIntermediateDocumentMaxSizeBytes:
description: "Maximum size of the result set that we cache from the foreign collection during a $lookup."
set_at: [ startup, runtime ]
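
As the new knob description notes, allowing disk use lets a blocking sort proceed past the
in-memory limit by spilling to disk. A minimal mongo shell sketch of the behavior (the
collection and field names are hypothetical, and the data is assumed to exceed the limit):

    // Sorting more than the limit on an unindexed field fails when spilling is not permitted.
    assert.throws(function() {
        db.bigcoll.aggregate([{$sort: {unindexedField: 1}}]).itcount();
    });

    // With allowDiskUse, the sorter spills to disk instead of throwing.
    db.bigcoll.aggregate([{$sort: {unindexedField: 1}}], {allowDiskUse: true}).itcount();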