summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEric Cox <eric.cox@mongodb.com>2021-07-21 19:03:16 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-07-31 01:41:48 +0000
commit07d6e82a05dbe36f94d6a32b9378b0ddabdc8045 (patch)
tree8d9666532e2a1f37805c81a87b758c21fe4d3c57
parent63e3eaa766bf2aea4fad1333e69d7c8d52e612e8 (diff)
downloadmongo-07d6e82a05dbe36f94d6a32b9378b0ddabdc8045.tar.gz
SERVER-57461 Remove SPLIT_LIMITED_SORT and associated QueryPlanner code
-rwxr-xr-xbuildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml2
-rw-r--r--jstests/core/api_version_unstable_fields.js1
-rw-r--r--jstests/core/batch_size.js5
-rw-r--r--jstests/core/find_with_ntoreturn_fails.js15
-rw-r--r--jstests/core/sort_with_update_between_getmores.js (renamed from jstests/core/ensure_sorted.js)6
-rw-r--r--jstests/sharding/query/find_getmore_cmd.js9
-rw-r--r--src/mongo/client/dbclient_base.cpp21
-rw-r--r--src/mongo/client/dbclient_base.h9
-rw-r--r--src/mongo/client/dbclient_connection.h4
-rw-r--r--src/mongo/client/dbclient_cursor.cpp60
-rw-r--r--src/mongo/client/dbclient_cursor.h11
-rw-r--r--src/mongo/client/dbclient_mockcursor.cpp13
-rw-r--r--src/mongo/db/SConscript1
-rw-r--r--src/mongo/db/commands/find_cmd.cpp60
-rw-r--r--src/mongo/db/curop.cpp2
-rw-r--r--src/mongo/db/curop.h2
-rw-r--r--src/mongo/db/dbdirectclient.cpp4
-rw-r--r--src/mongo/db/dbdirectclient.h2
-rw-r--r--src/mongo/db/exec/ensure_sorted.cpp100
-rw-r--r--src/mongo/db/exec/ensure_sorted.h82
-rw-r--r--src/mongo/db/exec/plan_stats.h15
-rw-r--r--src/mongo/db/exec/trial_period_utils.cpp5
-rw-r--r--src/mongo/db/query/canonical_query.cpp12
-rw-r--r--src/mongo/db/query/classic_stage_builder.cpp7
-rw-r--r--src/mongo/db/query/find.cpp13
-rw-r--r--src/mongo/db/query/find.h9
-rw-r--r--src/mongo/db/query/find_common.cpp10
-rw-r--r--src/mongo/db/query/get_executor.cpp9
-rw-r--r--src/mongo/db/query/plan_explainer_impl.cpp6
-rw-r--r--src/mongo/db/query/planner_analysis.cpp102
-rw-r--r--src/mongo/db/query/query_planner.cpp3
-rw-r--r--src/mongo/db/query/query_planner_operator_test.cpp75
-rw-r--r--src/mongo/db/query/query_planner_options_test.cpp94
-rw-r--r--src/mongo/db/query/query_planner_params.h21
-rw-r--r--src/mongo/db/query/query_planner_test_fixture.cpp94
-rw-r--r--src/mongo/db/query/query_planner_test_fixture.h50
-rw-r--r--src/mongo/db/query/query_planner_test_lib.cpp34
-rw-r--r--src/mongo/db/query/query_planner_tree_test.cpp57
-rw-r--r--src/mongo/db/query/query_planner_wildcard_index_test.cpp6
-rw-r--r--src/mongo/db/query/query_request_helper.cpp31
-rw-r--r--src/mongo/db/query/query_request_helper.h1
-rw-r--r--src/mongo/db/query/query_request_test.cpp34
-rw-r--r--src/mongo/db/query/query_solution.cpp24
-rw-r--r--src/mongo/db/query/query_solution.h32
-rw-r--r--src/mongo/db/query/stage_types.cpp1
-rw-r--r--src/mongo/db/query/stage_types.h2
-rw-r--r--src/mongo/db/repl/oplog_fetcher.cpp2
-rw-r--r--src/mongo/db/repl/replication_recovery.cpp6
-rw-r--r--src/mongo/dbtests/SConscript1
-rw-r--r--src/mongo/dbtests/mock/mock_dbclient_connection.cpp4
-rw-r--r--src/mongo/dbtests/mock/mock_dbclient_connection.h2
-rw-r--r--src/mongo/dbtests/mock/mock_remote_db_server.cpp2
-rw-r--r--src/mongo/dbtests/mock/mock_remote_db_server.h2
-rw-r--r--src/mongo/dbtests/query_stage_ensure_sorted.cpp195
-rw-r--r--src/mongo/dbtests/querytests.cpp16
-rw-r--r--src/mongo/s/commands/cluster_find_cmd.cpp3
-rw-r--r--src/mongo/s/query/cluster_find.cpp38
-rw-r--r--src/mongo/scripting/mozjs/mongo.cpp4
69 files changed, 315 insertions, 1138 deletions
diff --git a/buildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml
index 2dd8f04f84a..fba19a561d7 100755
--- a/buildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml
@@ -123,7 +123,6 @@ selector:
- jstests/core/elemmatch_or_pushdown.js
- jstests/core/elemmatch_projection.js
- jstests/core/positional_projection.js
- - jstests/core/ensure_sorted.js
- jstests/core/exists.js
- jstests/core/existsa.js
- jstests/core/explain1.js
@@ -390,6 +389,7 @@ selector:
- jstests/core/sorth.js
- jstests/core/sortj.js
- jstests/core/sortk.js
+ - jstests/core/sort_with_update_between_getmores.js
- jstests/core/sparse_index_supports_ne_null.js
- jstests/core/stages_and_hash.js
- jstests/core/stages_collection_scan.js
diff --git a/buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml
index 098952c993e..f723ade1a6e 100644
--- a/buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml
@@ -205,13 +205,13 @@ selector:
# txn interrupted by command outside of txn before getMore runs.
- jstests/core/commands_namespace_parsing.js
- jstests/core/drop3.js
- - jstests/core/ensure_sorted.js
- jstests/core/geo_s2cursorlimitskip.js
- jstests/core/getmore_invalidated_cursors.js
- jstests/core/getmore_invalidated_documents.js
- jstests/core/kill_cursors.js
- jstests/core/list_indexes.js
- jstests/core/oro.js
+ - jstests/core/sort_with_update_between_getmores.js
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/core/awaitdata_getmore_cmd.js
diff --git a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml
index 649bc2b3891..d4414c08c4c 100644
--- a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml
@@ -228,13 +228,13 @@ selector:
# txn interrupted by command outside of txn before getMore runs.
- jstests/core/commands_namespace_parsing.js
- jstests/core/drop3.js
- - jstests/core/ensure_sorted.js
- jstests/core/geo_s2cursorlimitskip.js
- jstests/core/getmore_invalidated_cursors.js
- jstests/core/getmore_invalidated_documents.js
- jstests/core/kill_cursors.js
- jstests/core/list_indexes.js
- jstests/core/oro.js
+ - jstests/core/sort_with_update_between_getmores.js
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/core/awaitdata_getmore_cmd.js
diff --git a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml
index afc436e4803..a96bfba9f75 100644
--- a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml
@@ -219,13 +219,13 @@ selector:
# txn interrupted by command outside of txn before getMore runs.
- jstests/core/commands_namespace_parsing.js
- jstests/core/drop3.js
- - jstests/core/ensure_sorted.js
- jstests/core/geo_s2cursorlimitskip.js
- jstests/core/getmore_invalidated_cursors.js
- jstests/core/getmore_invalidated_documents.js
- jstests/core/kill_cursors.js
- jstests/core/list_indexes.js
- jstests/core/oro.js
+ - jstests/core/sort_with_update_between_getmores.js
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/core/awaitdata_getmore_cmd.js
diff --git a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml
index 5846ec7162a..63b1f9fe3d9 100644
--- a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml
@@ -220,13 +220,13 @@ selector:
# txn interrupted by command outside of txn before getMore runs.
- jstests/core/commands_namespace_parsing.js
- jstests/core/drop3.js
- - jstests/core/ensure_sorted.js
- jstests/core/geo_s2cursorlimitskip.js
- jstests/core/getmore_invalidated_cursors.js
- jstests/core/getmore_invalidated_documents.js
- jstests/core/kill_cursors.js
- jstests/core/list_indexes.js
- jstests/core/oro.js
+ - jstests/core/sort_with_update_between_getmores.js
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/core/awaitdata_getmore_cmd.js
diff --git a/buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml b/buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml
index 623cbe2d10b..9ae1b6de3c6 100644
--- a/buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml
+++ b/buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml
@@ -236,13 +236,13 @@ selector:
# txn interrupted by command outside of txn before getMore runs.
- jstests/core/commands_namespace_parsing.js
- jstests/core/drop3.js
- - jstests/core/ensure_sorted.js
- jstests/core/geo_s2cursorlimitskip.js
- jstests/core/getmore_invalidated_cursors.js
- jstests/core/getmore_invalidated_documents.js
- jstests/core/kill_cursors.js
- jstests/core/list_indexes.js
- jstests/core/oro.js
+ - jstests/core/sort_with_update_between_getmores.js
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/core/awaitdata_getmore_cmd.js
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml
index 28276078610..17e7a81efb8 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml
@@ -176,7 +176,6 @@ selector:
# txn interrupted by command outside of txn before getMore runs.
- jstests/core/commands_namespace_parsing.js
- jstests/core/drop3.js
- - jstests/core/ensure_sorted.js
- jstests/core/geo_s2cursorlimitskip.js
- jstests/core/getmore_invalidated_cursors.js
- jstests/core/getmore_invalidated_documents.js
@@ -186,6 +185,7 @@ selector:
- jstests/core/list_indexes_invalidation.js
- jstests/core/list_namespaces_invalidation.js
- jstests/core/oro.js
+ - jstests/core/sort_with_update_between_getmores.js
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/core/awaitdata_getmore_cmd.js
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml
index 519acfb6f2e..2734e2e3a6c 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml
@@ -164,13 +164,13 @@ selector:
# txn interrupted by command outside of txn before getMore runs.
- jstests/core/commands_namespace_parsing.js
- jstests/core/drop3.js
- - jstests/core/ensure_sorted.js
- jstests/core/geo_s2cursorlimitskip.js
- jstests/core/getmore_invalidated_cursors.js
- jstests/core/getmore_invalidated_documents.js
- jstests/core/kill_cursors.js
- jstests/core/list_indexes.js
- jstests/core/oro.js
+ - jstests/core/sort_with_update_between_getmores.js
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/core/awaitdata_getmore_cmd.js
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml
index 25b4e55b041..e91183d797f 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml
@@ -163,13 +163,13 @@ selector:
# txn interrupted by command outside of txn before getMore runs.
- jstests/core/commands_namespace_parsing.js
- jstests/core/drop3.js
- - jstests/core/ensure_sorted.js
- jstests/core/geo_s2cursorlimitskip.js
- jstests/core/getmore_invalidated_cursors.js
- jstests/core/getmore_invalidated_documents.js
- jstests/core/kill_cursors.js
- jstests/core/list_indexes.js
- jstests/core/oro.js
+ - jstests/core/sort_with_update_between_getmores.js
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/core/awaitdata_getmore_cmd.js
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml
index a0a30f7a18b..81d5855d266 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml
@@ -160,13 +160,13 @@ selector:
# txn interrupted by command outside of txn before getMore runs.
- jstests/core/commands_namespace_parsing.js
- jstests/core/drop3.js
- - jstests/core/ensure_sorted.js
- jstests/core/geo_s2cursorlimitskip.js
- jstests/core/getmore_invalidated_cursors.js
- jstests/core/getmore_invalidated_documents.js
- jstests/core/kill_cursors.js
- jstests/core/list_indexes.js
- jstests/core/oro.js
+ - jstests/core/sort_with_update_between_getmores.js
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/core/awaitdata_getmore_cmd.js
diff --git a/buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml
index 812df6897f8..bb696f73817 100644
--- a/buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml
@@ -200,7 +200,6 @@ selector:
# txn interrupted by command outside of txn before getMore runs.
- jstests/core/commands_namespace_parsing.js
- jstests/core/drop3.js
- - jstests/core/ensure_sorted.js
- jstests/core/geo_s2cursorlimitskip.js
- jstests/core/getmore_invalidated_cursors.js
- jstests/core/getmore_invalidated_documents.js
@@ -208,6 +207,7 @@ selector:
- jstests/core/list_indexes.js
- jstests/core/list_namespaces_invalidation.js
- jstests/core/oro.js
+ - jstests/core/sort_with_update_between_getmores.js
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/core/awaitdata_getmore_cmd.js
diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml
index 86761f02edf..ea74bf90a81 100644
--- a/buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml
@@ -279,7 +279,6 @@ selector:
# txn interrupted by command outside of txn before getMore runs.
- jstests/core/commands_namespace_parsing.js
- jstests/core/drop3.js
- - jstests/core/ensure_sorted.js
- jstests/core/geo_s2cursorlimitskip.js
- jstests/core/getmore_invalidated_cursors.js
- jstests/core/getmore_invalidated_documents.js
@@ -288,6 +287,7 @@ selector:
- jstests/core/list_indexes_invalidation.js
- jstests/core/list_namespaces_invalidation.js
- jstests/core/oro.js
+ - jstests/core/sort_with_update_between_getmores.js
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/core/compact_keeps_indexes.js
diff --git a/jstests/core/api_version_unstable_fields.js b/jstests/core/api_version_unstable_fields.js
index 31741ad9e0d..30793f517c3 100644
--- a/jstests/core/api_version_unstable_fields.js
+++ b/jstests/core/api_version_unstable_fields.js
@@ -25,7 +25,6 @@ const unstableFieldsForAggregate = {
};
const unstableFieldsForFind = {
- ntoreturn: 10,
min: {"a": 1},
max: {"a": 1},
returnKey: false,
diff --git a/jstests/core/batch_size.js b/jstests/core/batch_size.js
index 297303d7f36..833f294abb2 100644
--- a/jstests/core/batch_size.js
+++ b/jstests/core/batch_size.js
@@ -96,11 +96,6 @@ if (FixtureHelpers.isMongos(db)) {
} else {
assert.eq(6, explain.executionStats.nReturned);
}
-
-// -------
-
-// During plan ranking, we treat ntoreturn as a limit. This prevents us from buffering too much
-// data in a blocking sort stage during plan ranking.
t.drop();
// Generate big string to use in the object - 1MB+ String
diff --git a/jstests/core/find_with_ntoreturn_fails.js b/jstests/core/find_with_ntoreturn_fails.js
new file mode 100644
index 00000000000..d74fc47e796
--- /dev/null
+++ b/jstests/core/find_with_ntoreturn_fails.js
@@ -0,0 +1,15 @@
+/**
+ * Tests that 'runCommand' with 'ntoreturn' throws the expected error code and that 'limit' and
+ * 'batchSize' cannot be used alongside of 'ntoreturn'.
+ * @tags: [requires_fcv_51]
+ */
+(function() {
+"use strict";
+
+const testDB = db.getSiblingDB("test_ntoreturn");
+assert.commandFailedWithCode(testDB.runCommand({find: "coll", ntoreturn: 1}), [5746101, 5746102]);
+assert.commandFailedWithCode(testDB.runCommand({find: "coll", limit: 1, ntoreturn: 1}),
+ ErrorCodes.BadValue);
+assert.commandFailedWithCode(testDB.runCommand({find: "coll", batchSize: 1, ntoreturn: 1}),
+ ErrorCodes.BadValue);
+}());
diff --git a/jstests/core/ensure_sorted.js b/jstests/core/sort_with_update_between_getmores.js
index a8fa02e5bb0..038ff5b7875 100644
--- a/jstests/core/ensure_sorted.js
+++ b/jstests/core/sort_with_update_between_getmores.js
@@ -6,11 +6,11 @@
// requires_getmore,
// ]
-// SERVER-17011 Tests whether queries which specify sort and batch size can generate results out of
-// order due to the ntoreturn hack. The EnsureSortedStage should solve this problem.
+// This test checks that a sort query with an update between getMores() doesn't produce out-of-order
+// results when the update touches a field on which the query is sorting.
(function() {
'use strict';
-const collName = "ensure_sorted";
+const collName = jsTestName();
const coll = db[collName];
const kDocList =
[{_id: 0, a: 1, b: 4}, {_id: 1, a: 2, b: 3}, {_id: 2, a: 3, b: 2}, {_id: 3, a: 4, b: 1}];
diff --git a/jstests/sharding/query/find_getmore_cmd.js b/jstests/sharding/query/find_getmore_cmd.js
index 997d3bb2207..77c34535cd4 100644
--- a/jstests/sharding/query/find_getmore_cmd.js
+++ b/jstests/sharding/query/find_getmore_cmd.js
@@ -81,14 +81,11 @@ assert.eq(cmdRes.cursor.ns, coll.getFullName());
assert.eq(cmdRes.cursor.firstBatch.length, 1);
assert.eq(cmdRes.cursor.firstBatch[0], {_id: -5, a: 8});
-// Find where adding limit/ntoreturn and skip overflows.
-var largeInt = new NumberLong('9223372036854775807');
+// Find where adding limit and skip overflows.
+const largeInt = new NumberLong('9223372036854775807');
cmdRes = db.runCommand({find: coll.getName(), skip: largeInt, limit: largeInt});
assert.commandFailed(cmdRes);
-cmdRes = db.runCommand({find: coll.getName(), skip: largeInt, ntoreturn: largeInt});
-assert.commandFailed(cmdRes);
-cmdRes =
- db.runCommand({find: coll.getName(), skip: largeInt, ntoreturn: largeInt, singleBatch: true});
+cmdRes = db.runCommand({find: coll.getName(), skip: largeInt, limit: largeInt, singleBatch: true});
assert.commandFailed(cmdRes);
// A predicate with $where.
diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp
index 1a14f42a93c..81e597c48cf 100644
--- a/src/mongo/client/dbclient_base.cpp
+++ b/src/mongo/client/dbclient_base.cpp
@@ -590,7 +590,7 @@ list<BSONObj> DBClientBase::getCollectionInfos(const string& db, const BSONObj&
if (id != 0) {
const std::string ns = cursorObj["ns"].String();
- unique_ptr<DBClientCursor> cursor = getMore(ns, id, 0, 0);
+ unique_ptr<DBClientCursor> cursor = getMore(ns, id);
while (cursor->more()) {
infos.push_back(cursor->nextSafe().getOwned());
}
@@ -660,16 +660,16 @@ bool DBClientBase::exists(const string& ns) {
void DBClientBase::findN(vector<BSONObj>& out,
const string& ns,
Query query,
- int nToReturn,
+ int limit,
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
boost::optional<BSONObj> readConcernObj) {
- out.reserve(nToReturn);
+ out.reserve(limit);
unique_ptr<DBClientCursor> c = this->query(NamespaceString(ns),
query,
- nToReturn,
+ limit,
nToSkip,
fieldsToReturn,
queryOptions,
@@ -686,9 +686,7 @@ void DBClientBase::findN(vector<BSONObj>& out,
"Deprecated ShardConfigStale flag encountered in query result",
!c->hasResultFlag(ResultFlag_ShardConfigStaleDeprecated));
- for (int i = 0; i < nToReturn; i++) {
- if (!c->more())
- break;
+ while (c->more()) {
out.push_back(c->nextSafe());
}
}
@@ -747,7 +745,7 @@ const uint64_t DBClientBase::INVALID_SOCK_CREATION_TIME = std::numeric_limits<ui
unique_ptr<DBClientCursor> DBClientBase::query(const NamespaceStringOrUUID& nsOrUuid,
Query query,
- int nToReturn,
+ int limit,
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
@@ -756,7 +754,7 @@ unique_ptr<DBClientCursor> DBClientBase::query(const NamespaceStringOrUUID& nsOr
unique_ptr<DBClientCursor> c(new DBClientCursor(this,
nsOrUuid,
query.obj,
- nToReturn,
+ limit,
nToSkip,
fieldsToReturn,
queryOptions,
@@ -769,10 +767,9 @@ unique_ptr<DBClientCursor> DBClientBase::query(const NamespaceStringOrUUID& nsOr
unique_ptr<DBClientCursor> DBClientBase::getMore(const string& ns,
long long cursorId,
- int nToReturn,
int options) {
unique_ptr<DBClientCursor> c(
- new DBClientCursor(this, NamespaceString(ns), cursorId, nToReturn, options));
+ new DBClientCursor(this, NamespaceString(ns), cursorId, 0 /* limit */, options));
if (c->init())
return c;
return nullptr;
@@ -1018,7 +1015,7 @@ std::list<BSONObj> DBClientBase::_getIndexSpecs(const NamespaceStringOrUUID& nsO
if (nsOrUuid.nss()) {
invariant((*nsOrUuid.nss()).toString() == cursorNs);
}
- unique_ptr<DBClientCursor> cursor = getMore(cursorNs, id, 0, 0);
+ unique_ptr<DBClientCursor> cursor = getMore(cursorNs, id);
while (cursor->more()) {
specs.push_back(cursor->nextSafe().getOwned());
}
diff --git a/src/mongo/client/dbclient_base.h b/src/mongo/client/dbclient_base.h
index c8beea97aff..83b1f688819 100644
--- a/src/mongo/client/dbclient_base.h
+++ b/src/mongo/client/dbclient_base.h
@@ -78,7 +78,7 @@ class DBClientQueryInterface {
virtual std::unique_ptr<DBClientCursor> query(
const NamespaceStringOrUUID& nsOrUuid,
Query query,
- int nToReturn = 0,
+ int limit = 0,
int nToSkip = 0,
const BSONObj* fieldsToReturn = nullptr,
int queryOptions = 0,
@@ -138,7 +138,7 @@ public:
void findN(std::vector<BSONObj>& out,
const std::string& ns,
Query query,
- int nToReturn,
+ int limit,
int nToSkip = 0,
const BSONObj* fieldsToReturn = nullptr,
int queryOptions = 0,
@@ -592,7 +592,7 @@ public:
You may format as
{ query: { ... }, orderby: { ... } }
to specify a sort order.
- @param nToReturn n to return (i.e., limit). 0 = unlimited
+ @param limit - the maximum number of documents that the cursor should return. 0 = unlimited.
@param nToSkip start with the nth item
@param fieldsToReturn optional template of which fields to select. if unspecified,
returns all fields
@@ -604,7 +604,7 @@ public:
std::unique_ptr<DBClientCursor> query(
const NamespaceStringOrUUID& nsOrUuid,
Query query,
- int nToReturn = 0,
+ int limit = 0,
int nToSkip = 0,
const BSONObj* fieldsToReturn = nullptr,
int queryOptions = 0,
@@ -650,7 +650,6 @@ public:
*/
virtual std::unique_ptr<DBClientCursor> getMore(const std::string& ns,
long long cursorId,
- int nToReturn = 0,
int options = 0);
/**
diff --git a/src/mongo/client/dbclient_connection.h b/src/mongo/client/dbclient_connection.h
index a4ea1c299e2..cbc978887fe 100644
--- a/src/mongo/client/dbclient_connection.h
+++ b/src/mongo/client/dbclient_connection.h
@@ -149,7 +149,7 @@ public:
std::unique_ptr<DBClientCursor> query(
const NamespaceStringOrUUID& nsOrUuid,
Query query = Query(),
- int nToReturn = 0,
+ int limit = 0,
int nToSkip = 0,
const BSONObj* fieldsToReturn = nullptr,
int queryOptions = 0,
@@ -158,7 +158,7 @@ public:
checkConnection();
return DBClientBase::query(nsOrUuid,
query,
- nToReturn,
+ limit,
nToSkip,
fieldsToReturn,
queryOptions,
diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp
index 96a509da122..d83d444af8e 100644
--- a/src/mongo/client/dbclient_cursor.cpp
+++ b/src/mongo/client/dbclient_cursor.cpp
@@ -87,16 +87,6 @@ Message assembleCommandRequest(DBClientBase* cli,
} // namespace
-int DBClientCursor::nextBatchSize() {
- if (nToReturn == 0)
- return batchSize;
-
- if (batchSize == 0)
- return nToReturn;
-
- return batchSize < nToReturn ? batchSize : nToReturn;
-}
-
Message DBClientCursor::_assembleInit() {
if (cursorId) {
return _assembleGetMore();
@@ -107,16 +97,21 @@ Message DBClientCursor::_assembleInit() {
// expected for a legacy OP_QUERY. Therefore, we use the legacy parsing code supplied by
// query_request_helper. When actually issuing the request to the remote node, we will
// assemble a find command.
- auto findCommand =
- query_request_helper::fromLegacyQuery(_nsOrUuid,
- query,
- fieldsToReturn ? *fieldsToReturn : BSONObj(),
- nToSkip,
- nextBatchSize(),
- opts);
+ auto findCommand = query_request_helper::fromLegacyQuery(
+ _nsOrUuid, query, fieldsToReturn ? *fieldsToReturn : BSONObj(), nToSkip, opts);
// If there was a problem building the query request, report that.
uassertStatusOK(findCommand.getStatus());
+ // Despite the request being generated using the legacy OP_QUERY format above, we will never set
+ // the 'ntoreturn' parameter on the find command request, since this is an OP_QUERY-specific
+ // concept. Instead, we always use 'batchSize' and 'limit', which are provided separately to us
+ // by the client.
+ if (limit) {
+ findCommand.getValue()->setLimit(limit);
+ }
+ if (batchSize) {
+ findCommand.getValue()->setBatchSize(batchSize);
+ }
if (query.getBoolField("$readOnce")) {
// Legacy queries don't handle readOnce.
findCommand.getValue()->setReadOnce(true);
@@ -152,9 +147,9 @@ Message DBClientCursor::_assembleInit() {
Message DBClientCursor::_assembleGetMore() {
invariant(cursorId);
- std::int64_t batchSize = nextBatchSize();
auto getMoreRequest = GetMoreCommandRequest(cursorId, ns.coll().toString());
- getMoreRequest.setBatchSize(boost::make_optional(batchSize != 0, batchSize));
+ getMoreRequest.setBatchSize(
+ boost::make_optional(batchSize != 0, static_cast<int64_t>(batchSize)));
getMoreRequest.setMaxTimeMS(boost::make_optional(
tailableAwaitData(),
static_cast<std::int64_t>(durationCount<Milliseconds>(_awaitDataTimeout))));
@@ -240,11 +235,6 @@ void DBClientCursor::requestMore() {
invariant(!_connectionHasPendingReplies);
verify(cursorId && batch.pos == batch.objs.size());
- if (haveLimit) {
- nToReturn -= batch.objs.size();
- verify(nToReturn > 0);
- }
-
auto doRequestMore = [&] {
Message toSend = _assembleGetMore();
Message response;
@@ -269,7 +259,7 @@ void DBClientCursor::requestMore() {
void DBClientCursor::exhaustReceiveMore() {
verify(cursorId);
verify(batch.pos == batch.objs.size());
- uassert(40675, "Cannot have limit for exhaust query", !haveLimit);
+ uassert(40675, "Cannot have limit for exhaust query", limit == 0);
Message response;
verify(_client);
uassertStatusOK(
@@ -328,9 +318,6 @@ bool DBClientCursor::more() {
if (!_putBack.empty())
return true;
- if (haveLimit && static_cast<int>(batch.pos) >= nToReturn)
- return false;
-
if (batch.pos < batch.objs.size())
return true;
@@ -428,7 +415,7 @@ void DBClientCursor::attach(AScopedConnection* conn) {
DBClientCursor::DBClientCursor(DBClientBase* client,
const NamespaceStringOrUUID& nsOrUuid,
const BSONObj& query,
- int nToReturn,
+ int limit,
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
@@ -438,7 +425,7 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
nsOrUuid,
query,
0, // cursorId
- nToReturn,
+ limit,
nToSkip,
fieldsToReturn,
queryOptions,
@@ -450,7 +437,7 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
DBClientCursor::DBClientCursor(DBClientBase* client,
const NamespaceStringOrUUID& nsOrUuid,
long long cursorId,
- int nToReturn,
+ int limit,
int queryOptions,
std::vector<BSONObj> initialBatch,
boost::optional<Timestamp> operationTime)
@@ -458,7 +445,7 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
nsOrUuid,
BSONObj(), // query
cursorId,
- nToReturn,
+ limit,
0, // nToSkip
nullptr, // fieldsToReturn
queryOptions,
@@ -471,7 +458,7 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
const NamespaceStringOrUUID& nsOrUuid,
const BSONObj& query,
long long cursorId,
- int nToReturn,
+ int limit,
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
@@ -485,8 +472,7 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
_nsOrUuid(nsOrUuid),
ns(nsOrUuid.nss() ? *nsOrUuid.nss() : NamespaceString(nsOrUuid.dbname())),
query(query),
- nToReturn(nToReturn),
- haveLimit(nToReturn > 0 && !(queryOptions & QueryOption_CursorTailable)),
+ limit(limit),
nToSkip(nToSkip),
fieldsToReturn(fieldsToReturn),
opts(queryOptions),
@@ -496,7 +482,9 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
_ownCursor(true),
wasError(false),
_readConcernObj(readConcernObj),
- _operationTime(operationTime) {}
+ _operationTime(operationTime) {
+ tassert(5746103, "DBClientCursor limit must be non-negative", limit >= 0);
+}
/* static */
StatusWith<std::unique_ptr<DBClientCursor>> DBClientCursor::fromAggregationRequest(
diff --git a/src/mongo/client/dbclient_cursor.h b/src/mongo/client/dbclient_cursor.h
index c3555a698e6..831798cc7ce 100644
--- a/src/mongo/client/dbclient_cursor.h
+++ b/src/mongo/client/dbclient_cursor.h
@@ -146,7 +146,7 @@ public:
DBClientCursor(DBClientBase* client,
const NamespaceStringOrUUID& nsOrUuid,
const BSONObj& query,
- int nToReturn,
+ int limit,
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
@@ -156,7 +156,7 @@ public:
DBClientCursor(DBClientBase* client,
const NamespaceStringOrUUID& nsOrUuid,
long long cursorId,
- int nToReturn,
+ int limit,
int options,
std::vector<BSONObj> initialBatch = {},
boost::optional<Timestamp> operationTime = boost::none);
@@ -281,7 +281,7 @@ private:
const NamespaceStringOrUUID& nsOrUuid,
const BSONObj& query,
long long cursorId,
- int nToReturn,
+ int limit,
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
@@ -290,8 +290,6 @@ private:
boost::optional<BSONObj> readConcernObj,
boost::optional<Timestamp> operationTime);
- int nextBatchSize();
-
DBClientBase* _client;
std::string _originalHost;
NamespaceStringOrUUID _nsOrUuid;
@@ -300,8 +298,7 @@ private:
// command.
NamespaceString ns;
BSONObj query;
- int nToReturn;
- bool haveLimit;
+ int limit;
int nToSkip;
const BSONObj* fieldsToReturn;
int opts;
diff --git a/src/mongo/client/dbclient_mockcursor.cpp b/src/mongo/client/dbclient_mockcursor.cpp
index 40b505c7abe..1647111c0b4 100644
--- a/src/mongo/client/dbclient_mockcursor.cpp
+++ b/src/mongo/client/dbclient_mockcursor.cpp
@@ -53,7 +53,13 @@ DBClientMockCursor::DBClientMockCursor(mongo::DBClientBase* client,
}
bool DBClientMockCursor::more() {
+ if (_batchSize && batch.pos == _batchSize) {
+ _fillNextBatch();
+ }
+ return batch.pos < batch.objs.size();
+}
+void DBClientMockCursor::_fillNextBatch() {
// Throw if requested via failpoint.
mockCursorThrowErrorOnGetMore.execute([&](const BSONObj& data) {
auto errorString = data["errorType"].valueStringDataSafe();
@@ -64,13 +70,6 @@ bool DBClientMockCursor::more() {
uasserted(errorCode, message);
});
- if (_batchSize && batch.pos == _batchSize) {
- _fillNextBatch();
- }
- return batch.pos < batch.objs.size();
-}
-
-void DBClientMockCursor::_fillNextBatch() {
int leftInBatch = _batchSize;
batch.objs.clear();
while (_iter.more() && (!_batchSize || leftInBatch--)) {
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index dba7055c7b7..cf5fe18de9c 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -1173,7 +1173,6 @@ env.Library(
'exec/count_scan.cpp',
'exec/delete.cpp',
'exec/distinct_scan.cpp',
- 'exec/ensure_sorted.cpp',
'exec/eof.cpp',
'exec/fetch.cpp',
'exec/geo_near.cpp',
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index e3a2e22a910..9dd3393360e 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -65,6 +65,44 @@ namespace {
const auto kTermField = "term"_sd;
+// Older client code before FCV 5.1 could still send 'ntoreturn' to mean a 'limit' or 'batchSize'.
+// This helper translates an 'ntoreturn' to an appropriate combination of 'limit', 'singleBatch',
+// and 'batchSize'. The translation rules are as follows: when 'ntoreturn' < 0 we have 'singleBatch'
+// semantics, so we set 'limit' to '-ntoreturn' and the 'singleBatch' flag to 'true'. When 'ntoreturn'
+// == 1 this is a limit-1 case so we set the 'limit' accordingly. Lastly when 'ntoreturn' > 1, it's
+// interpreted as a 'batchSize'. When the cluster is fully upgraded to v5.1 'ntoreturn' should no
+// longer be used on incoming requests.
+//
+// This helper can be removed when we no longer have to interoperate with 5.0
+// or older nodes.
+std::unique_ptr<FindCommandRequest> translateNtoReturnToLimitOrBatchSize(
+ std::unique_ptr<FindCommandRequest> findCmd) {
+ const auto& fcvState = serverGlobalParams.featureCompatibility;
+ uassert(5746102,
+ "the ntoreturn find command parameter is not supported when FCV >= 5.1",
+ !(fcvState.isVersionInitialized() &&
+ fcvState.isGreaterThanOrEqualTo(
+ ServerGlobalParams::FeatureCompatibility::Version::kVersion51) &&
+ findCmd->getNtoreturn()));
+
+ const auto ntoreturn = findCmd->getNtoreturn().get_value_or(0);
+ if (ntoreturn) {
+ if (ntoreturn < 0) {
+ uassert(5746100,
+ "bad ntoreturn value in query",
+ ntoreturn != std::numeric_limits<int>::min());
+ findCmd->setLimit(-ntoreturn);
+ findCmd->setSingleBatch(true);
+ } else if (ntoreturn == 1) {
+ findCmd->setLimit(1);
+ } else {
+ findCmd->setBatchSize(ntoreturn);
+ }
+ }
+ findCmd->setNtoreturn(boost::none);
+ return findCmd;
+}
+
// Parses the command object to a FindCommandRequest. If the client request did not specify any
// runtime constants, make them available to the query here.
std::unique_ptr<FindCommandRequest> parseCmdObjectToFindCommandRequest(OperationContext* opCtx,
@@ -74,7 +112,8 @@ std::unique_ptr<FindCommandRequest> parseCmdObjectToFindCommandRequest(Operation
std::move(cmdObj),
std::move(nss),
APIParameters::get(opCtx).getAPIStrict().value_or(false));
- return findCommand;
+
+ return translateNtoReturnToLimitOrBatchSize(std::move(findCommand));
}
boost::intrusive_ptr<ExpressionContext> makeExpressionContext(
@@ -123,6 +162,16 @@ boost::intrusive_ptr<ExpressionContext> makeExpressionContext(
}
/**
+ * Fills out the CurOp for "opCtx" with information about this query.
+ */
+void beginQueryOp(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& queryObj) {
+ auto curOp = CurOp::get(opCtx);
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ curOp->setOpDescription_inlock(queryObj);
+ curOp->setNS_inlock(nss.ns());
+}
+
+/**
* A command for running .find() queries.
*/
class FindCmd final : public Command {
@@ -369,7 +418,6 @@ public:
opCtx->lockState()->skipAcquireTicket();
}
-
// Acquire locks. If the query is on a view, we release our locks and convert the query
// request into an aggregation command.
boost::optional<AutoGetCollectionForReadCommandMaybeLockFree> ctx;
@@ -393,13 +441,7 @@ public:
opCtx, nss, ReadPreferenceSetting::get(opCtx).canRunOnSecondary()));
// Fill out curop information.
- //
- // We pass negative values for 'ntoreturn' and 'ntoskip' to indicate that these values
- // should be omitted from the log line. Limit and skip information is already present in
- // the find command parameters, so these fields are redundant.
- const int ntoreturn = -1;
- const int ntoskip = -1;
- beginQueryOp(opCtx, nss, _request.body, ntoreturn, ntoskip);
+ beginQueryOp(opCtx, nss, _request.body);
// Finish the parsing step by using the FindCommandRequest to create a CanonicalQuery.
const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index aba407a169a..b9e7ce060fe 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -828,8 +828,6 @@ void OpDebug::report(OperationContext* opCtx,
if (mongotCursorId) {
pAttrs->add("mongot", makeMongotDebugStatsObject());
}
- OPDEBUG_TOATTR_HELP(ntoreturn);
- OPDEBUG_TOATTR_HELP(ntoskip);
OPDEBUG_TOATTR_HELP_BOOL(exhaust);
OPDEBUG_TOATTR_HELP_OPTIONAL("keysExamined", additiveMetrics.keysExamined);
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index d95b56e41e9..8c3adec3a91 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -224,8 +224,6 @@ public:
// detailed options
long long cursorid{-1};
- long long ntoreturn{-1};
- long long ntoskip{-1};
bool exhaust{false};
// For search using mongot.
diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp
index efbdc3db106..1c3dfc46e6b 100644
--- a/src/mongo/db/dbdirectclient.cpp
+++ b/src/mongo/db/dbdirectclient.cpp
@@ -158,7 +158,7 @@ void DBDirectClient::say(Message& toSend, bool isRetry, string* actualServer) {
unique_ptr<DBClientCursor> DBDirectClient::query(const NamespaceStringOrUUID& nsOrUuid,
Query query,
- int nToReturn,
+ int limit,
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
@@ -166,7 +166,7 @@ unique_ptr<DBClientCursor> DBDirectClient::query(const NamespaceStringOrUUID& ns
boost::optional<BSONObj> readConcernObj) {
invariant(!readConcernObj, "passing readConcern to DBDirectClient functions is not supported");
return DBClientBase::query(
- nsOrUuid, query, nToReturn, nToSkip, fieldsToReturn, queryOptions, batchSize);
+ nsOrUuid, query, limit, nToSkip, fieldsToReturn, queryOptions, batchSize);
}
write_ops::FindAndModifyCommandReply DBDirectClient::findAndModify(
diff --git a/src/mongo/db/dbdirectclient.h b/src/mongo/db/dbdirectclient.h
index f3dd6c6393d..b580ff2e94e 100644
--- a/src/mongo/db/dbdirectclient.h
+++ b/src/mongo/db/dbdirectclient.h
@@ -57,7 +57,7 @@ public:
virtual std::unique_ptr<DBClientCursor> query(
const NamespaceStringOrUUID& nsOrUuid,
Query query,
- int nToReturn = 0,
+ int limit = 0,
int nToSkip = 0,
const BSONObj* fieldsToReturn = nullptr,
int queryOptions = 0,
diff --git a/src/mongo/db/exec/ensure_sorted.cpp b/src/mongo/db/exec/ensure_sorted.cpp
deleted file mode 100644
index f713a2eed3a..00000000000
--- a/src/mongo/db/exec/ensure_sorted.cpp
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/exec/ensure_sorted.h"
-
-#include <memory>
-
-#include "mongo/db/exec/scoped_timer.h"
-#include "mongo/db/query/find_common.h"
-
-namespace mongo {
-
-using std::unique_ptr;
-
-const char* EnsureSortedStage::kStageType = "ENSURE_SORTED";
-
-EnsureSortedStage::EnsureSortedStage(ExpressionContext* expCtx,
- BSONObj pattern,
- WorkingSet* ws,
- std::unique_ptr<PlanStage> child)
- : PlanStage(kStageType, expCtx), _ws(ws), _sortKeyComparator(pattern) {
- _children.emplace_back(std::move(child));
-}
-
-bool EnsureSortedStage::isEOF() {
- return child()->isEOF();
-}
-
-PlanStage::StageState EnsureSortedStage::doWork(WorkingSetID* out) {
- StageState stageState = child()->work(out);
-
- if (PlanStage::ADVANCED == stageState) {
- // We extract the sort key from the WSM's metadata. This must have been generated by a
- // SortKeyGeneratorStage descendent in the execution tree.
- WorkingSetMember* member = _ws->get(*out);
- auto curSortKey = member->metadata().getSortKey();
- invariant(!curSortKey.missing());
-
- if (!_prevSortKey.missing() && !isInOrder(_prevSortKey, curSortKey)) {
- // 'member' is out of order. Drop it from the result set.
- _ws->free(*out);
- ++_specificStats.nDropped;
- return PlanStage::NEED_TIME;
- }
-
- _prevSortKey = curSortKey;
- return PlanStage::ADVANCED;
- }
-
- return stageState;
-}
-
-unique_ptr<PlanStageStats> EnsureSortedStage::getStats() {
- _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret =
- std::make_unique<PlanStageStats>(_commonStats, STAGE_ENSURE_SORTED);
- ret->specific = std::make_unique<EnsureSortedStats>(_specificStats);
- ret->children.emplace_back(child()->getStats());
- return ret;
-}
-
-const SpecificStats* EnsureSortedStage::getSpecificStats() const {
- return &_specificStats;
-}
-
-bool EnsureSortedStage::isInOrder(const Value& lhsKey, const Value& rhsKey) const {
- // No need to compare with a collator, since the sort keys were extracted by the
- // SortKeyGenerator, which has already mapped strings to their comparison keys.
- return _sortKeyComparator(lhsKey, rhsKey) <= 0;
-}
-
-} // namespace mongo
diff --git a/src/mongo/db/exec/ensure_sorted.h b/src/mongo/db/exec/ensure_sorted.h
deleted file mode 100644
index 9d917b93744..00000000000
--- a/src/mongo/db/exec/ensure_sorted.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include "mongo/db/exec/plan_stage.h"
-#include "mongo/db/exec/sort_key_comparator.h"
-
-namespace mongo {
-
-/**
- * Takes the output of its child and drops any results that are not in the sort order specified by
- * 'pattern'. Thus, if the pattern is {a: -1} and the following documents are inputted:
- * {a: 9}, {a: 6}, {a: 8}, {a, 1}
- * The third document will be dropped so that the output is:
- * {a: 9}, {a: 6}, {a, 1}
- */
-class EnsureSortedStage final : public PlanStage {
-public:
- EnsureSortedStage(ExpressionContext* expCtx,
- BSONObj pattern,
- WorkingSet* ws,
- std::unique_ptr<PlanStage> child);
-
- bool isEOF() final;
- StageState doWork(WorkingSetID* out) final;
-
- StageType stageType() const final {
- return STAGE_ENSURE_SORTED;
- }
-
- std::unique_ptr<PlanStageStats> getStats() final;
-
- const SpecificStats* getSpecificStats() const final;
-
- static const char* kStageType;
-
-private:
- /**
- * Returns whether the result with the lhsSortKey should come before the result with the
- * rhsSortKey in sort order.
- */
- bool isInOrder(const Value& lhsSortKey, const Value& rhsSortKey) const;
-
- WorkingSet* _ws;
-
- // Comparator that is aware of the pattern that we're sorting by.
- SortKeyComparator _sortKeyComparator;
-
- // The sort key of the previous result.
- Value _prevSortKey;
-
- EnsureSortedStats _specificStats;
-};
-
-} // namespace mongo
diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h
index 92e96c27894..d38bf6c9437 100644
--- a/src/mongo/db/exec/plan_stats.h
+++ b/src/mongo/db/exec/plan_stats.h
@@ -397,21 +397,6 @@ struct DistinctScanStats : public SpecificStats {
BSONObj indexBounds;
};
-struct EnsureSortedStats : public SpecificStats {
- EnsureSortedStats() : nDropped(0) {}
-
- std::unique_ptr<SpecificStats> clone() const final {
- return std::make_unique<EnsureSortedStats>(*this);
- }
-
- uint64_t estimateObjectSizeInBytes() const {
- return sizeof(*this);
- }
-
- // The number of out-of-order results that were dropped.
- long long nDropped;
-};
-
struct FetchStats : public SpecificStats {
FetchStats() = default;
diff --git a/src/mongo/db/exec/trial_period_utils.cpp b/src/mongo/db/exec/trial_period_utils.cpp
index 80e55f890cd..3ed27ad751c 100644
--- a/src/mongo/db/exec/trial_period_utils.cpp
+++ b/src/mongo/db/exec/trial_period_utils.cpp
@@ -54,10 +54,7 @@ size_t getTrialPeriodNumToReturn(const CanonicalQuery& query) {
// Determine the number of results which we will produce during the plan ranking phase before
// stopping.
size_t numResults = static_cast<size_t>(internalQueryPlanEvaluationMaxResults.load());
- if (query.getFindCommandRequest().getNtoreturn()) {
- numResults = std::min(static_cast<size_t>(*query.getFindCommandRequest().getNtoreturn()),
- numResults);
- } else if (query.getFindCommandRequest().getLimit()) {
+ if (query.getFindCommandRequest().getLimit()) {
numResults =
std::min(static_cast<size_t>(*query.getFindCommandRequest().getLimit()), numResults);
}
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index b714624bd34..de2e39895ff 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -68,6 +68,10 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
MatchExpressionParser::AllowedFeatureSet allowedFeatures,
const ProjectionPolicies& projectionPolicies,
std::vector<std::unique_ptr<InnerPipelineStageInterface>> pipeline) {
+ tassert(5746107,
+ "ntoreturn should not be set on the findCommand",
+ findCommand->getNtoreturn() == boost::none);
+
auto status = query_request_helper::validateFindCommandRequest(*findCommand);
if (!status.isOK()) {
return status;
@@ -498,10 +502,6 @@ std::string CanonicalQuery::toString() const {
ss << " skip=" << *_findCommand->getSkip();
}
- if (_findCommand->getNtoreturn()) {
- ss << " ntoreturn=" << *_findCommand->getNtoreturn() << '\n';
- }
-
// The expression tree puts an endl on for us.
ss << "Tree: " << _root->debugString();
ss << "Sort: " << _findCommand->getSort().toString() << '\n';
@@ -535,10 +535,6 @@ std::string CanonicalQuery::toStringShort() const {
ss << " skip: " << *_findCommand->getSkip();
}
- if (_findCommand->getNtoreturn()) {
- ss << " ntoreturn=" << *_findCommand->getNtoreturn();
- }
-
return ss;
}
diff --git a/src/mongo/db/query/classic_stage_builder.cpp b/src/mongo/db/query/classic_stage_builder.cpp
index 5e8387e83cd..de5cd658f5c 100644
--- a/src/mongo/db/query/classic_stage_builder.cpp
+++ b/src/mongo/db/query/classic_stage_builder.cpp
@@ -44,7 +44,6 @@
#include "mongo/db/exec/collection_scan.h"
#include "mongo/db/exec/count_scan.h"
#include "mongo/db/exec/distinct_scan.h"
-#include "mongo/db/exec/ensure_sorted.h"
#include "mongo/db/exec/eof.h"
#include "mongo/db/exec/fetch.h"
#include "mongo/db/exec/geo_near.h"
@@ -370,12 +369,6 @@ std::unique_ptr<PlanStage> ClassicStageBuilder::build(const QuerySolutionNode* r
params.endKeyInclusive = csn->endKeyInclusive;
return std::make_unique<CountScan>(expCtx, _collection, std::move(params), _ws);
}
- case STAGE_ENSURE_SORTED: {
- const EnsureSortedNode* esn = static_cast<const EnsureSortedNode*>(root);
- auto childStage = build(esn->children[0]);
- return std::make_unique<EnsureSortedStage>(
- expCtx, esn->pattern, _ws, std::move(childStage));
- }
case STAGE_EOF: {
return std::make_unique<EOFStage>(expCtx);
}
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 0f791f2d11e..c380e4419e2 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -104,19 +104,6 @@ bool shouldSaveCursorGetMore(PlanExecutor* exec, bool isTailable) {
return isTailable || !exec->isEOF();
}
-void beginQueryOp(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& queryObj,
- long long ntoreturn,
- long long ntoskip) {
- auto curOp = CurOp::get(opCtx);
- curOp->debug().ntoreturn = ntoreturn;
- curOp->debug().ntoskip = ntoskip;
- stdx::lock_guard<Client> lk(*opCtx->getClient());
- curOp->setOpDescription_inlock(queryObj);
- curOp->setNS_inlock(nss.ns());
-}
-
void endQueryOp(OperationContext* opCtx,
const CollectionPtr& collection,
const PlanExecutor& exec,
diff --git a/src/mongo/db/query/find.h b/src/mongo/db/query/find.h
index 65ff1ed01f5..2b45efcd602 100644
--- a/src/mongo/db/query/find.h
+++ b/src/mongo/db/query/find.h
@@ -64,15 +64,6 @@ bool shouldSaveCursor(OperationContext* opCtx,
bool shouldSaveCursorGetMore(PlanExecutor* exec, bool isTailable);
/**
- * Fills out the CurOp for "opCtx" with information about this query.
- */
-void beginQueryOp(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& queryObj,
- long long ntoreturn,
- long long ntoskip);
-
-/**
* 1) Fills out CurOp for "opCtx" with information regarding this query's execution.
* 2) Reports index usage to the CollectionQueryInfo.
*
diff --git a/src/mongo/db/query/find_common.cpp b/src/mongo/db/query/find_common.cpp
index 4f34e0b1cf4..a6cb38186ca 100644
--- a/src/mongo/db/query/find_common.cpp
+++ b/src/mongo/db/query/find_common.cpp
@@ -59,14 +59,16 @@ const OperationContext::Decoration<AwaitDataState> awaitDataState =
OperationContext::declareDecoration<AwaitDataState>();
bool FindCommon::enoughForFirstBatch(const FindCommandRequest& findCommand, long long numDocs) {
- auto effectiveBatchSize =
- findCommand.getBatchSize() ? findCommand.getBatchSize() : findCommand.getNtoreturn();
- if (!effectiveBatchSize) {
+ auto batchSize = findCommand.getBatchSize();
+ tassert(5746104,
+ "ntoreturn on the find command should not be set",
+ findCommand.getNtoreturn() == boost::none);
+ if (!batchSize) {
// We enforce a default batch size for the initial find if no batch size is specified.
return numDocs >= query_request_helper::kDefaultBatchSize;
}
- return numDocs >= effectiveBatchSize.value();
+ return numDocs >= batchSize.value();
}
bool FindCommon::haveSpaceForNext(const BSONObj& nextDoc, long long numDocs, int bytesBuffered) {
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index c62c497d7ba..285e8c005d0 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -351,8 +351,6 @@ void fillOutPlannerParams(OperationContext* opCtx,
plannerParams->options |= QueryPlannerParams::GENERATE_COVERED_IXSCANS;
}
- plannerParams->options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
-
if (shouldWaitForOplogVisibility(
opCtx, collection, canonicalQuery->getFindCommandRequest().getTailable())) {
plannerParams->options |= QueryPlannerParams::OPLOG_SCAN_WAIT_FOR_VISIBLE;
@@ -1182,15 +1180,12 @@ inline bool isQuerySbeCompatible(OperationContext* opCtx,
return part.fieldPath &&
!FieldRef(part.fieldPath->fullPath()).hasNumericPathComponents();
});
- // A find command with 'ntoreturn' is not supported in SBE due to the possibility of an
- // ENSURE_SORTED stage.
- const bool doesNotNeedEnsureSorted = !cq->getFindCommandRequest().getNtoreturn();
// Queries against a time-series collection are not currently supported by SBE.
const bool isQueryNotAgainstTimeseriesCollection = !(cq->nss().isTimeseriesBucketsCollection());
return allExpressionsSupported && isNotCount && doesNotContainMetadataRequirements &&
- doesNotNeedEnsureSorted && isQueryNotAgainstTimeseriesCollection &&
- doesNotSortOnMetaOrPathWithNumericComponents && isNotOplog;
+ isQueryNotAgainstTimeseriesCollection && doesNotSortOnMetaOrPathWithNumericComponents &&
+ isNotOplog;
}
} // namespace
diff --git a/src/mongo/db/query/plan_explainer_impl.cpp b/src/mongo/db/query/plan_explainer_impl.cpp
index 2bbb1ff7726..e832698de78 100644
--- a/src/mongo/db/query/plan_explainer_impl.cpp
+++ b/src/mongo/db/query/plan_explainer_impl.cpp
@@ -332,12 +332,6 @@ void statsToBSON(const PlanStageStats& stats,
if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("keysExamined", static_cast<long long>(spec->keysExamined));
}
- } else if (STAGE_ENSURE_SORTED == stats.stageType) {
- EnsureSortedStats* spec = static_cast<EnsureSortedStats*>(stats.specific.get());
-
- if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
- bob->appendNumber("nDropped", spec->nDropped);
- }
} else if (STAGE_FETCH == stats.stageType) {
FetchStats* spec = static_cast<FetchStats*>(stats.specific.get());
if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index dc943fd771c..1b3faf6fae1 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -539,10 +539,6 @@ std::unique_ptr<QuerySolutionNode> tryPushdownProjectBeneathSort(
bool canUseSimpleSort(const QuerySolutionNode& solnRoot,
const CanonicalQuery& cq,
const QueryPlannerParams& plannerParams) {
- const bool splitLimitedSortEligible = cq.getFindCommandRequest().getNtoreturn() &&
- !cq.getFindCommandRequest().getSingleBatch() &&
- plannerParams.options & QueryPlannerParams::SPLIT_LIMITED_SORT;
-
// The simple sort stage discards any metadata other than sort key metadata. It can only be used
// if there are no metadata dependencies, or the only metadata dependency is a 'kSortKey'
// dependency.
@@ -554,11 +550,7 @@ bool canUseSimpleSort(const QuerySolutionNode& solnRoot,
// record ids along through the sorting process is wasted work when these ids will never be
// consumed later in the execution of the query. If the record ids are needed, however, then
// we can't use the simple sort stage.
- !(plannerParams.options & QueryPlannerParams::PRESERVE_RECORD_ID)
- // Disable for queries which have an ntoreturn value and are eligible for the "split limited
- // sort" hack. Such plans require record ids to be present for deduping, but the simple sort
- // stage discards record ids.
- && !splitLimitedSortEligible;
+ !(plannerParams.options & QueryPlannerParams::PRESERVE_RECORD_ID);
}
} // namespace
@@ -765,6 +757,10 @@ QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query
*blockingSortOut = false;
const FindCommandRequest& findCommand = query.getFindCommandRequest();
+ tassert(5746105,
+ "ntoreturn on the find command should not be set",
+ findCommand.getNtoreturn() == boost::none);
+
const BSONObj& sortObj = findCommand.getSort();
if (sortObj.isEmpty()) {
@@ -839,67 +835,9 @@ QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query
// the limit N and skip count M. The sort should return an ordered list
// N + M items so that the skip stage can discard the first M results.
if (findCommand.getLimit()) {
- // We have a true limit. The limit can be combined with the SORT stage.
+ // The limit can be combined with the SORT stage.
sortNodeRaw->limit = static_cast<size_t>(*findCommand.getLimit()) +
static_cast<size_t>(findCommand.getSkip().value_or(0));
- } else if (findCommand.getNtoreturn()) {
- // We have an ntoreturn specified by an OP_QUERY style find. This is used
- // by clients to mean both batchSize and limit.
- //
- // Overflow here would be bad and could cause a nonsense limit. Cast
- // skip and limit values to unsigned ints to make sure that the
- // sum is never stored as signed. (See SERVER-13537).
- sortNodeRaw->limit = static_cast<size_t>(*findCommand.getNtoreturn()) +
- static_cast<size_t>(findCommand.getSkip().value_or(0));
-
- // This is a SORT with a limit. The wire protocol has a single quantity called "numToReturn"
- // which could mean either limit or batchSize. We have no idea what the client intended.
- // One way to handle the ambiguity of a limited OR stage is to use the SPLIT_LIMITED_SORT
- // hack.
- //
- // If singleBatch is true (meaning that 'ntoreturn' was initially passed to the server as a
- // negative value), then we treat numToReturn as a limit. Since there is no limit-batchSize
- // ambiguity in this case, we do not use the SPLIT_LIMITED_SORT hack.
- //
- // If numToReturn is really a limit, then we want to add a limit to this SORT stage, and
- // hence perform a topK.
- //
- // If numToReturn is really a batchSize, then we want to perform a regular blocking sort.
- //
- // Since we don't know which to use, just join the two options with an OR, with the topK
- // first. If the client wants a limit, they'll get the efficiency of topK. If they want a
- // batchSize, the other OR branch will deliver the missing results. The OR stage handles
- // deduping.
- //
- // We must also add an ENSURE_SORTED node above the OR to ensure that the final results are
- // in correct sorted order, which may not be true if the data is concurrently modified.
- //
- // Not allowed for geo or text, because we assume elsewhere that those stages appear just
- // once.
- if (!findCommand.getSingleBatch() &&
- params.options & QueryPlannerParams::SPLIT_LIMITED_SORT &&
- !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT) &&
- !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO) &&
- !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)) {
- // If we're here then the SPLIT_LIMITED_SORT hack is turned on, and the query is of a
- // type that allows the hack.
- //
- // The EnsureSortedStage consumes sort key metadata, so we must instruct the sort to
- // attach it.
- sortNodeRaw->addSortKeyMetadata = true;
-
- auto orNode = std::make_unique<OrNode>();
- orNode->children.push_back(solnRoot);
- auto sortClone = static_cast<SortNode*>(sortNodeRaw->clone());
- sortClone->limit = 0;
- orNode->children.push_back(sortClone);
-
- // Add ENSURE_SORTED above the OR.
- auto ensureSortedNode = std::make_unique<EnsureSortedNode>();
- ensureSortedNode->pattern = sortNodeRaw->pattern;
- ensureSortedNode->children.push_back(orNode.release());
- solnRoot = ensureSortedNode.release();
- }
} else {
sortNodeRaw->limit = 0;
}
@@ -1006,26 +944,14 @@ std::unique_ptr<QuerySolution> QueryPlannerAnalysis::analyzeDataAccess(
}
}
- // When there is both a blocking sort and a limit, the limit will
- // be enforced by the blocking sort.
- // Otherwise, we need to limit the results in the case of a hard limit
- // (ie. limit in raw query is negative)
- if (!hasSortStage) {
- // We don't have a sort stage. This means that, if there is a limit, we will have
- // to enforce it ourselves since it's not handled inside SORT.
- if (findCommand.getLimit()) {
- LimitNode* limit = new LimitNode();
- limit->limit = *findCommand.getLimit();
- limit->children.push_back(solnRoot.release());
- solnRoot.reset(limit);
- } else if (findCommand.getNtoreturn() && findCommand.getSingleBatch()) {
- // We have a "legacy limit", i.e. a negative ntoreturn value from an OP_QUERY style
- // find.
- LimitNode* limit = new LimitNode();
- limit->limit = *findCommand.getNtoreturn();
- limit->children.push_back(solnRoot.release());
- solnRoot.reset(limit);
- }
+ // When there is both a blocking sort and a limit, the limit will be enforced by the blocking
+ // sort. Otherwise, we will have to enforce the limit ourselves since it's not handled inside
+ // SORT.
+ if (!hasSortStage && findCommand.getLimit()) {
+ LimitNode* limit = new LimitNode();
+ limit->limit = *findCommand.getLimit();
+ limit->children.push_back(solnRoot.release());
+ solnRoot.reset(limit);
}
solnRoot = tryPushdownProjectBeneathSort(std::move(solnRoot));
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 0d13c318193..fda5b87b9e4 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -155,9 +155,6 @@ string optionString(size_t options) {
case QueryPlannerParams::IS_COUNT:
ss << "IS_COUNT ";
break;
- case QueryPlannerParams::SPLIT_LIMITED_SORT:
- ss << "SPLIT_LIMITED_SORT ";
- break;
case QueryPlannerParams::GENERATE_COVERED_IXSCANS:
ss << "GENERATE_COVERED_IXSCANS ";
break;
diff --git a/src/mongo/db/query/query_planner_operator_test.cpp b/src/mongo/db/query/query_planner_operator_test.cpp
index 687c335fef1..94feaf2b3fe 100644
--- a/src/mongo/db/query/query_planner_operator_test.cpp
+++ b/src/mongo/db/query/query_planner_operator_test.cpp
@@ -562,7 +562,7 @@ TEST_F(QueryPlannerTest, ExistsBoundsCompound) {
TEST_F(QueryPlannerTest, BasicSkipNoIndex) {
addIndex(BSON("a" << 1));
- runQuerySkipNToReturn(BSON("x" << 5), 3, 0);
+ runQuerySkipLimit(BSON("x" << 5), 3, 0);
ASSERT_EQUALS(getNumSolutions(), 1U);
assertSolutionExists("{skip: {n: 3, node: {cscan: {dir: 1, filter: {x: 5}}}}}");
@@ -571,7 +571,7 @@ TEST_F(QueryPlannerTest, BasicSkipNoIndex) {
TEST_F(QueryPlannerTest, BasicSkipWithIndex) {
addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySkipNToReturn(BSON("a" << 5), 8, 0);
+ runQuerySkipLimit(BSON("a" << 5), 8, 0);
ASSERT_EQUALS(getNumSolutions(), 2U);
assertSolutionExists("{skip: {n: 8, node: {cscan: {dir: 1, filter: {a: 5}}}}}");
@@ -583,7 +583,7 @@ TEST_F(QueryPlannerTest, BasicSkipWithIndex) {
TEST_F(QueryPlannerTest, CoveredSkipWithIndex) {
addIndex(fromjson("{a: 1, b: 1}"));
- runQuerySortProjSkipNToReturn(
+ runQuerySortProjSkipLimit(
fromjson("{a: 5}"), BSONObj(), fromjson("{_id: 0, a: 1, b: 1}"), 8, 0);
ASSERT_EQUALS(getNumSolutions(), 2U);
@@ -598,7 +598,7 @@ TEST_F(QueryPlannerTest, CoveredSkipWithIndex) {
TEST_F(QueryPlannerTest, SkipEvaluatesAfterFetchWithPredicate) {
addIndex(fromjson("{a: 1}"));
- runQuerySkipNToReturn(fromjson("{a: 5, b: 7}"), 8, 0);
+ runQuerySkipLimit(fromjson("{a: 5, b: 7}"), 8, 0);
ASSERT_EQUALS(getNumSolutions(), 2U);
assertSolutionExists("{skip: {n: 8, node: {cscan: {dir: 1, filter: {a: 5, b: 7}}}}}");
@@ -616,7 +616,7 @@ TEST_F(QueryPlannerTest, SkipEvaluatesAfterFetchWithPredicate) {
TEST_F(QueryPlannerTest, SkipEvaluatesBeforeFetchForIndexedOr) {
addIndex(fromjson("{a: 1}"));
- runQuerySkipNToReturn(fromjson("{$or: [{a: 5}, {a: 7}]}"), 8, 0);
+ runQuerySkipLimit(fromjson("{$or: [{a: 5}, {a: 7}]}"), 8, 0);
ASSERT_EQUALS(getNumSolutions(), 2U);
assertSolutionExists(
@@ -630,25 +630,16 @@ TEST_F(QueryPlannerTest, SkipEvaluatesBeforeFetchForIndexedOr) {
TEST_F(QueryPlannerTest, BasicLimitNoIndex) {
addIndex(BSON("a" << 1));
- runQuerySkipNToReturn(BSON("x" << 5), 0, -3);
+ runQuerySkipLimit(BSON("x" << 5), 0, 3);
ASSERT_EQUALS(getNumSolutions(), 1U);
assertSolutionExists("{limit: {n: 3, node: {cscan: {dir: 1, filter: {x: 5}}}}}");
}
-TEST_F(QueryPlannerTest, BasicSoftLimitNoIndex) {
- addIndex(BSON("a" << 1));
-
- runQuerySkipNToReturn(BSON("x" << 5), 0, 3);
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
-}
-
TEST_F(QueryPlannerTest, BasicLimitWithIndex) {
addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySkipNToReturn(BSON("a" << 5), 0, -5);
+ runQuerySkipLimit(BSON("a" << 5), 0, 5);
ASSERT_EQUALS(getNumSolutions(), 2U);
assertSolutionExists("{limit: {n: 5, node: {cscan: {dir: 1, filter: {a: 5}}}}}");
@@ -657,22 +648,10 @@ TEST_F(QueryPlannerTest, BasicLimitWithIndex) {
"{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}}}");
}
-TEST_F(QueryPlannerTest, BasicSoftLimitWithIndex) {
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuerySkipNToReturn(BSON("a" << 5), 0, 5);
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: 5}}}}");
- assertSolutionExists(
- "{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}");
-}
-
TEST_F(QueryPlannerTest, SkipAndLimit) {
addIndex(BSON("x" << 1));
- runQuerySkipNToReturn(BSON("x" << BSON("$lte" << 4)), 7, -2);
+ runQuerySkipLimit(BSON("x" << BSON("$lte" << 4)), 7, 2);
ASSERT_EQUALS(getNumSolutions(), 2U);
assertSolutionExists(
@@ -683,21 +662,6 @@ TEST_F(QueryPlannerTest, SkipAndLimit) {
"{skip: {n: 7, node: {ixscan: {filter: null, pattern: {x: 1}}}}}}}}}");
}
-TEST_F(QueryPlannerTest, SkipAndSoftLimit) {
- addIndex(BSON("x" << 1));
-
- runQuerySkipNToReturn(BSON("x" << BSON("$lte" << 4)), 7, 2);
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists(
- "{skip: {n: 7, node: "
- "{cscan: {dir: 1, filter: {x: {$lte: 4}}}}}}");
- assertSolutionExists(
- "{fetch: {filter: null, node: {skip: {n: 7, node: "
- "{ixscan: {filter: null, pattern: {x: 1}}}}}}}");
-}
-
-
//
// Basic sort
//
@@ -803,7 +767,7 @@ TEST_F(QueryPlannerTest, CompoundIndexWithEqualityPredicatesProvidesSort) {
TEST_F(QueryPlannerTest, SortLimit) {
// Negative limit indicates hard limit - see query_request_helper.cpp
- runQuerySortProjSkipNToReturn(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, -3);
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, 3);
assertNumSolutions(1U);
assertSolutionExists(
"{sort: {pattern: {a: 1}, limit: 3, type: 'simple', node:"
@@ -811,7 +775,7 @@ TEST_F(QueryPlannerTest, SortLimit) {
}
TEST_F(QueryPlannerTest, SortSkip) {
- runQuerySortProjSkipNToReturn(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, 0);
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, 0);
assertNumSolutions(1U);
// If only skip is provided, do not limit sort.
assertSolutionExists(
@@ -821,7 +785,7 @@ TEST_F(QueryPlannerTest, SortSkip) {
}
TEST_F(QueryPlannerTest, SortSkipLimit) {
- runQuerySortProjSkipNToReturn(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, -3);
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, 3);
assertNumSolutions(1U);
// Limit in sort node should be adjusted by skip count
assertSolutionExists(
@@ -830,23 +794,6 @@ TEST_F(QueryPlannerTest, SortSkipLimit) {
"{cscan: {dir: 1}}}}}}}}");
}
-TEST_F(QueryPlannerTest, SortSoftLimit) {
- runQuerySortProjSkipNToReturn(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, 3);
- assertNumSolutions(1U);
- assertSolutionExists(
- "{sort: {pattern: {a: 1}, limit: 3, type: 'simple', node:"
- "{cscan: {dir: 1}}}}");
-}
-
-TEST_F(QueryPlannerTest, SortSkipSoftLimit) {
- runQuerySortProjSkipNToReturn(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, 3);
- assertNumSolutions(1U);
- assertSolutionExists(
- "{skip: {n: 2, node: "
- "{sort: {pattern: {a: 1}, limit: 5, type: 'simple', node: "
- "{cscan: {dir: 1}}}}}}}}");
-}
-
// Push project behind sort even when there is a skip between them.
TEST_F(QueryPlannerTest, PushProjectBehindSortWithSkipBetween) {
runQueryAsCommand(fromjson(R"({
diff --git a/src/mongo/db/query/query_planner_options_test.cpp b/src/mongo/db/query/query_planner_options_test.cpp
index c83f920ada9..0f80df93301 100644
--- a/src/mongo/db/query/query_planner_options_test.cpp
+++ b/src/mongo/db/query/query_planner_options_test.cpp
@@ -353,73 +353,6 @@ TEST_F(QueryPlannerTest, HintInvalid) {
}
//
-// Test the "split limited sort stages" hack.
-//
-
-TEST_F(QueryPlannerTest, SplitLimitedSort) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQuerySortProjSkipNToReturn(fromjson("{a: 1}"), fromjson("{b: 1}"), BSONObj(), 0, 3);
-
- assertNumSolutions(2U);
- // First solution has no blocking stage; no need to split.
- assertSolutionExists(
- "{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b: 1}}}}}");
- // Second solution has a blocking sort with a limit: it gets split and
- // joined with an OR stage.
- assertSolutionExists(
- "{ensureSorted: {pattern: {b: 1}, node: "
- "{or: {nodes: ["
- "{sort: {pattern: {b: 1}, limit: 3, type: 'default', node: "
- "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}}}, "
- "{sort: {pattern: {b: 1}, limit: 0, type: 'default', node: "
- "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}}}]}}}}");
-}
-
-// The same query run as a find command with a limit should not require the "split limited sort"
-// hack.
-TEST_F(QueryPlannerTest, NoSplitLimitedSortAsCommand) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1}, sort: {b: 1}, limit: 3}"));
-
- assertNumSolutions(2U);
- assertSolutionExists(
- "{limit: {n: 3, node: {fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b: 1}}}}}}}");
- assertSolutionExists(
- "{sort: {pattern: {b: 1}, limit: 3, type: 'simple', node: {fetch: {filter: null,"
- "node: {ixscan: {pattern: {a: 1}}}}}}}");
-}
-
-// Same query run as a find command with a batchSize rather than a limit should not require
-// the "split limited sort" hack, and should not have any limit represented inside the plan.
-TEST_F(QueryPlannerTest, NoSplitLimitedSortAsCommandBatchSize) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1}, sort: {b: 1}, batchSize: 3}"));
-
- assertNumSolutions(2U);
- assertSolutionExists(
- "{fetch: {filter: {a: 1}, node: {ixscan: "
- "{filter: null, pattern: {b: 1}}}}}");
- assertSolutionExists(
- "{sort: {pattern: {b: 1}, limit: 0, type: 'simple', node: {fetch: {filter: null,"
- "node: {ixscan: {pattern: {a: 1}}}}}}}");
-}
-
-
-//
// Test shard filter query planning
//
@@ -885,33 +818,6 @@ TEST_F(QueryPlannerTest, TagAccordingToCacheFailsOnBadInput) {
ASSERT_NOT_OK(s);
}
-// A query run as a find command with a sort and ntoreturn should generate a plan implementing
-// the 'ntoreturn hack'.
-TEST_F(QueryPlannerTest, NToReturnHackWithFindCommand) {
- params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
-
- runQueryAsCommand(fromjson("{find: 'testns', sort: {a:1}, ntoreturn:3}"));
-
- assertNumSolutions(1U);
- assertSolutionExists(
- "{ensureSorted: {pattern: {a: 1}, node: "
- "{or: {nodes: ["
- "{sort: {limit:3, pattern: {a:1}, type: 'default', node: {cscan: {dir:1}}}}, "
- "{sort: {limit:0, pattern: {a:1}, type: 'default', node: {cscan: {dir:1}}}}"
- "]}}}}");
-}
-
-TEST_F(QueryPlannerTest, NToReturnHackWithSingleBatch) {
- params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
-
- runQueryAsCommand(fromjson("{find: 'testns', sort: {a:1}, ntoreturn:3, singleBatch:true}"));
-
- assertNumSolutions(1U);
- assertSolutionExists(
- "{sort: {pattern: {a:1}, limit:3, type: 'simple', node: "
- "{cscan: {dir:1, filter: {}}}}}");
-}
-
TEST_F(QueryPlannerTest, DollarResumeAfterFieldPropagatedFromQueryRequestToStageBuilder) {
BSONObj cmdObj = BSON("find" << nss.ns() << "hint" << BSON("$natural" << 1) << "sort"
<< BSON("$natural" << 1) << "$_requestResumeToken" << true
diff --git a/src/mongo/db/query/query_planner_params.h b/src/mongo/db/query/query_planner_params.h
index deb452eacf6..be00b55fa38 100644
--- a/src/mongo/db/query/query_planner_params.h
+++ b/src/mongo/db/query/query_planner_params.h
@@ -74,34 +74,29 @@ struct QueryPlannerParams {
// could be eligible for the COUNT_SCAN.
IS_COUNT = 1 << 4,
- // Set this if you want to handle batchSize properly with sort(). If limits on SORT
- // stages are always actually limits, then this should be left off. If they are
- // sometimes to be interpreted as batchSize, then this should be turned on.
- SPLIT_LIMITED_SORT = 1 << 5,
-
// Set this to generate covered whole IXSCAN plans.
- GENERATE_COVERED_IXSCANS = 1 << 6,
+ GENERATE_COVERED_IXSCANS = 1 << 5,
// Set this to track the most recent timestamp seen by this cursor while scanning the oplog.
- TRACK_LATEST_OPLOG_TS = 1 << 7,
+ TRACK_LATEST_OPLOG_TS = 1 << 6,
// Set this so that collection scans on the oplog wait for visibility before reading.
- OPLOG_SCAN_WAIT_FOR_VISIBLE = 1 << 8,
+ OPLOG_SCAN_WAIT_FOR_VISIBLE = 1 << 7,
// Set this so that getExecutorDistinct() will only use a plan that _guarantees_ it will
// return exactly one document per value of the distinct field. See the comments above the
// declaration of getExecutorDistinct() for more detail.
- STRICT_DISTINCT_ONLY = 1 << 9,
+ STRICT_DISTINCT_ONLY = 1 << 8,
// Instruct the planner that the caller is expecting to consume the record ids associated
// with documents returned by the plan. Any generated query solution must not discard record
// ids. In some cases, record ids can be discarded as an optimization when they will not be
// consumed downstream.
- PRESERVE_RECORD_ID = 1 << 10,
+ PRESERVE_RECORD_ID = 1 << 9,
// Set this on an oplog scan to uassert that the oplog has not already rolled over the
// minimum 'ts' timestamp specified in the query.
- ASSERT_MIN_TS_HAS_NOT_FALLEN_OFF_OPLOG = 1 << 11,
+ ASSERT_MIN_TS_HAS_NOT_FALLEN_OFF_OPLOG = 1 << 10,
// Instruct the plan enumerator to enumerate contained $ors in a special order. $or
// enumeration can generate an exponential number of plans, and is therefore limited at some
@@ -118,11 +113,11 @@ struct QueryPlannerParams {
// order, we would get assignments [a_b, a_b], [a_c, a_c], [a_c, a_b], then [a_b, a_c]. This
// is thought to be helpful in general, but particularly in cases where all children of the
// $or use the same fields and have the same indexes available, as in this example.
- ENUMERATE_OR_CHILDREN_LOCKSTEP = 1 << 12,
+ ENUMERATE_OR_CHILDREN_LOCKSTEP = 1 << 11,
// Ensure that any plan generated returns data that is "owned." That is, all BSONObjs are
// in an "owned" state and are not pointing to data that belongs to the storage engine.
- RETURN_OWNED_DATA = 1 << 13,
+ RETURN_OWNED_DATA = 1 << 12,
};
// See Options enum above.
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index 65afb8565ab..abac4db5598 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -267,37 +267,35 @@ void QueryPlannerTest::addIndex(const IndexEntry& ie) {
}
void QueryPlannerTest::runQuery(BSONObj query) {
- runQuerySortProjSkipNToReturn(query, BSONObj(), BSONObj(), 0, 0);
+ runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
}
void QueryPlannerTest::runQuerySortProj(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj) {
- runQuerySortProjSkipNToReturn(query, sort, proj, 0, 0);
+ runQuerySortProjSkipLimit(query, sort, proj, 0, 0);
}
-void QueryPlannerTest::runQuerySkipNToReturn(const BSONObj& query,
- long long skip,
- long long ntoreturn) {
- runQuerySortProjSkipNToReturn(query, BSONObj(), BSONObj(), skip, ntoreturn);
+void QueryPlannerTest::runQuerySkipLimit(const BSONObj& query, long long skip, long long limit) {
+ runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), skip, limit);
}
void QueryPlannerTest::runQueryHint(const BSONObj& query, const BSONObj& hint) {
- runQuerySortProjSkipNToReturnHint(query, BSONObj(), BSONObj(), 0, 0, hint);
+ runQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
}
-void QueryPlannerTest::runQuerySortProjSkipNToReturn(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long ntoreturn) {
- runQuerySortProjSkipNToReturnHint(query, sort, proj, skip, ntoreturn, BSONObj());
+void QueryPlannerTest::runQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit) {
+ runQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
}
void QueryPlannerTest::runQuerySortHint(const BSONObj& query,
const BSONObj& sort,
const BSONObj& hint) {
- runQuerySortProjSkipNToReturnHint(query, sort, BSONObj(), 0, 0, hint);
+ runQuerySortProjSkipLimitHint(query, sort, BSONObj(), 0, 0, hint);
}
void QueryPlannerTest::runQueryHintMinMax(const BSONObj& query,
@@ -307,20 +305,20 @@ void QueryPlannerTest::runQueryHintMinMax(const BSONObj& query,
runQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj);
}
-void QueryPlannerTest::runQuerySortProjSkipNToReturnHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long ntoreturn,
- const BSONObj& hint) {
- runQueryFull(query, sort, proj, skip, ntoreturn, hint, BSONObj(), BSONObj());
+void QueryPlannerTest::runQuerySortProjSkipLimitHint(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint) {
+ runQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj());
}
void QueryPlannerTest::runQueryFull(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip,
- long long ntoreturn,
+ long long limit,
const BSONObj& hint,
const BSONObj& minObj,
const BSONObj& maxObj) {
@@ -333,13 +331,8 @@ void QueryPlannerTest::runQueryFull(const BSONObj& query,
if (skip) {
findCommand->setSkip(skip);
}
- if (ntoreturn) {
- if (ntoreturn < 0) {
- ASSERT_NE(ntoreturn, std::numeric_limits<long long>::min());
- ntoreturn = -ntoreturn;
- findCommand->setSingleBatch(true);
- }
- findCommand->setNtoreturn(ntoreturn);
+ if (limit) {
+ findCommand->setLimit(limit);
}
findCommand->setHint(hint);
findCommand->setMin(minObj);
@@ -360,25 +353,25 @@ void QueryPlannerTest::runQueryFull(const BSONObj& query,
}
void QueryPlannerTest::runInvalidQuery(const BSONObj& query) {
- runInvalidQuerySortProjSkipNToReturn(query, BSONObj(), BSONObj(), 0, 0);
+ runInvalidQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
}
void QueryPlannerTest::runInvalidQuerySortProj(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj) {
- runInvalidQuerySortProjSkipNToReturn(query, sort, proj, 0, 0);
+ runInvalidQuerySortProjSkipLimit(query, sort, proj, 0, 0);
}
-void QueryPlannerTest::runInvalidQuerySortProjSkipNToReturn(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long ntoreturn) {
- runInvalidQuerySortProjSkipNToReturnHint(query, sort, proj, skip, ntoreturn, BSONObj());
+void QueryPlannerTest::runInvalidQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit) {
+ runInvalidQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
}
void QueryPlannerTest::runInvalidQueryHint(const BSONObj& query, const BSONObj& hint) {
- runInvalidQuerySortProjSkipNToReturnHint(query, BSONObj(), BSONObj(), 0, 0, hint);
+ runInvalidQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
}
void QueryPlannerTest::runInvalidQueryHintMinMax(const BSONObj& query,
@@ -388,20 +381,20 @@ void QueryPlannerTest::runInvalidQueryHintMinMax(const BSONObj& query,
runInvalidQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj);
}
-void QueryPlannerTest::runInvalidQuerySortProjSkipNToReturnHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long ntoreturn,
- const BSONObj& hint) {
- runInvalidQueryFull(query, sort, proj, skip, ntoreturn, hint, BSONObj(), BSONObj());
+void QueryPlannerTest::runInvalidQuerySortProjSkipLimitHint(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint) {
+ runInvalidQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj());
}
void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip,
- long long ntoreturn,
+ long long limit,
const BSONObj& hint,
const BSONObj& minObj,
const BSONObj& maxObj) {
@@ -414,13 +407,8 @@ void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
if (skip) {
findCommand->setSkip(skip);
}
- if (ntoreturn) {
- if (ntoreturn < 0) {
- ASSERT_NE(ntoreturn, std::numeric_limits<long long>::min());
- ntoreturn = -ntoreturn;
- findCommand->setSingleBatch(true);
- }
- findCommand->setNtoreturn(ntoreturn);
+ if (limit) {
+ findCommand->setLimit(limit);
}
findCommand->setHint(hint);
findCommand->setMin(minObj);
diff --git a/src/mongo/db/query/query_planner_test_fixture.h b/src/mongo/db/query/query_planner_test_fixture.h
index 17083e9fcb6..bb0f0ed3e5e 100644
--- a/src/mongo/db/query/query_planner_test_fixture.h
+++ b/src/mongo/db/query/query_planner_test_fixture.h
@@ -91,15 +91,15 @@ protected:
void runQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj);
- void runQuerySkipNToReturn(const BSONObj& query, long long skip, long long ntoreturn);
+ void runQuerySkipLimit(const BSONObj& query, long long skip, long long limit);
void runQueryHint(const BSONObj& query, const BSONObj& hint);
- void runQuerySortProjSkipNToReturn(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long ntoreturn);
+ void runQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit);
void runQuerySortHint(const BSONObj& query, const BSONObj& sort, const BSONObj& hint);
@@ -108,18 +108,18 @@ protected:
const BSONObj& minObj,
const BSONObj& maxObj);
- void runQuerySortProjSkipNToReturnHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long ntoreturn,
- const BSONObj& hint);
+ void runQuerySortProjSkipLimitHint(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint);
void runQueryFull(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip,
- long long ntoreturn,
+ long long limit,
const BSONObj& hint,
const BSONObj& minObj,
const BSONObj& maxObj);
@@ -132,11 +132,11 @@ protected:
void runInvalidQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj);
- void runInvalidQuerySortProjSkipNToReturn(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long ntoreturn);
+ void runInvalidQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit);
void runInvalidQueryHint(const BSONObj& query, const BSONObj& hint);
@@ -145,18 +145,18 @@ protected:
const BSONObj& minObj,
const BSONObj& maxObj);
- void runInvalidQuerySortProjSkipNToReturnHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long ntoreturn,
- const BSONObj& hint);
+ void runInvalidQuerySortProjSkipLimitHint(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint);
void runInvalidQueryFull(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip,
- long long ntoreturn,
+ long long limit,
const BSONObj& hint,
const BSONObj& minObj,
const BSONObj& maxObj);
diff --git a/src/mongo/db/query/query_planner_test_lib.cpp b/src/mongo/db/query/query_planner_test_lib.cpp
index c2027ece913..400a5a55bb1 100644
--- a/src/mongo/db/query/query_planner_test_lib.cpp
+++ b/src/mongo/db/query/query_planner_test_lib.cpp
@@ -1088,41 +1088,7 @@ Status QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln,
return solutionMatches(child.Obj(), fn->children[0], relaxBoundsCheck)
.withContext("mismatch below shard filter stage");
- } else if (STAGE_ENSURE_SORTED == trueSoln->getType()) {
- const EnsureSortedNode* esn = static_cast<const EnsureSortedNode*>(trueSoln);
-
- BSONElement el = testSoln["ensureSorted"];
- if (el.eoo() || !el.isABSONObj()) {
- return {ErrorCodes::Error{5619208},
- "found a ensureSorted stage in the solution but no "
- "corresponding 'ensureSorted' object in the provided JSON"};
- }
- BSONObj esObj = el.Obj();
- invariant(bsonObjFieldsAreInSet(esObj, {"node", "pattern"}));
-
- BSONElement patternEl = esObj["pattern"];
- if (patternEl.eoo() || !patternEl.isABSONObj()) {
- return {ErrorCodes::Error{5676407},
- "found an ensureSorted stage in the solution but no 'pattern' object in the "
- "provided JSON"};
- }
- BSONElement child = esObj["node"];
- if (child.eoo() || !child.isABSONObj()) {
- return {ErrorCodes::Error{5676406},
- "found an ensureSorted stage in the solution but no 'node' sub-object in "
- "the provided JSON"};
- }
-
- if (!SimpleBSONObjComparator::kInstance.evaluate(patternEl.Obj() == esn->pattern)) {
- return {ErrorCodes::Error{5698302},
- str::stream() << "found an ensureSorted stage in the solution with "
- "mismatching 'pattern'. Expected: "
- << patternEl << " Found: " << esn->pattern};
- }
- return solutionMatches(child.Obj(), esn->children[0], relaxBoundsCheck)
- .withContext("mismatch below ensureSorted stage");
}
-
return {ErrorCodes::Error{5698301},
str::stream() << "Unknown query solution node found: " << trueSoln->toString()};
} // namespace mongo
diff --git a/src/mongo/db/query/query_planner_tree_test.cpp b/src/mongo/db/query/query_planner_tree_test.cpp
index 23269c35633..9d403989792 100644
--- a/src/mongo/db/query/query_planner_tree_test.cpp
+++ b/src/mongo/db/query/query_planner_tree_test.cpp
@@ -746,43 +746,41 @@ TEST_F(QueryPlannerTest, InCompoundIndexLastOrEquivalent) {
}
*/
-// SERVER-1205
TEST_F(QueryPlannerTest, InWithSort) {
addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProjSkipNToReturn(fromjson("{a: {$in: [1, 2]}}"), BSON("b" << 1), BSONObj(), 0, 1);
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"), BSON("b" << 1), BSONObj(), 0, 1);
assertSolutionExists(
"{sort: {pattern: {b: 1}, limit: 1, type: 'simple', node: "
"{cscan: {dir: 1}}}}");
assertSolutionExists(
- "{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a: 1, b: 1}}}, {ixscan: {pattern: {a: 1, b: 1}}}]}}}}");
+ "{limit: {n: 1, node: {fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a: 1, b: 1}}}, {ixscan: {pattern: {a: 1, b: 1}}}]}}}}}}");
}
-// SERVER-1205
TEST_F(QueryPlannerTest, InWithoutSort) {
addIndex(BSON("a" << 1 << "b" << 1));
// No sort means we don't bother to blow up the bounds.
- runQuerySortProjSkipNToReturn(fromjson("{a: {$in: [1, 2]}}"), BSONObj(), BSONObj(), 0, 1);
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"), BSONObj(), BSONObj(), 0, 1);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
+ assertSolutionExists("{limit: {n: 1, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{limit: {n: 1, node: {fetch: {node: {ixscan: {pattern: {a: 1, b: 1}}}}}}}");
}
-// SERVER-1205
TEST_F(QueryPlannerTest, ManyInWithSort) {
addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
- runQuerySortProjSkipNToReturn(fromjson("{a: {$in: [1, 2]}, b:{$in:[1,2]}, c:{$in:[1,2]}}"),
- BSON("d" << 1),
- BSONObj(),
- 0,
- 1);
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}, b:{$in:[1,2]}, c:{$in:[1,2]}}"),
+ BSON("d" << 1),
+ BSONObj(),
+ 0,
+ 1);
assertSolutionExists(
"{sort: {pattern: {d: 1}, limit: 1, type: 'simple', node: "
"{cscan: {dir: 1}}}}");
assertSolutionExists(
- "{fetch: {node: {mergeSort: {nodes: "
+ "{limit: {n: 1, node: {fetch: {node: {mergeSort: {nodes: "
"[{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
"{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
"{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
@@ -790,19 +788,18 @@ TEST_F(QueryPlannerTest, ManyInWithSort) {
"{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
"{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
"{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
- "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}}]}}}}");
+ "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}}]}}}}}}");
}
-// SERVER-1205
TEST_F(QueryPlannerTest, TooManyToExplode) {
addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
- runQuerySortProjSkipNToReturn(fromjson("{a: {$in: [1,2,3,4,5,6]},"
- "b:{$in:[1,2,3,4,5,6,7,8]},"
- "c:{$in:[1,2,3,4,5,6,7,8]}}"),
- BSON("d" << 1),
- BSONObj(),
- 0,
- 1);
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1,2,3,4,5,6]},"
+ "b:{$in:[1,2,3,4,5,6,7,8]},"
+ "c:{$in:[1,2,3,4,5,6,7,8]}}"),
+ BSON("d" << 1),
+ BSONObj(),
+ 0,
+ 1);
// We cap the # of ixscans we're willing to create.
assertNumSolutions(2);
@@ -1066,11 +1063,11 @@ TEST_F(QueryPlannerTest, ExplodeForSortIxscanFetchOrAndIxscanOr) {
TEST_F(QueryPlannerTest, InWithSortAndLimitTrailingField) {
addIndex(BSON("a" << 1 << "b" << -1 << "c" << 1));
- runQuerySortProjSkipNToReturn(fromjson("{a: {$in: [1, 2]}, b: {$gte: 0}}"),
- fromjson("{b: -1}"),
- BSONObj(), // no projection
- 0, // no skip
- -1); // .limit(1)
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}, b: {$gte: 0}}"),
+ fromjson("{b: -1}"),
+ BSONObj(), // no projection
+ 0, // no skip
+ 1); // .limit(1)
assertNumSolutions(2U);
assertSolutionExists(
@@ -1215,7 +1212,6 @@ TEST_F(QueryPlannerTest, NonIndexEqualitiesNotProvided) {
// Sort orders
//
-// SERVER-1205.
TEST_F(QueryPlannerTest, MergeSort) {
addIndex(BSON("a" << 1 << "c" << 1));
addIndex(BSON("b" << 1 << "c" << 1));
@@ -1230,7 +1226,6 @@ TEST_F(QueryPlannerTest, MergeSort) {
"[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}");
}
-// SERVER-1205 as well.
TEST_F(QueryPlannerTest, NoMergeSortIfNoSortWanted) {
addIndex(BSON("a" << 1 << "c" << 1));
addIndex(BSON("b" << 1 << "c" << 1));
diff --git a/src/mongo/db/query/query_planner_wildcard_index_test.cpp b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
index 7ebcafb4c21..cd943126a17 100644
--- a/src/mongo/db/query/query_planner_wildcard_index_test.cpp
+++ b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
@@ -576,7 +576,7 @@ TEST_F(QueryPlannerWildcardTest, OrWithOneRegularAndOneWildcardIndexPathUsesTwoI
TEST_F(QueryPlannerWildcardTest, BasicSkip) {
addWildcardIndex(BSON("$**" << 1));
- runQuerySkipNToReturn(BSON("a" << 5), 8, 0);
+ runQuerySkipLimit(BSON("a" << 5), 8, 0);
assertNumSolutions(1U);
assertSolutionExists(
@@ -587,7 +587,7 @@ TEST_F(QueryPlannerWildcardTest, BasicSkip) {
TEST_F(QueryPlannerWildcardTest, CoveredSkip) {
addWildcardIndex(BSON("$**" << 1));
- runQuerySortProjSkipNToReturn(fromjson("{a: 5}"), BSONObj(), fromjson("{_id: 0, a: 1}"), 8, 0);
+ runQuerySortProjSkipLimit(fromjson("{a: 5}"), BSONObj(), fromjson("{_id: 0, a: 1}"), 8, 0);
assertNumSolutions(1U);
assertSolutionExists(
@@ -598,7 +598,7 @@ TEST_F(QueryPlannerWildcardTest, CoveredSkip) {
TEST_F(QueryPlannerWildcardTest, BasicLimit) {
addWildcardIndex(BSON("$**" << 1));
- runQuerySkipNToReturn(BSON("a" << 5), 0, -5);
+ runQuerySkipLimit(BSON("a" << 5), 0, 5);
assertNumSolutions(1U);
assertSolutionExists(
diff --git a/src/mongo/db/query/query_request_helper.cpp b/src/mongo/db/query/query_request_helper.cpp
index 5f5ce186be8..7db63a1427b 100644
--- a/src/mongo/db/query/query_request_helper.cpp
+++ b/src/mongo/db/query/query_request_helper.cpp
@@ -186,7 +186,6 @@ Status initFullQuery(const BSONObj& top, FindCommandRequest* findCommand) {
}
Status initFindCommandRequest(int ntoskip,
- int ntoreturn,
int queryOptions,
const BSONObj& queryObj,
const BSONObj& proj,
@@ -199,24 +198,6 @@ Status initFindCommandRequest(int ntoskip,
findCommand->setSkip(ntoskip);
}
- if (ntoreturn) {
- if (ntoreturn < 0) {
- if (ntoreturn == std::numeric_limits<int>::min()) {
- // ntoreturn is negative but can't be negated.
- return Status(ErrorCodes::BadValue, "bad ntoreturn value in query");
- }
- findCommand->setNtoreturn(-ntoreturn);
- findCommand->setSingleBatch(true);
- } else {
- findCommand->setNtoreturn(ntoreturn);
- }
- }
-
- // An ntoreturn of 1 is special because it also means to return at most one batch.
- if (findCommand->getNtoreturn().value_or(0) == 1) {
- findCommand->setSingleBatch(true);
- }
-
// Initialize flags passed as 'queryOptions' bit vector.
initFromInt(queryOptions, findCommand);
@@ -421,12 +402,11 @@ StatusWith<std::unique_ptr<FindCommandRequest>> fromLegacyQuery(NamespaceStringO
const BSONObj& queryObj,
const BSONObj& proj,
int ntoskip,
- int ntoreturn,
int queryOptions) {
auto findCommand = std::make_unique<FindCommandRequest>(std::move(nssOrUuid));
- Status status = initFindCommandRequest(
- ntoskip, ntoreturn, queryOptions, queryObj, proj, true, findCommand.get());
+ Status status =
+ initFindCommandRequest(ntoskip, queryOptions, queryObj, proj, true, findCommand.get());
if (!status.isOK()) {
return status;
}
@@ -437,6 +417,9 @@ StatusWith<std::unique_ptr<FindCommandRequest>> fromLegacyQuery(NamespaceStringO
StatusWith<BSONObj> asAggregationCommand(const FindCommandRequest& findCommand) {
BSONObjBuilder aggregationBuilder;
+ // The find command will translate away ntoreturn above this layer.
+ tassert(5746106, "ntoreturn should not be set in the findCommand", !findCommand.getNtoreturn());
+
// First, check if this query has options that are not supported in aggregation.
if (!findCommand.getMin().isEmpty()) {
return {ErrorCodes::InvalidPipelineOperator,
@@ -472,10 +455,6 @@ StatusWith<BSONObj> asAggregationCommand(const FindCommandRequest& findCommand)
str::stream() << "Option " << FindCommandRequest::kAllowPartialResultsFieldName
<< " not supported in aggregation."};
}
- if (findCommand.getNtoreturn()) {
- return {ErrorCodes::BadValue,
- str::stream() << "Cannot convert to an aggregation if ntoreturn is set."};
- }
if (findCommand.getSort()[query_request_helper::kNaturalSortField]) {
return {ErrorCodes::InvalidPipelineOperator,
str::stream() << "Sort option " << query_request_helper::kNaturalSortField
diff --git a/src/mongo/db/query/query_request_helper.h b/src/mongo/db/query/query_request_helper.h
index 14409c62e39..c925c06dd38 100644
--- a/src/mongo/db/query/query_request_helper.h
+++ b/src/mongo/db/query/query_request_helper.h
@@ -154,7 +154,6 @@ StatusWith<std::unique_ptr<FindCommandRequest>> fromLegacyQuery(NamespaceStringO
const BSONObj& queryObj,
const BSONObj& proj,
int ntoskip,
- int ntoreturn,
int queryOptions);
} // namespace query_request_helper
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index 2e92a48f4e9..6eeb8704f26 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -1375,12 +1375,6 @@ TEST(QueryRequestTest, ConvertToAggregationWithAllowPartialResultsFails) {
ASSERT_NOT_OK(query_request_helper::asAggregationCommand(findCommand));
}
-TEST(QueryRequestTest, ConvertToAggregationWithNToReturnFails) {
- FindCommandRequest findCommand(testns);
- findCommand.setNtoreturn(7);
- ASSERT_NOT_OK(query_request_helper::asAggregationCommand(findCommand));
-}
-
TEST(QueryRequestTest, ConvertToAggregationWithRequestResumeTokenFails) {
FindCommandRequest findCommand(testns);
findCommand.setRequestResumeToken(true);
@@ -1556,7 +1550,6 @@ TEST(QueryRequestTest, ConvertToFindWithAllowDiskUseFalseSucceeds) {
TEST(QueryRequestTest, ParseFromLegacyQuery) {
const auto kSkip = 1;
- const auto kNToReturn = 2;
const NamespaceString nss("test.testns");
BSONObj queryObj = fromjson(R"({
query: {query: 1},
@@ -1567,7 +1560,7 @@ TEST(QueryRequestTest, ParseFromLegacyQuery) {
})");
unique_ptr<FindCommandRequest> findCommand(assertGet(query_request_helper::fromLegacyQuery(
- nss, queryObj, BSON("proj" << 1), kSkip, kNToReturn, QueryOption_Exhaust)));
+ nss, queryObj, BSON("proj" << 1), kSkip, QueryOption_Exhaust)));
ASSERT_EQ(*findCommand->getNamespaceOrUUID().nss(), nss);
ASSERT_BSONOBJ_EQ(findCommand->getFilter(), fromjson("{query: 1}"));
@@ -1577,7 +1570,7 @@ TEST(QueryRequestTest, ParseFromLegacyQuery) {
ASSERT_BSONOBJ_EQ(findCommand->getMin(), fromjson("{x: 'min'}"));
ASSERT_BSONOBJ_EQ(findCommand->getMax(), fromjson("{x: 'max'}"));
ASSERT_EQ(findCommand->getSkip(), boost::optional<int64_t>(kSkip));
- ASSERT_EQ(findCommand->getNtoreturn(), boost::optional<int64_t>(kNToReturn));
+ ASSERT_FALSE(findCommand->getNtoreturn());
ASSERT_EQ(findCommand->getSingleBatch(), false);
ASSERT_EQ(findCommand->getNoCursorTimeout(), false);
ASSERT_EQ(findCommand->getTailable(), false);
@@ -1589,13 +1582,12 @@ TEST(QueryRequestTest, ParseFromLegacyQueryOplogReplayFlagAllowed) {
auto queryObj = fromjson("{query: {query: 1}, orderby: {sort: 1}}");
const BSONObj projectionObj{};
const auto nToSkip = 0;
- const auto nToReturn = 0;
// Test that parsing succeeds even if the oplog replay bit is set in the OP_QUERY message. This
// flag may be set by old clients.
auto options = QueryOption_OplogReplay_DEPRECATED;
- unique_ptr<FindCommandRequest> findCommand(assertGet(query_request_helper::fromLegacyQuery(
- nss, queryObj, projectionObj, nToSkip, nToReturn, options)));
+ unique_ptr<FindCommandRequest> findCommand(assertGet(
+ query_request_helper::fromLegacyQuery(nss, queryObj, projectionObj, nToSkip, options)));
// Verify that if we reserialize the find command, the 'oplogReplay' field
// does not appear.
@@ -1615,8 +1607,8 @@ TEST(QueryRequestTest, ParseFromLegacyQueryUnwrapped) {
foo: 1
})");
const NamespaceString nss("test.testns");
- unique_ptr<FindCommandRequest> findCommand(assertGet(query_request_helper::fromLegacyQuery(
- nss, queryObj, BSONObj(), 0, 0, QueryOption_Exhaust)));
+ unique_ptr<FindCommandRequest> findCommand(assertGet(
+ query_request_helper::fromLegacyQuery(nss, queryObj, BSONObj(), 0, QueryOption_Exhaust)));
ASSERT_EQ(*findCommand->getNamespaceOrUUID().nss(), nss);
ASSERT_BSONOBJ_EQ(findCommand->getFilter(), fromjson("{foo: 1}"));
@@ -1636,18 +1628,6 @@ TEST(QueryRequestHelperTest, ValidateResponseWrongDataType) {
ErrorCodes::TypeMismatch);
}
-TEST(QueryRequestTest, ParseFromLegacyQueryTooNegativeNToReturn) {
- BSONObj queryObj = fromjson(R"({
- foo: 1
- })");
-
- const NamespaceString nss("test.testns");
- ASSERT_NOT_OK(
- query_request_helper::fromLegacyQuery(
- nss, queryObj, BSONObj(), 0, std::numeric_limits<int>::min(), QueryOption_Exhaust)
- .getStatus());
-}
-
TEST(QueryRequestTest, ParseFromLegacyQueryExplainError) {
BSONObj queryObj = fromjson(R"({
query: {query: 1},
@@ -1656,7 +1636,7 @@ TEST(QueryRequestTest, ParseFromLegacyQueryExplainError) {
const NamespaceString nss("test.testns");
ASSERT_EQUALS(
- query_request_helper::fromLegacyQuery(nss, queryObj, BSONObj(), 0, -1, QueryOption_Exhaust)
+ query_request_helper::fromLegacyQuery(nss, queryObj, BSONObj(), 0, QueryOption_Exhaust)
.getStatus()
.code(),
static_cast<ErrorCodes::Error>(5856600));
diff --git a/src/mongo/db/query/query_solution.cpp b/src/mongo/db/query/query_solution.cpp
index 28663f40e1c..d7561bb9675 100644
--- a/src/mongo/db/query/query_solution.cpp
+++ b/src/mongo/db/query/query_solution.cpp
@@ -1296,30 +1296,6 @@ QuerySolutionNode* CountScanNode::clone() const {
}
//
-// EnsureSortedNode
-//
-
-void EnsureSortedNode::appendToString(str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "ENSURE_SORTED\n";
- addIndent(ss, indent + 1);
- *ss << "pattern = " << pattern.toString() << '\n';
- addCommon(ss, indent);
- addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
-}
-
-QuerySolutionNode* EnsureSortedNode::clone() const {
- EnsureSortedNode* copy = new EnsureSortedNode();
- cloneBaseData(copy);
-
- copy->pattern = this->pattern;
-
- return copy;
-}
-
-//
// EofNode
//
diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h
index 9d0c8f5f9bf..c0d3411dd44 100644
--- a/src/mongo/db/query/query_solution.h
+++ b/src/mongo/db/query/query_solution.h
@@ -1172,38 +1172,6 @@ struct CountScanNode : public QuerySolutionNodeWithSortSet {
bool endKeyInclusive;
};
-/**
- * This stage drops results that are out of sorted order.
- */
-struct EnsureSortedNode : public QuerySolutionNode {
- EnsureSortedNode() {}
- virtual ~EnsureSortedNode() {}
-
- virtual StageType getType() const {
- return STAGE_ENSURE_SORTED;
- }
-
- virtual void appendToString(str::stream* ss, int indent) const;
-
- bool fetched() const {
- return children[0]->fetched();
- }
- FieldAvailability getFieldAvailability(const std::string& field) const {
- return children[0]->getFieldAvailability(field);
- }
- bool sortedByDiskLoc() const {
- return children[0]->sortedByDiskLoc();
- }
- const ProvidedSortSet& providedSorts() const {
- return children[0]->providedSorts();
- }
-
- QuerySolutionNode* clone() const;
-
- // The pattern that the results should be sorted by.
- BSONObj pattern;
-};
-
struct EofNode : public QuerySolutionNodeWithSortSet {
EofNode() {}
diff --git a/src/mongo/db/query/stage_types.cpp b/src/mongo/db/query/stage_types.cpp
index 9c54f668495..9b53650ee36 100644
--- a/src/mongo/db/query/stage_types.cpp
+++ b/src/mongo/db/query/stage_types.cpp
@@ -43,7 +43,6 @@ StringData stageTypeToString(StageType stageType) {
{STAGE_COUNT_SCAN, "COUNT_SCAN"_sd},
{STAGE_DELETE, "DELETE"_sd},
{STAGE_DISTINCT_SCAN, "DISTINCT_SCAN"_sd},
- {STAGE_ENSURE_SORTED, "SORTED"_sd},
{STAGE_EOF, "EOF"_sd},
{STAGE_FETCH, "FETCH"_sd},
{STAGE_GEO_NEAR_2D, "GEO_NEAR_2D"_sd},
diff --git a/src/mongo/db/query/stage_types.h b/src/mongo/db/query/stage_types.h
index fd3ce6fa261..ae6a2712b7b 100644
--- a/src/mongo/db/query/stage_types.h
+++ b/src/mongo/db/query/stage_types.h
@@ -71,8 +71,6 @@ enum StageType {
// scan stage is an ixscan with some key-skipping behvaior that only distinct uses.
STAGE_DISTINCT_SCAN,
- STAGE_ENSURE_SORTED,
-
STAGE_EOF,
STAGE_FETCH,
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index d5948177dbb..137812112c8 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -539,7 +539,7 @@ void OplogFetcher::_createNewCursor(bool initialFind) {
std::make_unique<DBClientCursor>(_conn.get(),
_nss,
_makeFindQuery(findTimeout),
- 0 /* nToReturn */,
+ 0 /* limit */,
0 /* nToSkip */,
nullptr /* fieldsToReturn */,
QueryOption_CursorTailable | QueryOption_AwaitData |
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index 2af2027aaec..24d885ddaba 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -145,9 +145,11 @@ public:
: BSON("$gte" << _oplogApplicationStartPoint);
_cursor = _client->query(NamespaceString::kRsOplogNamespace,
QUERY("ts" << predicate),
- /*batchSize*/ 0,
+ /*limit*/ 0,
/*skip*/ 0,
- /*projection*/ nullptr);
+ /*projection*/ nullptr,
+ /*options*/ 0,
+ /*batchSize*/ 0);
// Check that the first document matches our appliedThrough point then skip it since it's
// already been applied.
diff --git a/src/mongo/dbtests/SConscript b/src/mongo/dbtests/SConscript
index 9b6aa2dab62..b7a554f967b 100644
--- a/src/mongo/dbtests/SConscript
+++ b/src/mongo/dbtests/SConscript
@@ -107,7 +107,6 @@ env.Program(
'query_stage_count_scan.cpp',
'query_stage_delete.cpp',
'query_stage_distinct.cpp',
- 'query_stage_ensure_sorted.cpp',
'query_stage_fetch.cpp',
'query_stage_ixscan.cpp',
'query_stage_limit_skip.cpp',
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
index ec17681e12e..e3a21434aa2 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
@@ -101,7 +101,7 @@ std::pair<rpc::UniqueReply, DBClientBase*> MockDBClientConnection::runCommandWit
std::unique_ptr<mongo::DBClientCursor> MockDBClientConnection::query(
const NamespaceStringOrUUID& nsOrUuid,
mongo::Query query,
- int nToReturn,
+ int limit,
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
@@ -113,7 +113,7 @@ std::unique_ptr<mongo::DBClientCursor> MockDBClientConnection::query(
mongo::BSONArray result(_remoteServer->query(_remoteServerInstanceID,
nsOrUuid,
query,
- nToReturn,
+ limit,
nToSkip,
fieldsToReturn,
queryOptions,
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.h b/src/mongo/dbtests/mock/mock_dbclient_connection.h
index eb2485aeee0..1643b4f76fb 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.h
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.h
@@ -123,7 +123,7 @@ public:
std::unique_ptr<mongo::DBClientCursor> query(
const NamespaceStringOrUUID& nsOrUuid,
mongo::Query query = mongo::Query(),
- int nToReturn = 0,
+ int limit = 0,
int nToSkip = 0,
const mongo::BSONObj* fieldsToReturn = nullptr,
int queryOptions = 0,
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.cpp b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
index 9c3fe249327..e1e635665a8 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.cpp
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
@@ -198,7 +198,7 @@ BSONObj MockRemoteDBServer::project(projection_executor::ProjectionExecutor* pro
mongo::BSONArray MockRemoteDBServer::query(MockRemoteDBServer::InstanceID id,
const NamespaceStringOrUUID& nsOrUuid,
mongo::Query query,
- int nToReturn,
+ int limit,
int nToSkip,
const BSONObj* fieldsToReturn,
int queryOptions,
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.h b/src/mongo/dbtests/mock/mock_remote_db_server.h
index e49778b5e72..968d1603f11 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.h
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.h
@@ -167,7 +167,7 @@ public:
mongo::BSONArray query(InstanceID id,
const NamespaceStringOrUUID& nsOrUuid,
mongo::Query query = mongo::Query(),
- int nToReturn = 0,
+ int limit = 0,
int nToSkip = 0,
const mongo::BSONObj* fieldsToReturn = nullptr,
int queryOptions = 0,
diff --git a/src/mongo/dbtests/query_stage_ensure_sorted.cpp b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
deleted file mode 100644
index cddb3586d60..00000000000
--- a/src/mongo/dbtests/query_stage_ensure_sorted.cpp
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include <memory>
-
-#include "mongo/db/exec/ensure_sorted.h"
-#include "mongo/db/exec/queued_data_stage.h"
-#include "mongo/db/exec/sort_key_generator.h"
-#include "mongo/db/json.h"
-#include "mongo/db/query/collation/collator_interface_mock.h"
-#include "mongo/db/query/query_test_service_context.h"
-#include "mongo/unittest/unittest.h"
-
-namespace mongo {
-
-namespace {
-
-const NamespaceString kTestNss = NamespaceString("db.dummy");
-
-class QueryStageEnsureSortedTest : public unittest::Test {
-public:
- /**
- * Test function to verify the EnsureSortedStage.
- * patternStr is the JSON representation of the sort pattern BSONObj.
- * inputStr represents the input data set in a BSONObj.
- * {input: [doc1, doc2, doc3, ...]}
- * expectedStr represents the expected output data set.
- * {output: [docA, docB, docC, ...]}
- * collator is passed to EnsureSortedStage() for string comparisons.
- */
- void testWork(const char* patternStr,
- const char* inputStr,
- const char* expectedStr,
- std::unique_ptr<CollatorInterface> collator = nullptr) {
- auto opCtx = _serviceContext.makeOperationContext();
-
- // Create a mock ExpressionContext.
- boost::intrusive_ptr<ExpressionContext> expCtx(
- make_intrusive<ExpressionContext>(opCtx.get(), std::move(collator), kTestNss));
-
- WorkingSet ws;
- auto queuedDataStage = std::make_unique<QueuedDataStage>(expCtx.get(), &ws);
- BSONObj inputObj = fromjson(inputStr);
- BSONElement inputElt = inputObj["input"];
- ASSERT(inputElt.isABSONObj());
-
- for (auto&& elt : inputElt.embeddedObject()) {
- ASSERT(elt.isABSONObj());
- BSONObj obj = elt.embeddedObject().getOwned();
-
- // Insert obj from input array into working set.
- WorkingSetID id = ws.allocate();
- WorkingSetMember* wsm = ws.get(id);
- wsm->doc = {SnapshotId(), Document{obj}};
- wsm->transitionToOwnedObj();
- queuedDataStage->pushBack(id);
- }
-
- // Initialization.
- BSONObj pattern = fromjson(patternStr);
- auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>(
- expCtx.get(), std::move(queuedDataStage), &ws, pattern);
- EnsureSortedStage ess(expCtx.get(), pattern, &ws, std::move(sortKeyGen));
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = PlanStage::NEED_TIME;
-
- // Documents are inserted into BSON document in this format:
- // {output: [docA, docB, docC, ...]}
- BSONObjBuilder bob;
- BSONArrayBuilder arr(bob.subarrayStart("output"));
- while (state != PlanStage::IS_EOF) {
- state = ess.work(&id);
- if (state == PlanStage::ADVANCED) {
- WorkingSetMember* member = ws.get(id);
- auto obj = member->doc.value().toBson();
- arr.append(obj);
- }
- }
- ASSERT_TRUE(ess.isEOF());
- arr.doneFast();
- BSONObj outputObj = bob.obj();
-
- // Compare the results against what we expect.
- BSONObj expectedObj = fromjson(expectedStr);
- ASSERT_BSONOBJ_EQ(outputObj, expectedObj);
- }
-
-protected:
- QueryTestServiceContext _serviceContext;
-};
-
-TEST_F(QueryStageEnsureSortedTest, EnsureSortedEmptyWorkingSet) {
- auto opCtx = _serviceContext.makeOperationContext();
-
- // Create a mock ExpressionContext.
- boost::intrusive_ptr<ExpressionContext> pExpCtx(
- new ExpressionContext(opCtx.get(), nullptr, kTestNss));
-
- WorkingSet ws;
- auto queuedDataStage = std::make_unique<QueuedDataStage>(pExpCtx.get(), &ws);
- auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>(
- pExpCtx, std::move(queuedDataStage), &ws, BSONObj());
- EnsureSortedStage ess(pExpCtx.get(), BSONObj(), &ws, std::move(sortKeyGen));
-
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = PlanStage::NEED_TIME;
- while (state == PlanStage::NEED_TIME || state == PlanStage::NEED_YIELD) {
- state = ess.work(&id);
- ASSERT_NE(state, PlanStage::ADVANCED);
- }
- ASSERT_EQ(state, PlanStage::IS_EOF);
-}
-
-//
-// EnsureSorted on already sorted order should make no change.
-//
-
-TEST_F(QueryStageEnsureSortedTest, EnsureAlreadySortedAscending) {
- testWork("{a: 1}", "{input: [{a: 1}, {a: 2}, {a: 3}]}", "{output: [{a: 1}, {a: 2}, {a: 3}]}");
-}
-
-TEST_F(QueryStageEnsureSortedTest, EnsureAlreadySortedDescending) {
- testWork("{a: -1}", "{input: [{a: 3}, {a: 2}, {a: 1}]}", "{output: [{a: 3}, {a: 2}, {a: 1}]}");
-}
-
-TEST_F(QueryStageEnsureSortedTest, EnsureIrrelevantSortKey) {
- testWork("{b: 1}", "{input: [{a: 2}, {a: 1}, {a: 3}]}", "{output: [{a: 2}, {a: 1}, {a: 3}]}");
-}
-
-//
-// EnsureSorted should drop unsorted results.
-//
-
-TEST_F(QueryStageEnsureSortedTest, EnsureSortedOnAscending) {
- testWork("{a: 1}",
- "{input: [{a: 1}, {a: 2}, {a: 0}, {a: 4}, {a: 6}]}",
- "{output: [{a: 1}, {a: 2}, {a: 4}, {a: 6}]}");
-}
-
-TEST_F(QueryStageEnsureSortedTest, EnsureSortedOnDescending) {
- testWork("{a: -1}",
- "{input: [{a: 6}, {a: 4}, {a: 3}, {a: 9}, {a: 8}]}",
- "{output: [{a: 6}, {a: 4}, {a: 3}]}");
-}
-
-TEST_F(QueryStageEnsureSortedTest, EnsureSortedCompoundKey) {
- testWork("{a: -1, b: 1}",
- "{input: [{a: 6, b: 10}, {a: 6, b: 8}, {a: 6, b: 12}, {a: 9, b: 13}, {a: 5, b: 1}]}",
- "{output: [{a: 6, b: 10}, {a: 6, b: 12}, {a: 5, b: 1}]}");
-}
-
-TEST_F(QueryStageEnsureSortedTest, EnsureSortedStringsNullCollator) {
- testWork("{a: 1}", "{input: [{a: 'abc'}, {a: 'cba'}]}", "{output: [{a: 'abc'}, {a: 'cba'}]}");
-}
-
-TEST_F(QueryStageEnsureSortedTest, EnsureSortedStringsCollator) {
- auto collator =
- std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
- testWork("{a: 1}",
- "{input: [{a: 'abc'}, {a: 'cba'}]}",
- "{output: [{a: 'abc'}]}",
- std::move(collator));
-}
-
-} // namespace
-
-} // namespace mongo
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index b99b686f3bf..75278418410 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -299,7 +299,8 @@ public:
insert(ns, BSON("a" << 1));
insert(ns, BSON("a" << 2));
insert(ns, BSON("a" << 3));
- unique_ptr<DBClientCursor> cursor = _client.query(NamespaceString(ns), BSONObj(), 2);
+ unique_ptr<DBClientCursor> cursor =
+ _client.query(NamespaceString(ns), BSONObj(), 0, 0, nullptr, 0, 2);
long long cursorId = cursor->getCursorId();
cursor->decouple();
cursor.reset();
@@ -458,10 +459,11 @@ public:
insert(ns, BSON("a" << 2));
unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns),
Query().hint(BSON("$natural" << 1)),
- 2,
+ 0,
0,
nullptr,
- QueryOption_CursorTailable);
+ QueryOption_CursorTailable,
+ 2);
ASSERT(0 != c->getCursorId());
while (c->more())
c->next();
@@ -591,10 +593,11 @@ public:
insert(ns, BSON("a" << 1));
unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns),
Query().hint(BSON("$natural" << 1)),
- 2,
+ 0,
0,
nullptr,
- QueryOption_CursorTailable);
+ QueryOption_CursorTailable,
+ 2);
c->next();
c->next();
ASSERT(!c->more());
@@ -1915,7 +1918,8 @@ public:
{
// With five results and a batch size of 5, a cursor is created since we don't know
// there are no more results.
- std::unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns()), Query(), 5);
+ std::unique_ptr<DBClientCursor> c =
+ _client.query(NamespaceString(ns()), Query(), 0, 0, nullptr, 0, 5);
ASSERT(c->more());
ASSERT_NE(0, c->getCursorId());
for (int i = 0; i < 5; ++i) {
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index e4460b8c147..46374f2e505 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -78,6 +78,9 @@ std::unique_ptr<FindCommandRequest> parseCmdObjectToFindCommandRequest(Operation
uassert(51202,
"Cannot specify runtime constants option to a mongos",
!findCommand->getLegacyRuntimeConstants());
+ uassert(5746101,
+ "Cannot specify ntoreturn in a find command against mongos",
+ findCommand->getNtoreturn() == boost::none);
return findCommand;
}
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index cca6cf1ee02..180b0b32449 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -110,40 +110,6 @@ StatusWith<std::unique_ptr<FindCommandRequest>> transformQueryForShards(
newLimit = newLimitValue;
}
- // Similarly, if nToReturn is set, we forward the sum of nToReturn and the skip.
- boost::optional<int64_t> newNToReturn;
- if (findCommand.getNtoreturn()) {
- // 'singleBatch' and ntoreturn mean the same as 'singleBatch' and limit, so perform the
- // conversion.
- if (findCommand.getSingleBatch()) {
- int64_t newLimitValue;
- if (overflow::add(*findCommand.getNtoreturn(),
- findCommand.getSkip().value_or(0),
- &newLimitValue)) {
- return Status(ErrorCodes::Overflow,
- str::stream()
- << "sum of ntoreturn and skip cannot be represented as a 64-bit "
- "integer, ntoreturn: "
- << *findCommand.getNtoreturn()
- << ", skip: " << findCommand.getSkip().value_or(0));
- }
- newLimit = newLimitValue;
- } else {
- int64_t newNToReturnValue;
- if (overflow::add(*findCommand.getNtoreturn(),
- findCommand.getSkip().value_or(0),
- &newNToReturnValue)) {
- return Status(ErrorCodes::Overflow,
- str::stream()
- << "sum of ntoreturn and skip cannot be represented as a 64-bit "
- "integer, ntoreturn: "
- << *findCommand.getNtoreturn()
- << ", skip: " << findCommand.getSkip().value_or(0));
- }
- newNToReturn = newNToReturnValue;
- }
- }
-
// If there is a sort other than $natural, we send a sortKey meta-projection to the remote node.
BSONObj newProjection = findCommand.getProjection();
if (!findCommand.getSort().isEmpty() &&
@@ -166,7 +132,6 @@ StatusWith<std::unique_ptr<FindCommandRequest>> transformQueryForShards(
newQR->setProjection(newProjection);
newQR->setSkip(boost::none);
newQR->setLimit(newLimit);
- newQR->setNtoreturn(newNToReturn);
// Even if the client sends us singleBatch=true, we may need to retrieve
// multiple batches from a shard in order to return the single requested batch to the client.
@@ -264,8 +229,7 @@ CursorId runQueryWithoutRetrying(OperationContext* opCtx,
ClusterClientCursorParams params(
query.nss(), APIParameters::get(opCtx), readPref, ReadConcernArgs::get(opCtx));
params.originatingCommandObj = CurOp::get(opCtx)->opDescription().getOwned();
- params.batchSize =
- findCommand.getBatchSize() ? findCommand.getBatchSize() : findCommand.getNtoreturn();
+ params.batchSize = findCommand.getBatchSize();
params.tailableMode = query_request_helper::getTailableMode(findCommand);
params.isAllowPartialResults = findCommand.getAllowPartialResults();
params.lsid = opCtx->getLogicalSessionId();
diff --git a/src/mongo/scripting/mozjs/mongo.cpp b/src/mongo/scripting/mozjs/mongo.cpp
index fe7bba442bb..52c85259573 100644
--- a/src/mongo/scripting/mozjs/mongo.cpp
+++ b/src/mongo/scripting/mozjs/mongo.cpp
@@ -363,14 +363,14 @@ void MongoBase::Functions::find::call(JSContext* cx, JS::CallArgs args) {
if (haveFields)
fields = ValueWriter(cx, args.get(2)).toBSON();
- int nToReturn = ValueWriter(cx, args.get(3)).toInt32();
+ int limit = ValueWriter(cx, args.get(3)).toInt32();
int nToSkip = ValueWriter(cx, args.get(4)).toInt32();
int batchSize = ValueWriter(cx, args.get(5)).toInt32();
int options = ValueWriter(cx, args.get(6)).toInt32();
std::unique_ptr<DBClientCursor> cursor(conn->query(NamespaceString(ns),
q,
- nToReturn,
+ limit,
nToSkip,
haveFields ? &fields : nullptr,
options,