author     Tess Avitabile <tess.avitabile@mongodb.com>  2017-01-11 17:14:40 -0500
committer  Tess Avitabile <tess.avitabile@mongodb.com>  2017-01-13 17:56:02 -0500
commit     92e599237444912607e70a745fe5c0aa00dd4caf (patch)
tree       3241f19d67635438a2ceb142d0584d0bb38b5bce
parent     5c2aac3b24d0680418ee8fab1fa6e53be2a0eede (diff)
download   mongo-92e599237444912607e70a745fe5c0aa00dd4caf.tar.gz
SERVER-24623 Remove single document aggregation result option
-rw-r--r--  jstests/aggregation/bugs/server5782.js  10
-rw-r--r--  jstests/aggregation/bugs/server6189.js  5
-rw-r--r--  jstests/aggregation/bugs/server7695_isodates.js  4
-rw-r--r--  jstests/aggregation/bugs/server7768.js  4
-rw-r--r--  jstests/aggregation/bugs/server9444.js  6
-rw-r--r--  jstests/aggregation/extras/utils.js  10
-rw-r--r--  jstests/aggregation/mongos_slaveok.js  3
-rw-r--r--  jstests/aggregation/sources/facet/use_cases.js  6
-rw-r--r--  jstests/aggregation/testall.js  165
-rw-r--r--  jstests/aggregation/testshard1.js  95
-rw-r--r--  jstests/auth/lib/commands_lib.js  41
-rw-r--r--  jstests/core/index_stats.js  2
-rw-r--r--  jstests/core/max_time_ms.js  2
-rw-r--r--  jstests/core/operation_latency_histogram.js  6
-rw-r--r--  jstests/core/views/invalid_system_views.js  10
-rw-r--r--  jstests/core/views/views_aggregation.js  3
-rw-r--r--  jstests/core/views/views_all_commands.js  2
-rw-r--r--  jstests/core/views/views_coll_stats.js  3
-rw-r--r--  jstests/core/views/views_collation.js  106
-rw-r--r--  jstests/libs/read_committed_lib.js  14
-rw-r--r--  jstests/noPassthrough/commands_handle_kill.js  2
-rw-r--r--  jstests/noPassthrough/read_majority.js  8
-rw-r--r--  jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js  2
-rw-r--r--  jstests/readonly/aggregate.js  1
-rw-r--r--  jstests/replsets/commands_that_write_accept_wc.js  2
-rw-r--r--  jstests/sharding/authCommands.js  8
-rw-r--r--  jstests/sharding/commands_that_write_accept_wc_shards.js  8
-rw-r--r--  jstests/sharding/error_propagation.js  8
-rw-r--r--  jstests/sharding/max_time_ms_sharded.js  7
-rw-r--r--  jstests/sharding/read_pref_cmd.js  4
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp  23
-rw-r--r--  src/mongo/db/pipeline/aggregation_request.cpp  17
-rw-r--r--  src/mongo/db/pipeline/aggregation_request.h  18
-rw-r--r--  src/mongo/db/pipeline/aggregation_request_test.cpp  71
-rw-r--r--  src/mongo/db/pipeline/pipeline.cpp  24
-rw-r--r--  src/mongo/db/pipeline/pipeline.h  7
-rw-r--r--  src/mongo/db/query/count_request_test.cpp  5
-rw-r--r--  src/mongo/db/query/parsed_distinct_test.cpp  5
-rw-r--r--  src/mongo/db/query/query_request_test.cpp  10
-rw-r--r--  src/mongo/db/views/resolved_view.cpp  13
-rw-r--r--  src/mongo/db/views/resolved_view_test.cpp  45
41 files changed, 418 insertions(+), 367 deletions(-)
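In short: this commit removes the aggregate command's legacy single-document result format, so every invocation must now pass the cursor option and read results from cursor.firstBatch instead of result. A minimal before/after sketch in the mongo shell (the collection name "coll" is hypothetical):

    // Before this commit (single-document result form, now removed):
    var res = db.runCommand({aggregate: "coll", pipeline: [{$match: {}}]});
    printjson(res.result);  // all results in one array, bounded by the single-BSONObj size limit

    // After this commit (the cursor form is required):
    res = db.runCommand({aggregate: "coll", pipeline: [{$match: {}}], cursor: {}});
    printjson(res.cursor.firstBatch);  // first batch; fetch the rest with getMore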
diff --git a/jstests/aggregation/bugs/server5782.js b/jstests/aggregation/bugs/server5782.js
index 47bb7dd8826..f28694e19a8 100644
--- a/jstests/aggregation/bugs/server5782.js
+++ b/jstests/aggregation/bugs/server5782.js
@@ -6,13 +6,15 @@ db.server5782.save({string: "foo"});
// check that without $literal we end up comparing a field with itself and the result is true
var result = db.runCommand({
aggregate: "server5782",
- pipeline: [{$project: {stringis$string: {$eq: ["$string", '$string']}}}]
+ pipeline: [{$project: {stringis$string: {$eq: ["$string", '$string']}}}],
+ cursor: {}
});
-assert.eq(result.result[0].stringis$string, true);
+assert.eq(result.cursor.firstBatch[0].stringis$string, true);
// check that with $literal we end up comparing a field with '$string' and the result is true
var result = db.runCommand({
aggregate: "server5782",
- pipeline: [{$project: {stringis$string: {$eq: ["$string", {$literal: '$string'}]}}}]
+ pipeline: [{$project: {stringis$string: {$eq: ["$string", {$literal: '$string'}]}}}],
+ cursor: {}
});
-assert.eq(result.result[0].stringis$string, false);
+assert.eq(result.cursor.firstBatch[0].stringis$string, false);
diff --git a/jstests/aggregation/bugs/server6189.js b/jstests/aggregation/bugs/server6189.js
index f8cb88194e2..ebf21f8cb7d 100644
--- a/jstests/aggregation/bugs/server6189.js
+++ b/jstests/aggregation/bugs/server6189.js
@@ -38,7 +38,8 @@ function test(date, testSynthetics) {
,
format: {$dateToString: {format: ISOfmt, date: '$date'}}
}
- }]
+ }],
+ cursor: {}
});
if (date.valueOf() < 0 && _isWindows() && res.code == 16422) {
@@ -54,7 +55,7 @@ function test(date, testSynthetics) {
}
assert.commandWorked(res);
- assert.eq(res.result[0], {
+ assert.eq(res.cursor.firstBatch[0], {
year: date.getUTCFullYear(),
month: date.getUTCMonth() + 1 // jan == 1
,
diff --git a/jstests/aggregation/bugs/server7695_isodates.js b/jstests/aggregation/bugs/server7695_isodates.js
index b6a97fe1ec0..194cb7f02c4 100644
--- a/jstests/aggregation/bugs/server7695_isodates.js
+++ b/jstests/aggregation/bugs/server7695_isodates.js
@@ -16,7 +16,7 @@
var pipeline = [{$project: {_id: 0, result: {}}}];
pipeline[0].$project.result[op] = value;
var msg = "Exptected {" + op + ": " + value + "} to equal: " + expResult;
- var res = coll.runCommand('aggregate', {pipeline: pipeline});
+ var res = coll.runCommand('aggregate', {pipeline: pipeline, cursor: {}});
// in the case of $dateToString the date is on property date
var date = value.date || value;
@@ -33,7 +33,7 @@
return;
}
- assert.eq(res.result[0].result, expResult, pipeline);
+ assert.eq(res.cursor.firstBatch[0].result, expResult, pipeline);
}
// While development, there was a bug which caused an error with $dateToString if the order of
diff --git a/jstests/aggregation/bugs/server7768.js b/jstests/aggregation/bugs/server7768.js
index a820dd7526e..05cebcc97f5 100644
--- a/jstests/aggregation/bugs/server7768.js
+++ b/jstests/aggregation/bugs/server7768.js
@@ -6,8 +6,8 @@ db[collection].insert({foo: 1});
res = db.runCommand({
'aggregate': collection,
'pipeline': [{'$project': {'_id': false, 'foo': true}}],
- $readPreference: {'mode': 'primary'}
+ $readPreference: {'mode': 'primary'}, 'cursor': {}
});
assert.commandWorked(res);
-assert.eq(res.result, [{foo: 1}]);
+assert.eq(res.cursor.firstBatch, [{foo: 1}]);
diff --git a/jstests/aggregation/bugs/server9444.js b/jstests/aggregation/bugs/server9444.js
index b2f027d314c..b246c3e1ffd 100644
--- a/jstests/aggregation/bugs/server9444.js
+++ b/jstests/aggregation/bugs/server9444.js
@@ -24,17 +24,17 @@
function test(pipeline, outOfMemoryCode) {
// ensure by default we error out if exceeding memory limit
- var res = t.runCommand('aggregate', {pipeline: pipeline});
+ var res = t.runCommand('aggregate', {pipeline: pipeline, cursor: {}});
assert.commandFailed(res);
assert.eq(res.code, outOfMemoryCode);
// ensure allowDiskUse: false does what it says
- res = t.runCommand('aggregate', {pipeline: pipeline, allowDiskUse: false});
+ res = t.runCommand('aggregate', {pipeline: pipeline, cursor: {}, allowDiskUse: false});
assert.commandFailed(res);
assert.eq(res.code, outOfMemoryCode);
// allowDiskUse only supports bool. In particular, numbers aren't allowed.
- res = t.runCommand('aggregate', {pipeline: pipeline, allowDiskUse: 1});
+ res = t.runCommand('aggregate', {pipeline: pipeline, cursor: {}, allowDiskUse: 1});
assert.commandFailed(res);
// ensure we work when allowDiskUse === true
diff --git a/jstests/aggregation/extras/utils.js b/jstests/aggregation/extras/utils.js
index 2dd1038388c..fcad201f24e 100644
--- a/jstests/aggregation/extras/utils.js
+++ b/jstests/aggregation/extras/utils.js
@@ -259,16 +259,6 @@ function assertErrorCode(coll, pipe, code, errmsg) {
pipe = [pipe];
}
- // Test non-cursor
- var res = coll.runCommand("aggregate", {pipeline: pipe});
- if (res.ok || res.code != code)
- printjson({pipeline: pipe, result: res});
-
- /* assert failure with proper error code */
- assert(!res.ok, errmsg || "failed in assertErrorCode");
- assert.eq(res.code, code);
-
- // Test with cursors
var cmd = {pipeline: pipe};
// cmd.cursor = {};
cmd.cursor = {batchSize: 0};
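A note on the cursor: {batchSize: 0} setting kept above: a zero batch size asks the server to return an empty first batch together with a cursor id, deferring the actual documents (and any errors raised while iterating the pipeline) to getMore. A hypothetical illustration, not part of this commit:

    var res = db.runCommand({aggregate: "coll", pipeline: [], cursor: {batchSize: 0}});
    assert.eq(0, res.cursor.firstBatch.length);  // no documents returned yet
    var next = db.runCommand({getMore: res.cursor.id, collection: "coll"});
    printjson(next.cursor.nextBatch);  // results arrive on the follow-up getMore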
diff --git a/jstests/aggregation/mongos_slaveok.js b/jstests/aggregation/mongos_slaveok.js
index 866e98da30c..58440deed14 100644
--- a/jstests/aggregation/mongos_slaveok.js
+++ b/jstests/aggregation/mongos_slaveok.js
@@ -24,7 +24,8 @@
// wait for mongos to recognize that the slave is up
awaitRSClientHosts(st.s, secNode, {ok: true});
- var res = testDB.runCommand({aggregate: 'user', pipeline: [{$project: {x: 1}}]});
+ var res =
+ testDB.runCommand({aggregate: 'user', pipeline: [{$project: {x: 1}}], cursor: {}});
assert(res.ok, 'aggregate command failed: ' + tojson(res));
var profileQuery = {op: 'command', ns: 'test.user', 'command.aggregate': 'user'};
diff --git a/jstests/aggregation/sources/facet/use_cases.js b/jstests/aggregation/sources/facet/use_cases.js
index ede162d6a93..45791cab5d3 100644
--- a/jstests/aggregation/sources/facet/use_cases.js
+++ b/jstests/aggregation/sources/facet/use_cases.js
@@ -131,7 +131,8 @@
pipeline: [{
$lookup:
{from: shardedCollName, localField: "_id", foreignField: "_id", as: "results"}
- }]
+ }],
+ cursor: {}
}));
assert.eq(
28769, res.code, "Expected aggregation to fail due to $lookup on a sharded collection");
@@ -151,7 +152,8 @@
}
}]
}
- }]
+ }],
+ cursor: {}
}));
assert.eq(
28769, res.code, "Expected aggregation to fail due to $lookup on a sharded collection");
diff --git a/jstests/aggregation/testall.js b/jstests/aggregation/testall.js
index c58d3e1e023..66a4e76de35 100644
--- a/jstests/aggregation/testall.js
+++ b/jstests/aggregation/testall.js
@@ -9,7 +9,8 @@ load('jstests/aggregation/data/articles.js');
db = db.getSiblingDB("aggdb");
// just passing through fields
-var p1 = db.runCommand({aggregate: "article", pipeline: [{$project: {tags: 1, pageViews: 1}}]});
+var p1 = db.runCommand(
+ {aggregate: "article", pipeline: [{$project: {tags: 1, pageViews: 1}}], cursor: {}});
var p1result = [
{"_id": 1, "pageViews": 5, "tags": ["fun", "good", "fun"]},
@@ -17,10 +18,10 @@ var p1result = [
{"_id": 3, "pageViews": 6, "tags": ["nasty", "filthy"]}
];
-assert.docEq(p1.result, p1result, 'p1 failed');
+assert.docEq(p1.cursor.firstBatch, p1result, 'p1 failed');
// a simple array unwinding
-var u1 = db.runCommand({aggregate: "article", pipeline: [{$unwind: "$tags"}]});
+var u1 = db.runCommand({aggregate: "article", pipeline: [{$unwind: "$tags"}], cursor: {}});
var u1result = [
{
@@ -110,12 +111,12 @@ var u1result = [
}
];
-assert.docEq(u1.result, u1result, 'u1 failed');
+assert.docEq(u1.cursor.firstBatch, u1result, 'u1 failed');
// unwind an array at the end of a dotted path
db.ut.drop();
db.ut.save({_id: 4, a: 1, b: {e: 7, f: [4, 3, 2, 1]}, c: 12, d: 17});
-var u2 = db.runCommand({aggregate: "ut", pipeline: [{$unwind: "$b.f"}]});
+var u2 = db.runCommand({aggregate: "ut", pipeline: [{$unwind: "$b.f"}], cursor: {}});
var u2result = [
{"_id": 4, "a": 1, "b": {"e": 7, "f": 4}, "c": 12, "d": 17},
@@ -124,12 +125,13 @@ var u2result = [
{"_id": 4, "a": 1, "b": {"e": 7, "f": 1}, "c": 12, "d": 17}
];
-assert.docEq(u2.result, u2result, 'u2 failed');
+assert.docEq(u2.cursor.firstBatch, u2result, 'u2 failed');
// combining a projection with unwinding an array
var p2 = db.runCommand({
aggregate: "article",
- pipeline: [{$project: {author: 1, tags: 1, pageViews: 1}}, {$unwind: "$tags"}]
+ pipeline: [{$project: {author: 1, tags: 1, pageViews: 1}}, {$unwind: "$tags"}],
+ cursor: {}
});
var p2result = [
@@ -142,22 +144,24 @@ var p2result = [
{"_id": 3, "author": "jane", "pageViews": 6, "tags": "filthy"}
];
-assert.docEq(p2.result, p2result, 'p2 failed');
+assert.docEq(p2.cursor.firstBatch, p2result, 'p2 failed');
// pulling values out of subdocuments
var p3 = db.runCommand({
aggregate: "article",
- pipeline: [{$project: {otherfoo: "$other.foo", otherbar: "$other.bar"}}]
+ pipeline: [{$project: {otherfoo: "$other.foo", otherbar: "$other.bar"}}],
+ cursor: {}
});
var p3result = [{"_id": 1, "otherfoo": 5}, {"_id": 2, "otherbar": 14}, {"_id": 3, "otherbar": 14}];
-assert.docEq(p3.result, p3result, 'p3 failed');
+assert.docEq(p3.cursor.firstBatch, p3result, 'p3 failed');
// projection includes a computed value
var p4 = db.runCommand({
aggregate: "article",
- pipeline: [{$project: {author: 1, daveWroteIt: {$eq: ["$author", "dave"]}}}]
+ pipeline: [{$project: {author: 1, daveWroteIt: {$eq: ["$author", "dave"]}}}],
+ cursor: {}
});
var p4result = [
@@ -166,7 +170,7 @@ var p4result = [
{"_id": 3, "author": "jane", "daveWroteIt": false}
];
-assert.docEq(p4.result, p4result, 'p4 failed');
+assert.docEq(p4.cursor.firstBatch, p4result, 'p4 failed');
// projection includes a virtual (fabricated) document
var p5 = db.runCommand({
@@ -175,7 +179,8 @@ var p5 = db.runCommand({
{$project: {author: 1, pageViews: 1, tags: 1}},
{$unwind: "$tags"},
{$project: {author: 1, subDocument: {foo: "$pageViews", bar: "$tags"}}}
- ]
+ ],
+ cursor: {}
});
var p5result = [
@@ -188,7 +193,7 @@ var p5result = [
{"_id": 3, "author": "jane", "subDocument": {"foo": 6, "bar": "filthy"}}
];
-assert.docEq(p5.result, p5result, 'p5 failed');
+assert.docEq(p5.cursor.firstBatch, p5result, 'p5 failed');
// multi-step aggregate
// nested expressions in computed fields
@@ -206,7 +211,8 @@ var p6 = db.runCommand({
weLikeIt: {$or: [{$eq: ["$author", "dave"]}, {$eq: ["$tags", "good"]}]}
}
}
- ]
+ ],
+ cursor: {}
});
var p6result = [
@@ -268,24 +274,25 @@ var p6result = [
}
];
-assert.docEq(p6.result, p6result, 'p6 failed');
+assert.docEq(p6.cursor.firstBatch, p6result, 'p6 failed');
// slightly more complex computed expression; $ifNull
var p7 = db.runCommand({
aggregate: "article",
pipeline:
- [{$project: {theSum: {$add: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}}}]
+ [{$project: {theSum: {$add: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}}}],
+ cursor: {}
});
var p7result = [{"_id": 1, "theSum": 10}, {"_id": 2, "theSum": 21}, {"_id": 3, "theSum": 20}];
-assert.docEq(p7.result, p7result, 'p7 failed');
+assert.docEq(p7.cursor.firstBatch, p7result, 'p7 failed');
// dotted path inclusion; _id exclusion
var p8 = db.runCommand({
aggregate: "article",
- pipeline:
- [{$project: {_id: 0, author: 1, tags: 1, "comments.author": 1}}, {$unwind: "$tags"}]
+ pipeline: [{$project: {_id: 0, author: 1, tags: 1, "comments.author": 1}}, {$unwind: "$tags"}],
+ cursor: {}
});
var p8result = [
@@ -298,12 +305,13 @@ var p8result = [
{"author": "jane", "tags": "filthy", "comments": [{"author": "will"}, {"author": "jenny"}]}
];
-assert.docEq(p8.result, p8result, 'p8 failed');
+assert.docEq(p8.cursor.firstBatch, p8result, 'p8 failed');
// collapse a dotted path with an intervening array
var p9 = db.runCommand({
aggregate: "article",
- pipeline: [{$project: {_id: 0, author: 1, commentsAuthor: "$comments.author"}}]
+ pipeline: [{$project: {_id: 0, author: 1, commentsAuthor: "$comments.author"}}],
+ cursor: {}
});
var p9result = [
@@ -312,10 +320,10 @@ var p9result = [
{"author": "jane", "commentsAuthor": ["will", "jenny"]}
];
-assert.docEq(p9.result, p9result, 'p9 failed');
+assert.docEq(p9.cursor.firstBatch, p9result, 'p9 failed');
// simple sort
-var p10 = db.runCommand({aggregate: "article", pipeline: [{$sort: {title: 1}}]});
+var p10 = db.runCommand({aggregate: "article", pipeline: [{$sort: {title: 1}}], cursor: {}});
var p10result = [
{
@@ -357,7 +365,7 @@ var p10result = [
}
];
-assert.docEq(p10.result, p10result, 'p10 failed');
+assert.docEq(p10.cursor.firstBatch, p10result, 'p10 failed');
// unwind on nested array
db.p11.drop();
@@ -373,7 +381,8 @@ var p11 = db.runCommand({
pipeline: [
{$unwind: "$items.authors"},
{$project: {name: 1, author: "$items.authors"}},
- ]
+ ],
+ cursor: {}
});
p11result = [
@@ -382,7 +391,7 @@ p11result = [
{"_id": 5, "name": "MongoDB", "author": "bjornar"}
];
-assert.docEq(p11.result, p11result, 'p11 failed');
+assert.docEq(p11.cursor.firstBatch, p11result, 'p11 failed');
// multiply test
var p12 = db.runCommand({
@@ -390,23 +399,23 @@ var p12 = db.runCommand({
pipeline: [{
$project:
{theProduct: {$multiply: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}}
- }]
+ }],
+ cursor: {}
});
var p12result =
[{"_id": 1, "theProduct": 25}, {"_id": 2, "theProduct": 98}, {"_id": 3, "theProduct": 84}];
-assert.docEq(p12.result, p12result, 'p12 failed');
+assert.docEq(p12.cursor.firstBatch, p12result, 'p12 failed');
// subtraction test
var p13 = db.runCommand({
aggregate: "article",
pipeline: [{
- $project: {
- theDifference:
- {$subtract: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}
- }
- }]
+ $project:
+ {theDifference: {$subtract: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}}
+ }],
+ cursor: {}
});
var p13result = [
@@ -415,7 +424,7 @@ var p13result = [
{"_id": 3, "theDifference": -8}
];
-assert.docEq(p13.result, p13result, 'p13 failed');
+assert.docEq(p13.cursor.firstBatch, p13result, 'p13 failed');
// mod test
var p14 = db.runCommand({
@@ -429,17 +438,21 @@ var p14 = db.runCommand({
]
}
}
- }]
+ }],
+ cursor: {}
});
var p14result =
[{"_id": 1, "theRemainder": 0}, {"_id": 2, "theRemainder": 0}, {"_id": 3, "theRemainder": 2}];
-assert.docEq(p14.result, p14result, 'p14 failed');
+assert.docEq(p14.cursor.firstBatch, p14result, 'p14 failed');
// toUpper test
-var p15 = db.runCommand(
- {aggregate: "article", pipeline: [{$project: {author: {$toUpper: "$author"}, pageViews: 1}}]});
+var p15 = db.runCommand({
+ aggregate: "article",
+ pipeline: [{$project: {author: {$toUpper: "$author"}, pageViews: 1}}],
+ cursor: {}
+});
var p15result = [
{"_id": 1, "author": "BOB", "pageViews": 5},
@@ -447,7 +460,7 @@ var p15result = [
{"_id": 3, "author": "JANE", "pageViews": 6}
];
-assert.docEq(p15.result, p15result, 'p15 failed');
+assert.docEq(p15.cursor.firstBatch, p15result, 'p15 failed');
// toLower test
var p16 = db.runCommand({
@@ -455,7 +468,8 @@ var p16 = db.runCommand({
pipeline: [
{$project: {author: {$toUpper: "$author"}, pageViews: 1}},
{$project: {author: {$toLower: "$author"}, pageViews: 1}}
- ]
+ ],
+ cursor: {}
});
var p16result = [
@@ -476,7 +490,7 @@ var p16result = [
}
];
-assert.docEq(p16.result, p16result, 'p16 failed');
+assert.docEq(p16.cursor.firstBatch, p16result, 'p16 failed');
// substr test
var p17 = db.runCommand({
@@ -485,13 +499,14 @@ var p17 = db.runCommand({
$project: {
author: {$substrBytes: ["$author", 1, 2]},
}
- }]
+ }],
+ cursor: {}
});
var p17result =
[{"_id": 1, "author": "ob"}, {"_id": 2, "author": "av"}, {"_id": 3, "author": "an"}];
-assert.docEq(p17.result, p17result, 'p17 failed');
+assert.docEq(p17.cursor.firstBatch, p17result, 'p17 failed');
// strcasecmp test
var p18 = db.runCommand({
@@ -502,7 +517,8 @@ var p18 = db.runCommand({
thisisalametest: {$strcasecmp: ["foo", "bar"]},
thisisalamepass: {$strcasecmp: ["foo", "foo"]}
}
- }]
+ }],
+ cursor: {}
});
var p18result = [
@@ -511,7 +527,7 @@ var p18result = [
{"_id": 3, "tags": ["nasty", "filthy"], "thisisalametest": 1, "thisisalamepass": 0}
];
-assert.docEq(p18.result, p18result, 'p18 failed');
+assert.docEq(p18.cursor.firstBatch, p18result, 'p18 failed');
// date tests
var p19 = db.runCommand({
@@ -530,7 +546,8 @@ var p19 = db.runCommand({
week: {$week: "$posted"},
year: {$year: "$posted"}
}
- }]
+ }],
+ cursor: {}
});
var p19result = [
@@ -575,7 +592,7 @@ var p19result = [
}
];
-assert.docEq(p19.result, p19result, 'p19 failed');
+assert.docEq(p19.cursor.firstBatch, p19result, 'p19 failed');
db.vartype.drop();
db.vartype.save({x: 17, y: "foo"});
@@ -588,11 +605,11 @@ var p21 = db.runCommand({
_id: 0,
author: 1,
pageViews: {
- $cond:
- [{$eq: ["$author", "dave"]}, {$add: ["$pageViews", 1000]}, "$pageViews"]
+ $cond: [{$eq: ["$author", "dave"]}, {$add: ["$pageViews", 1000]}, "$pageViews"]
}
}
- }]
+ }],
+ cursor: {}
});
var p21result = [
@@ -601,10 +618,10 @@ var p21result = [
{"author": "jane", "pageViews": 6}
];
-assert.docEq(p21.result, p21result, 'p21 failed');
+assert.docEq(p21.cursor.firstBatch, p21result, 'p21 failed');
// simple matching
-var m1 = db.runCommand({aggregate: "article", pipeline: [{$match: {author: "dave"}}]});
+var m1 = db.runCommand({aggregate: "article", pipeline: [{$match: {author: "dave"}}], cursor: {}});
var m1result = [{
"_id": 2,
@@ -620,7 +637,7 @@ var m1result = [{
"other": {"bar": 14}
}];
-assert.docEq(m1.result, m1result, 'm1 failed');
+assert.docEq(m1.cursor.firstBatch, m1result, 'm1 failed');
// combining matching with a projection
var m2 = db.runCommand({
@@ -629,7 +646,8 @@ var m2 = db.runCommand({
{$project: {title: 1, author: 1, pageViews: 1, tags: 1, comments: 1}},
{$unwind: "$tags"},
{$match: {tags: "nasty"}}
- ]
+ ],
+ cursor: {}
});
var m2result = [
@@ -657,7 +675,7 @@ var m2result = [
}
];
-assert.docEq(m2.result, m2result, 'm2 failed');
+assert.docEq(m2.cursor.firstBatch, m2result, 'm2 failed');
// group by tag, _id is a field reference
var g1 = db.runCommand({
@@ -667,7 +685,8 @@ var g1 = db.runCommand({
{$unwind: "$tags"},
{$group: {_id: "$tags", docsByTag: {$sum: 1}, viewsByTag: {$sum: "$pageViews"}}},
{$sort: {'_id': 1}}
- ]
+ ],
+ cursor: {}
});
var g1result = [
@@ -677,7 +696,7 @@ var g1result = [
{"_id": "nasty", "docsByTag": 2, "viewsByTag": 13},
];
-assert.docEq(g1.result, g1result, 'g1 failed');
+assert.docEq(g1.cursor.firstBatch, g1result, 'g1 failed');
// $max, and averaging in a final projection; _id is structured
var g2 = db.runCommand({
@@ -704,7 +723,8 @@ var g2 = db.runCommand({
}
},
{$sort: {'docsByTag': 1, 'viewsByTag': 1}}
- ]
+ ],
+ cursor: {}
});
var g2result = [
@@ -720,7 +740,7 @@ var g2result = [
}
];
-assert.docEq(g2.result, g2result, 'g2 failed');
+assert.docEq(g2.cursor.firstBatch, g2result, 'g2 failed');
// $push as an accumulator; can pivot data
var g3 = db.runCommand({
@@ -735,7 +755,8 @@ var g3 = db.runCommand({
{$unwind: "$tags"},
{$group: {_id: {tags: "$tags"}, authors: {$push: "$author"}}},
{$sort: {'_id': 1}}
- ]
+ ],
+ cursor: {}
});
var g3result = [
@@ -745,7 +766,7 @@ var g3result = [
{"_id": {"tags": "nasty"}, "authors": ["dave", "jane"]}
];
-assert.docEq(g3.result, g3result, 'g3 failed');
+assert.docEq(g3.cursor.firstBatch, g3result, 'g3 failed');
// $avg, and averaging in a final projection
var g4 = db.runCommand({
@@ -762,7 +783,8 @@ var g4 = db.runCommand({
}
},
{$sort: {'_id': 1}}
- ]
+ ],
+ cursor: {}
});
var g4result = [
@@ -772,7 +794,7 @@ var g4result = [
{"_id": {"tags": "nasty"}, "docsByTag": 2, "viewsByTag": 13, "avgByTag": 6.5}
];
-assert.docEq(g4.result, g4result, 'g4 failed');
+assert.docEq(g4.cursor.firstBatch, g4result, 'g4 failed');
// $addToSet as an accumulator; can pivot data
var g5 = db.runCommand({
@@ -787,11 +809,12 @@ var g5 = db.runCommand({
{$unwind: "$tags"},
{$group: {_id: {tags: "$tags"}, authors: {$addToSet: "$author"}}},
{$sort: {'_id': 1}}
- ]
+ ],
+ cursor: {}
});
// $addToSet doesn't guarantee order so we shouldn't test for it.
-g5.result.forEach(function(obj) {
+g5.cursor.firstBatch.forEach(function(obj) {
obj.authors.sort();
});
@@ -814,7 +837,7 @@ var g5result = [
}
];
-assert.docEq(g5.result, g5result, 'g5 failed');
+assert.docEq(g5.cursor.firstBatch, g5result, 'g5 failed');
// $first and $last accumulators, constant _id
var g6 = db.runCommand({
@@ -829,7 +852,8 @@ var g6 = db.runCommand({
count: {$sum: 1}
}
}
- ]
+ ],
+ cursor: {}
});
var g6result = [{"_id": "authors", firstAuthor: "bob", lastAuthor: "jane", count: 3}];
@@ -845,6 +869,7 @@ var g7 = db.runCommand({
count: {$sum: 1}
}
}
- ]
+ ],
+ cursor: {}
});
-assert.eq(g7.result[0].count, 7);
+assert.eq(g7.cursor.firstBatch[0].count, 7);
diff --git a/jstests/aggregation/testshard1.js b/jstests/aggregation/testshard1.js
index 1dfc74725ca..42afc5b72e6 100644
--- a/jstests/aggregation/testshard1.js
+++ b/jstests/aggregation/testshard1.js
@@ -1,21 +1,6 @@
load('jstests/aggregation/extras/utils.js');
load('jstests/libs/analyze_plan.js'); // For planHasStage.
-// Use this for aggregations that only have arrays or results of specified order.
-// It will check that cursors return the same results as non-cursors.
-function aggregateOrdered(coll, pipeline) {
- var cursor = coll.aggregate(pipeline).toArray();
- var noCursor = coll.runCommand('aggregate', {pipeline: pipeline}).result;
- assert.eq(cursor, noCursor);
- return cursor;
-}
-
-// Use this for aggregations that have arrays or results of unspecified order.
-// It will bypass the check that cursors return the same results as non-cursors.
-function aggregateNoOrder(coll, pipeline) {
- return coll.aggregate(pipeline).toArray();
-}
-
jsTestLog("Creating sharded cluster");
var shardedAggTest = new ShardingTest({
shards: 2,
@@ -86,11 +71,19 @@ for (var i = 0; i < shards.length; i++) {
}
jsTestLog('a project and group in shards, result combined in mongos');
-var a1 = aggregateNoOrder(db.ts1, [
- {$project: {cMod10: {$mod: ["$counter", 10]}, number: 1, counter: 1}},
- {$group: {_id: "$cMod10", numberSet: {$addToSet: "$number"}, avgCounter: {$avg: "$cMod10"}}},
- {$sort: {_id: 1}}
-]);
+var a1 = db.ts1
+ .aggregate([
+ {$project: {cMod10: {$mod: ["$counter", 10]}, number: 1, counter: 1}},
+ {
+ $group: {
+ _id: "$cMod10",
+ numberSet: {$addToSet: "$number"},
+ avgCounter: {$avg: "$cMod10"}
+ }
+ },
+ {$sort: {_id: 1}}
+ ])
+ .toArray();
for (i = 0; i < 10; ++i) {
assert.eq(a1[i].avgCounter, a1[i]._id, 'agg sharded test avgCounter failed');
@@ -98,37 +91,38 @@ for (i = 0; i < 10; ++i) {
}
jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
-var a2 = aggregateOrdered(db.ts1, [{$group: {_id: "all", total: {$sum: "$counter"}}}]);
+var a2 = db.ts1.aggregate([{$group: {_id: "all", total: {$sum: "$counter"}}}]).toArray();
jsTestLog('sum of an arithmetic progression S(n) = (n/2)(a(1) + a(n));');
assert.eq(a2[0].total, (nItems / 2) * (1 + nItems), 'agg sharded test counter sum failed');
jsTestLog('A group combining all documents into one, averaging a null field.');
-assert.eq(aggregateOrdered(db.ts1, [{$group: {_id: null, avg: {$avg: "$missing"}}}]),
+assert.eq(db.ts1.aggregate([{$group: {_id: null, avg: {$avg: "$missing"}}}]).toArray(),
[{_id: null, avg: null}]);
jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
var a3 =
- aggregateOrdered(db.ts1, [{$group: {_id: "$number", total: {$sum: 1}}}, {$sort: {_id: 1}}]);
+ db.ts1.aggregate([{$group: {_id: "$number", total: {$sum: 1}}}, {$sort: {_id: 1}}]).toArray();
for (i = 0; i < strings.length; ++i) {
assert.eq(a3[i].total, nItems / strings.length, 'agg sharded test sum numbers failed');
}
jsTestLog('a match takes place in the shards; just returning the results from mongos');
-var a4 = aggregateNoOrder(db.ts1, [{
- $match: {
- $or: [
- {counter: 55},
- {counter: 1111},
- {counter: 2222},
- {counter: 33333},
- {counter: 99999},
- {counter: 55555}
- ]
- }
- }]);
-
+var a4 = db.ts1
+ .aggregate([{
+ $match: {
+ $or: [
+ {counter: 55},
+ {counter: 1111},
+ {counter: 2222},
+ {counter: 33333},
+ {counter: 99999},
+ {counter: 55555}
+ ]
+ }
+ }])
+ .toArray();
assert.eq(a4.length, 6, tojson(a4));
for (i = 0; i < 6; ++i) {
c = a4[i].counter;
@@ -146,7 +140,7 @@ function testSkipLimit(ops, expectedCount) {
ops.push({$group: {_id: 1, count: {$sum: 1}}});
- var out = aggregateOrdered(db.ts1, ops);
+ var out = db.ts1.aggregate(ops).toArray();
assert.eq(out[0].count, expectedCount);
}
@@ -166,8 +160,11 @@ function testSortLimit(limit, direction) {
var from_cursor =
db.ts1.find({}, {random: 1, _id: 0}).sort({random: direction}).limit(limit).toArray();
shardedAggTest.startBalancer(); // TODO: remove after fixing SERVER-9622
- var from_agg = aggregateOrdered(
- db.ts1, [{$project: {random: 1, _id: 0}}, {$sort: {random: direction}}, {$limit: limit}]);
+ var from_agg =
+ db.ts1
+ .aggregate(
+ [{$project: {random: 1, _id: 0}}, {$sort: {random: direction}}, {$limit: limit}])
+ .toArray();
assert.eq(from_cursor, from_agg);
}
testSortLimit(1, 1);
@@ -179,8 +176,8 @@ testSortLimit(100, -1);
function testAvgStdDev() {
jsTestLog('testing $avg and $stdDevPop in sharded $group');
- // Note: not using aggregateOrdered since it requires exact results. $stdDevPop can vary
- // slightly between runs if a migration occurs. This is why we use assert.close below.
+ // $stdDevPop can vary slightly between runs if a migration occurs. This is why we use
+ // assert.close below.
var res = db.ts1
.aggregate([{
$group: {
@@ -219,7 +216,7 @@ testSample();
jsTestLog('test $out by copying source collection verbatim to output');
var outCollection = db.ts1_out;
-var res = aggregateOrdered(db.ts1, [{$out: outCollection.getName()}]);
+var res = db.ts1.aggregate([{$out: outCollection.getName()}]).toArray();
shardedAggTest.stopBalancer(); // TODO: remove after fixing SERVER-9622
assert.eq(db.ts1.find().itcount(), outCollection.find().itcount());
assert.eq(db.ts1.find().sort({_id: 1}).toArray(), outCollection.find().sort({_id: 1}).toArray());
@@ -231,9 +228,13 @@ assert.commandFailed(
assert.writeOK(db.literal.save({dollar: false}));
-result = aggregateOrdered(
- db.literal,
- [{$project: {_id: 0, cost: {$cond: ['$dollar', {$literal: '$1.00'}, {$literal: '$.99'}]}}}]);
+result =
+ db.literal
+ .aggregate([{
+ $project:
+ {_id: 0, cost: {$cond: ['$dollar', {$literal: '$1.00'}, {$literal: '$.99'}]}}
+ }])
+ .toArray();
assert.eq([{cost: '$.99'}], result);
@@ -271,7 +272,7 @@ for (var shardName in res.shards) {
var pipeline = [{$match: {_id: targetId}}, {$project: {_id: 1}}, {$sort: {_id: 1}}];
var expectedDocs = [{_id: targetId}];
// Normal pipeline.
- assert.eq(aggregateOrdered(db.ts1, pipeline), expectedDocs);
+ assert.eq(db.ts1.aggregate(pipeline).toArray(), expectedDocs);
// With $out.
db[outCollection].drop();
pipeline.push({$out: outCollection});
@@ -291,7 +292,7 @@ for (var shardName in res.shards) {
expectedDocs.push({_id: i});
}
// Normal pipeline.
- assert.eq(aggregateOrdered(db.ts1, pipeline), expectedDocs);
+ assert.eq(db.ts1.aggregate(pipeline).toArray(), expectedDocs);
// With $out.
db[outCollection].drop();
pipeline.push({$out: outCollection});
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index b685c30487a..3cbf3dfcc9b 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -25,7 +25,7 @@ be authorized.
{
testname: "aggregate_write",
- command: {aggregate: "foo", pipeline: [ {$out: "foo_out"} ] },
+ command: {aggregate: "foo", pipeline: [ {$out: "foo_out"} ], cursor: {} },
testcases: [
{ runOnDb: "roles_commands_1", roles: {readWrite: 1, readWriteAnyDatabase: 1} },
{ runOnDb: "roles_commands_2", roles: {readWriteAnyDatabase: 1} }
@@ -445,7 +445,7 @@ var authCommandsLib = {
{
testname: "aggregate_readonly",
- command: {aggregate: "foo", pipeline: []},
+ command: {aggregate: "foo", pipeline: [], cursor: {}},
testcases: [
{
runOnDb: firstDbName,
@@ -468,7 +468,7 @@ var authCommandsLib = {
teardown: function(db) {
db.view.drop();
},
- command: {aggregate: "view", pipeline: []},
+ command: {aggregate: "view", pipeline: [], cursor: {}},
testcases: [
// Tests that a user with read privileges on a view can aggregate it, even if they
// don't have read privileges on the underlying namespace.
@@ -529,7 +529,7 @@ var authCommandsLib = {
},
{
testname: "aggregate_write",
- command: {aggregate: "foo", pipeline: [{$out: "foo_out"}]},
+ command: {aggregate: "foo", pipeline: [{$out: "foo_out"}], cursor: {}},
testcases: [
{
runOnDb: firstDbName,
@@ -559,7 +559,7 @@ var authCommandsLib = {
teardown: function(db) {
db.view.drop();
},
- command: {aggregate: "view", pipeline: [{$out: "view_out"}]},
+ command: {aggregate: "view", pipeline: [{$out: "view_out"}], cursor: {}},
testcases: [
{
runOnDb: firstDbName,
@@ -589,7 +589,7 @@ var authCommandsLib = {
teardown: function(db) {
db.view.drop();
},
- command: {aggregate: "foo", pipeline: [{$out: "view"}]},
+ command: {aggregate: "foo", pipeline: [{$out: "view"}], cursor: {}},
testcases: [
{
runOnDb: firstDbName,
@@ -615,7 +615,7 @@ var authCommandsLib = {
},
{
testname: "aggregate_indexStats",
- command: {aggregate: "foo", pipeline: [{$indexStats: {}}]},
+ command: {aggregate: "foo", pipeline: [{$indexStats: {}}], cursor: {}},
setup: function(db) {
db.createCollection("foo");
},
@@ -641,9 +641,9 @@ var authCommandsLib = {
testname: "aggregate_lookup",
command: {
aggregate: "foo",
- pipeline: [
- {$lookup: {from: "bar", localField: "_id", foreignField: "_id", as: "results"}}
- ]
+ pipeline:
+ [{$lookup: {from: "bar", localField: "_id", foreignField: "_id", as: "results"}}],
+ cursor: {}
},
setup: function(db) {
db.createCollection("foo");
@@ -684,9 +684,10 @@ var authCommandsLib = {
},
command: {
aggregate: "foo",
- pipeline: [{
- $lookup: {from: "view", localField: "_id", foreignField: "_id", as: "results"}
- }]
+ pipeline: [
+ {$lookup: {from: "view", localField: "_id", foreignField: "_id", as: "results"}}
+ ],
+ cursor: {}
},
testcases: [
// Tests that a user can successfully $lookup into a view when given read access.
@@ -720,7 +721,8 @@ var authCommandsLib = {
connectToField: "barId",
as: "results"
}
- }]
+ }],
+ cursor: {}
},
setup: function(db) {
db.createCollection("foo");
@@ -769,7 +771,8 @@ var authCommandsLib = {
connectToField: "viewId",
as: "results"
}
- }]
+ }],
+ cursor: {}
},
testcases: [
// Tests that a user can successfully $graphLookup into a view when given read access.
@@ -793,7 +796,7 @@ var authCommandsLib = {
},
{
testname: "aggregate_collStats",
- command: {aggregate: "foo", pipeline: [{$collStats: {latencyStats: {}}}]},
+ command: {aggregate: "foo", pipeline: [{$collStats: {latencyStats: {}}}], cursor: {}},
setup: function(db) {
db.createCollection("foo");
},
@@ -862,7 +865,8 @@ var authCommandsLib = {
}
}]
}
- }]
+ }],
+ cursor: {}
},
setup: function(db) {
db.createCollection("foo");
@@ -919,7 +923,8 @@ var authCommandsLib = {
}
}]
}
- }]
+ }],
+ cursor: {}
},
setup: function(db) {
db.createCollection("foo");
diff --git a/jstests/core/index_stats.js b/jstests/core/index_stats.js
index 60b37fd571e..b912543bcb9 100644
--- a/jstests/core/index_stats.js
+++ b/jstests/core/index_stats.js
@@ -149,7 +149,7 @@
//
// Confirm index stats tick on aggregate w/ match.
//
- res = db.runCommand({aggregate: colName, pipeline: [{$match: {b: 1}}]});
+ res = db.runCommand({aggregate: colName, pipeline: [{$match: {b: 1}}], cursor: {}});
assert.commandWorked(res);
countB++;
assert.eq(countB, getUsageCount("b_1_c_1"));
diff --git a/jstests/core/max_time_ms.js b/jstests/core/max_time_ms.js
index 7762c8a439c..471e84690b8 100644
--- a/jstests/core/max_time_ms.js
+++ b/jstests/core/max_time_ms.js
@@ -379,7 +379,7 @@ assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut",
//
// "aggregate" command.
-res = t.runCommand("aggregate", {pipeline: [], maxTimeMS: 60 * 1000});
+res = t.runCommand("aggregate", {pipeline: [], cursor: {}, maxTimeMS: 60 * 1000});
assert(res.ok == 1,
"expected aggregate with maxtime to succeed, ok=" + res.ok + ", code=" + res.code);
diff --git a/jstests/core/operation_latency_histogram.js b/jstests/core/operation_latency_histogram.js
index 1e3f1a59b95..6e6f7ec1539 100644
--- a/jstests/core/operation_latency_histogram.js
+++ b/jstests/core/operation_latency_histogram.js
@@ -13,11 +13,11 @@
// Test aggregation command output format.
var commandResult = testDB.runCommand(
- {aggregate: testColl.getName(), pipeline: [{$collStats: {latencyStats: {}}}]});
+ {aggregate: testColl.getName(), pipeline: [{$collStats: {latencyStats: {}}}], cursor: {}});
assert.commandWorked(commandResult);
- assert(commandResult.result.length == 1);
+ assert(commandResult.cursor.firstBatch.length == 1);
- var stats = commandResult.result[0];
+ var stats = commandResult.cursor.firstBatch[0];
var histogramTypes = ["reads", "writes", "commands"];
assert(stats.hasOwnProperty("localTime"));
diff --git a/jstests/core/views/invalid_system_views.js b/jstests/core/views/invalid_system_views.js
index 14b8b49a2b7..741d835a58a 100644
--- a/jstests/core/views/invalid_system_views.js
+++ b/jstests/core/views/invalid_system_views.js
@@ -55,8 +55,9 @@
const lookup = {
$lookup: {from: "collection2", localField: "_id", foreignField: "_id", as: "match"}
};
- assert.commandWorked(viewsDB.runCommand({aggregate: "collection", pipeline: [lookup]}),
- makeErrorMessage("aggregate with $lookup"));
+ assert.commandWorked(
+ viewsDB.runCommand({aggregate: "collection", pipeline: [lookup], cursor: {}}),
+ makeErrorMessage("aggregate with $lookup"));
const graphLookup = {
$graphLookup: {
@@ -67,8 +68,9 @@
as: "match"
}
};
- assert.commandWorked(viewsDB.runCommand({aggregate: "collection", pipeline: [graphLookup]}),
- makeErrorMessage("aggregate with $graphLookup"));
+ assert.commandWorked(
+ viewsDB.runCommand({aggregate: "collection", pipeline: [graphLookup], cursor: {}}),
+ makeErrorMessage("aggregate with $graphLookup"));
assert.commandWorked(viewsDB.runCommand({dropIndexes: "collection", index: "x_1"}),
makeErrorMessage("dropIndexes"));
diff --git a/jstests/core/views/views_aggregation.js b/jstests/core/views/views_aggregation.js
index 8da8df5c27f..4a7e141a49f 100644
--- a/jstests/core/views/views_aggregation.js
+++ b/jstests/core/views/views_aggregation.js
@@ -90,6 +90,7 @@
viewsDB.runCommand({
aggregate: "invalidDocsView",
pipeline: [{$out: validatedCollName}],
+ cursor: {},
bypassDocumentValidation: true
}),
"Expected $out insertions to succeed since 'bypassDocumentValidation' was specified");
@@ -115,7 +116,7 @@
assert.commandWorked(
viewsDB.runCommand(
- {aggregate: "largeView", pipeline: [{$sort: {x: -1}}], allowDiskUse: true}),
+ {aggregate: "largeView", pipeline: [{$sort: {x: -1}}], cursor: {}, allowDiskUse: true}),
"Expected aggregate to succeed since 'allowDiskUse' was specified");
// The remaining tests involve $lookup and $graphLookup. We cannot lookup into sharded
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index e4c44e4a223..361cb97c127 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -82,7 +82,7 @@
_transferMods: {skip: isAnInternalCommand},
addShard: {skip: isUnrelated},
addShardToZone: {skip: isUnrelated},
- aggregate: {command: {aggregate: "view", pipeline: [{$match: {}}]}},
+ aggregate: {command: {aggregate: "view", pipeline: [{$match: {}}], cursor: {}}},
appendOplogNote: {skip: isUnrelated},
applyOps: {
command: {applyOps: [{op: "i", o: {_id: 1}, ns: "test.view"}]},
diff --git a/jstests/core/views/views_coll_stats.js b/jstests/core/views/views_coll_stats.js
index 311141775a6..986d698c92c 100644
--- a/jstests/core/views/views_coll_stats.js
+++ b/jstests/core/views/views_coll_stats.js
@@ -61,6 +61,7 @@
// Assert that attempting to retrieve storageStats fails.
makeView("a", "b");
assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "a", pipeline: [{$collStats: {storageStats: {}}}]}),
+ viewsDB.runCommand(
+ {aggregate: "a", pipeline: [{$collStats: {storageStats: {}}}], cursor: {}}),
ErrorCodes.CommandNotSupportedOnView);
}());
diff --git a/jstests/core/views/views_collation.js b/jstests/core/views/views_collation.js
index 9e4ed7feb30..5deaebba1b5 100644
--- a/jstests/core/views/views_collation.js
+++ b/jstests/core/views/views_collation.js
@@ -17,15 +17,15 @@
assert(!listCollectionsOutput.cursor.firstBatch[0].options.hasOwnProperty("collation"));
// Operations that do not specify a collation succeed.
- assert.commandWorked(viewsDB.runCommand({aggregate: "simpleView", pipeline: []}));
+ assert.commandWorked(viewsDB.runCommand({aggregate: "simpleView", pipeline: [], cursor: {}}));
assert.commandWorked(viewsDB.runCommand({find: "simpleView"}));
assert.commandWorked(viewsDB.runCommand({count: "simpleView"}));
assert.commandWorked(viewsDB.runCommand({distinct: "simpleView", key: "x"}));
// Operations that explicitly ask for the "simple" locale succeed against a view with the
// simple collation.
- assert.commandWorked(
- viewsDB.runCommand({aggregate: "simpleView", pipeline: [], collation: {locale: "simple"}}));
+ assert.commandWorked(viewsDB.runCommand(
+ {aggregate: "simpleView", pipeline: [], cursor: {}, collation: {locale: "simple"}}));
assert.commandWorked(viewsDB.runCommand({find: "simpleView", collation: {locale: "simple"}}));
assert.commandWorked(viewsDB.runCommand({count: "simpleView", collation: {locale: "simple"}}));
assert.commandWorked(
@@ -33,7 +33,8 @@
// Attempting to override a view's simple collation fails.
assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "simpleView", pipeline: [], collation: {locale: "en"}}),
+ viewsDB.runCommand(
+ {aggregate: "simpleView", pipeline: [], cursor: {}, collation: {locale: "en"}}),
ErrorCodes.OptionNotSupportedOnView);
assert.commandFailedWithCode(
viewsDB.runCommand({find: "simpleView", collation: {locale: "fr"}}),
@@ -53,14 +54,14 @@
assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.collation.locale, "fil");
// Operations that do not specify a collation succeed.
- assert.commandWorked(viewsDB.runCommand({aggregate: "filView", pipeline: []}));
+ assert.commandWorked(viewsDB.runCommand({aggregate: "filView", pipeline: [], cursor: {}}));
assert.commandWorked(viewsDB.runCommand({find: "filView"}));
assert.commandWorked(viewsDB.runCommand({count: "filView"}));
assert.commandWorked(viewsDB.runCommand({distinct: "filView", key: "x"}));
// Operations with a matching collation succeed.
- assert.commandWorked(
- viewsDB.runCommand({aggregate: "filView", pipeline: [], collation: {locale: "fil"}}));
+ assert.commandWorked(viewsDB.runCommand(
+ {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "fil"}}));
assert.commandWorked(viewsDB.runCommand({find: "filView", collation: {locale: "fil"}}));
assert.commandWorked(viewsDB.runCommand({count: "filView", collation: {locale: "fil"}}));
assert.commandWorked(
@@ -68,10 +69,12 @@
// Attempting to override the non-simple default collation of a view fails.
assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "filView", pipeline: [], collation: {locale: "en"}}),
+ viewsDB.runCommand(
+ {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "en"}}),
ErrorCodes.OptionNotSupportedOnView);
assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "filView", pipeline: [], collation: {locale: "simple"}}),
+ viewsDB.runCommand(
+ {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "simple"}}),
ErrorCodes.OptionNotSupportedOnView);
assert.commandFailedWithCode(viewsDB.runCommand({find: "filView", collation: {locale: "fr"}}),
ErrorCodes.OptionNotSupportedOnView);
@@ -105,18 +108,23 @@
// You can lookup into a view with the simple collation if the collection also has the same
// default collation.
- assert.commandWorked(
- viewsDB.runCommand({aggregate: "simpleCollection", pipeline: [lookupSimpleView]}));
- assert.commandWorked(
- viewsDB.runCommand({aggregate: "simpleCollection", pipeline: [graphLookupSimpleView]}));
+ assert.commandWorked(viewsDB.runCommand(
+ {aggregate: "simpleCollection", pipeline: [lookupSimpleView], cursor: {}}));
+ assert.commandWorked(viewsDB.runCommand(
+ {aggregate: "simpleCollection", pipeline: [graphLookupSimpleView], cursor: {}}));
// You can lookup into a view with the simple collation if the operation has a matching
// collation.
- assert.commandWorked(viewsDB.runCommand(
- {aggregate: "ukCollection", pipeline: [lookupSimpleView], collation: {locale: "simple"}}));
+ assert.commandWorked(viewsDB.runCommand({
+ aggregate: "ukCollection",
+ pipeline: [lookupSimpleView],
+ cursor: {},
+ collation: {locale: "simple"}
+ }));
assert.commandWorked(viewsDB.runCommand({
aggregate: "ukCollection",
pipeline: [graphLookupSimpleView],
+ cursor: {},
collation: {locale: "simple"}
}));
@@ -125,12 +133,14 @@
assert.commandFailedWithCode(viewsDB.runCommand({
aggregate: "simpleCollection",
pipeline: [lookupSimpleView],
+ cursor: {},
collation: {locale: "en"}
}),
ErrorCodes.OptionNotSupportedOnView);
assert.commandFailedWithCode(viewsDB.runCommand({
aggregate: "simpleCollection",
pipeline: [graphLookupSimpleView],
+ cursor: {},
collation: {locale: "zh"}
}),
ErrorCodes.OptionNotSupportedOnView);
@@ -151,35 +161,48 @@
// You can lookup into a view with no operation collation specified if the collection's
// collation matches the collation of the view.
assert.commandWorked(
- viewsDB.runCommand({aggregate: "filCollection", pipeline: [lookupFilView]}));
- assert.commandWorked(
- viewsDB.runCommand({aggregate: "filCollection", pipeline: [graphLookupFilView]}));
+ viewsDB.runCommand({aggregate: "filCollection", pipeline: [lookupFilView], cursor: {}}));
+ assert.commandWorked(viewsDB.runCommand(
+ {aggregate: "filCollection", pipeline: [graphLookupFilView], cursor: {}}));
// You can lookup into a view with a non-simple collation if the operation's collation
// matches.
- assert.commandWorked(viewsDB.runCommand(
- {aggregate: "ukCollection", pipeline: [lookupFilView], collation: {locale: "fil"}}));
- assert.commandWorked(viewsDB.runCommand(
- {aggregate: "ukCollection", pipeline: [graphLookupFilView], collation: {locale: "fil"}}));
+ assert.commandWorked(viewsDB.runCommand({
+ aggregate: "ukCollection",
+ pipeline: [lookupFilView],
+ cursor: {},
+ collation: {locale: "fil"}
+ }));
+ assert.commandWorked(viewsDB.runCommand({
+ aggregate: "ukCollection",
+ pipeline: [graphLookupFilView],
+ cursor: {},
+ collation: {locale: "fil"}
+ }));
// You can't lookup into a view when aggregating a collection whose default collation does
// not match the view's default collation.
assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "simpleCollection", pipeline: [lookupFilView]}),
+ viewsDB.runCommand({aggregate: "simpleCollection", cursor: {}, pipeline: [lookupFilView]}),
ErrorCodes.OptionNotSupportedOnView);
assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "simpleCollection", pipeline: [graphLookupFilView]}),
+ viewsDB.runCommand(
+ {aggregate: "simpleCollection", cursor: {}, pipeline: [graphLookupFilView]}),
ErrorCodes.OptionNotSupportedOnView);
// You can't lookup into a view when aggregating a collection and the operation's collation
// does not match the view's default collation.
- assert.commandFailedWithCode(
- viewsDB.runCommand(
- {aggregate: "filCollection", pipeline: [lookupFilView], collation: {locale: "zh"}}),
- ErrorCodes.OptionNotSupportedOnView);
+ assert.commandFailedWithCode(viewsDB.runCommand({
+ aggregate: "filCollection",
+ pipeline: [lookupFilView],
+ cursor: {},
+ collation: {locale: "zh"}
+ }),
+ ErrorCodes.OptionNotSupportedOnView);
assert.commandFailedWithCode(viewsDB.runCommand({
aggregate: "filCollection",
pipeline: [graphLookupFilView],
+ cursor: {},
collation: {locale: "zh"}
}),
ErrorCodes.OptionNotSupportedOnView);
@@ -189,9 +212,9 @@
assert.commandWorked(viewsDB.runCommand(
{create: "simpleView2", viewOn: "simpleCollection", collation: {locale: "simple"}}));
assert.commandWorked(
- viewsDB.runCommand({aggregate: "simpleView2", pipeline: [lookupSimpleView]}));
- assert.commandWorked(
- viewsDB.runCommand({aggregate: "simpleView2", pipeline: [graphLookupSimpleView]}));
+ viewsDB.runCommand({aggregate: "simpleView2", pipeline: [lookupSimpleView], cursor: {}}));
+ assert.commandWorked(viewsDB.runCommand(
+ {aggregate: "simpleView2", pipeline: [graphLookupSimpleView], cursor: {}}));
// You may perform an aggregation involving multiple views and collections if all the views
// have the same default collation.
@@ -204,21 +227,26 @@
as: "matched"
}
};
- assert.commandWorked(viewsDB.runCommand(
- {aggregate: "simpleView2", pipeline: [lookupSimpleView, graphLookupUkCollection]}));
+ assert.commandWorked(viewsDB.runCommand({
+ aggregate: "simpleView2",
+ pipeline: [lookupSimpleView, graphLookupUkCollection],
+ cursor: {}
+ }));
// You cannot perform an aggregation involving multiple views if the views don't all have
// the same default collation.
assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "filView", pipeline: [lookupSimpleView]}),
- ErrorCodes.OptionNotSupportedOnView);
- assert.commandFailedWithCode(
- viewsDB.runCommand({aggregate: "simpleView", pipeline: [lookupFilView]}),
+ viewsDB.runCommand({aggregate: "filView", pipeline: [lookupSimpleView], cursor: {}}),
ErrorCodes.OptionNotSupportedOnView);
assert.commandFailedWithCode(
- viewsDB.runCommand(
- {aggregate: "simpleCollection", pipeline: [lookupFilView, graphLookupSimpleView]}),
+ viewsDB.runCommand({aggregate: "simpleView", pipeline: [lookupFilView], cursor: {}}),
ErrorCodes.OptionNotSupportedOnView);
+ assert.commandFailedWithCode(viewsDB.runCommand({
+ aggregate: "simpleCollection",
+ pipeline: [lookupFilView, graphLookupSimpleView],
+ cursor: {}
+ }),
+ ErrorCodes.OptionNotSupportedOnView);
// You cannot create a view that depends on another view with a different default collation.
assert.commandFailedWithCode(
diff --git a/jstests/libs/read_committed_lib.js b/jstests/libs/read_committed_lib.js
index b7e00327f4e..bdfac32ee1b 100644
--- a/jstests/libs/read_committed_lib.js
+++ b/jstests/libs/read_committed_lib.js
@@ -36,6 +36,7 @@ function testReadCommittedLookup(db, secondary, rst) {
}
},
],
+ cursor: {},
readConcern: {
level: "majority",
}
@@ -52,6 +53,7 @@ function testReadCommittedLookup(db, secondary, rst) {
as: "match"
}
}],
+ cursor: {},
readConcern: {
level: "majority",
}
@@ -78,10 +80,10 @@ function testReadCommittedLookup(db, secondary, rst) {
}];
// Confirm lookup/graphLookup return the matched result.
- let result = db.runCommand(aggCmdLookupObj).result;
+ let result = db.runCommand(aggCmdLookupObj).cursor.firstBatch;
assert.eq(result, expectedMatchedResult);
- result = db.runCommand(aggCmdGraphLookupObj).result;
+ result = db.runCommand(aggCmdGraphLookupObj).cursor.firstBatch;
assert.eq(result, expectedMatchedResult);
// Stop oplog application on the secondary so that it won't acknowledge updates.
@@ -92,10 +94,10 @@ function testReadCommittedLookup(db, secondary, rst) {
// lookup/graphLookup should not see the update, since it has not been acknowledged by the
// secondary.
- result = db.runCommand(aggCmdLookupObj).result;
+ result = db.runCommand(aggCmdLookupObj).cursor.firstBatch;
assert.eq(result, expectedMatchedResult);
- result = db.runCommand(aggCmdGraphLookupObj).result;
+ result = db.runCommand(aggCmdGraphLookupObj).cursor.firstBatch;
assert.eq(result, expectedMatchedResult);
// Restart oplog application on the secondary and wait for it's snapshot to catch up.
@@ -103,9 +105,9 @@ function testReadCommittedLookup(db, secondary, rst) {
rst.awaitLastOpCommitted();
// Now lookup/graphLookup should report that the documents don't match.
- result = db.runCommand(aggCmdLookupObj).result;
+ result = db.runCommand(aggCmdLookupObj).cursor.firstBatch;
assert.eq(result, expectedUnmatchedResult);
- result = db.runCommand(aggCmdGraphLookupObj).result;
+ result = db.runCommand(aggCmdGraphLookupObj).cursor.firstBatch;
assert.eq(result, expectedUnmatchedResult);
}
diff --git a/jstests/noPassthrough/commands_handle_kill.js b/jstests/noPassthrough/commands_handle_kill.js
index d2aedb8f4ea..9d3b197217f 100644
--- a/jstests/noPassthrough/commands_handle_kill.js
+++ b/jstests/noPassthrough/commands_handle_kill.js
@@ -20,7 +20,7 @@
var res;
// aggregate command errors if plan executor is killed.
- res = db.runCommand({aggregate: collName, pipeline: []});
+ res = db.runCommand({aggregate: collName, pipeline: [], cursor: {}});
assert.commandFailed(res);
assert(res.errmsg.indexOf("hit planExecutorAlwaysDead fail point") > -1);
diff --git a/jstests/noPassthrough/read_majority.js b/jstests/noPassthrough/read_majority.js
index 828b7fba4a5..ee7d3049fa7 100644
--- a/jstests/noPassthrough/read_majority.js
+++ b/jstests/noPassthrough/read_majority.js
@@ -197,10 +197,10 @@ load("jstests/libs/analyze_plan.js");
assert.eq(res.code, ErrorCodes.InvalidOptions);
// Agg $out also doesn't support read concern majority.
- assert.commandWorked(
- t.runCommand('aggregate', {pipeline: [{$out: 'out'}], readConcern: {level: 'local'}}));
- var res = assert.commandFailed(
- t.runCommand('aggregate', {pipeline: [{$out: 'out'}], readConcern: {level: 'majority'}}));
+ assert.commandWorked(t.runCommand(
+ 'aggregate', {pipeline: [{$out: 'out'}], cursor: {}, readConcern: {level: 'local'}}));
+ var res = assert.commandFailed(t.runCommand(
+ 'aggregate', {pipeline: [{$out: 'out'}], cursor: {}, readConcern: {level: 'majority'}}));
assert.eq(res.code, ErrorCodes.InvalidOptions);
MongoRunner.stopMongod(testServer);
diff --git a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
index 22617b681c2..47037e603bf 100644
--- a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
+++ b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
@@ -61,7 +61,7 @@
});
commands.push({
- req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}]},
+ req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}},
setupFunc: function() {
coll.insert({_id: 1, type: 'oak'});
coll.insert({_id: 2, type: 'maple'});
diff --git a/jstests/readonly/aggregate.js b/jstests/readonly/aggregate.js
index d16f71ca001..1c980cdd327 100644
--- a/jstests/readonly/aggregate.js
+++ b/jstests/readonly/aggregate.js
@@ -82,6 +82,7 @@ runReadOnlyTest(function() {
var allowDiskUseCmd = {
aggregate: readableCollection.getName(),
pipeline: [],
+ cursor: {},
allowDiskUse: true
};
diff --git a/jstests/replsets/commands_that_write_accept_wc.js b/jstests/replsets/commands_that_write_accept_wc.js
index 64e52d4c6c6..3800b569c7e 100644
--- a/jstests/replsets/commands_that_write_accept_wc.js
+++ b/jstests/replsets/commands_that_write_accept_wc.js
@@ -78,7 +78,7 @@ load('jstests/libs/write_concern_util.js');
});
commands.push({
- req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}]},
+ req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}},
setupFunc: function() {
coll.insert({_id: 1, type: 'oak'});
coll.insert({_id: 2, type: 'maple'});
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index da8f0d80cab..a27b176cdf0 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -132,9 +132,10 @@
res = checkCommandSucceeded(testDB, {
aggregate: 'foo',
- pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}],
+ cursor: {}
});
- assert.eq(4500, res.result[0].sum);
+ assert.eq(4500, res.cursor.firstBatch[0].sum);
} else {
print("Checking read operations, should fail");
assert.throws(function() {
@@ -146,7 +147,8 @@
{mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
checkCommandFailed(testDB, {
aggregate: 'foo',
- pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}],
+ cursor: {}
});
}
};
diff --git a/jstests/sharding/commands_that_write_accept_wc_shards.js b/jstests/sharding/commands_that_write_accept_wc_shards.js
index 1637938b4eb..53d2cd3233e 100644
--- a/jstests/sharding/commands_that_write_accept_wc_shards.js
+++ b/jstests/sharding/commands_that_write_accept_wc_shards.js
@@ -131,7 +131,7 @@ load('jstests/libs/write_concern_util.js');
// Aggregate with passthrough.
commands.push({
- req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}]},
+ req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}},
setupFunc: function() {
coll.insert({_id: 1, x: 3, type: 'oak'});
coll.insert({_id: 2, x: 13, type: 'maple'});
@@ -148,7 +148,8 @@ load('jstests/libs/write_concern_util.js');
commands.push({
req: {
aggregate: collName,
- pipeline: [{$match: {x: -3}}, {$match: {type: {$exists: 1}}}, {$out: "foo"}]
+ pipeline: [{$match: {x: -3}}, {$match: {type: {$exists: 1}}}, {$out: "foo"}],
+ cursor: {}
},
setupFunc: function() {
shardCollectionWithChunks(st, coll);
@@ -168,7 +169,8 @@ load('jstests/libs/write_concern_util.js');
commands.push({
req: {
aggregate: collName,
- pipeline: [{$match: {type: {$exists: 1}}}, {$sort: {type: 1}}, {$out: "foo"}]
+ pipeline: [{$match: {type: {$exists: 1}}}, {$sort: {type: 1}}, {$out: "foo"}],
+ cursor: {}
},
setupFunc: function() {
shardCollectionWithChunks(st, coll);
diff --git a/jstests/sharding/error_propagation.js b/jstests/sharding/error_propagation.js
index bcf191c2d44..ee113329138 100644
--- a/jstests/sharding/error_propagation.js
+++ b/jstests/sharding/error_propagation.js
@@ -11,13 +11,13 @@
db.setSlaveOk(true);
assert.writeOK(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
- assert.commandWorked(
- db.runCommand({aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}]}));
+ assert.commandWorked(db.runCommand(
+ {aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}}));
assert.writeOK(db.foo.insert({a: [1, 2]}, {writeConcern: {w: 3}}));
- var res =
- db.runCommand({aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}]});
+ var res = db.runCommand(
+ {aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}], cursor: {}});
assert.commandFailed(res);
assert.eq("$add only supports numeric or date types, not array", res.errmsg, printjson(res));
}());
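Pipeline errors still surface through the ordinary command response; only the request shape changed. A self-contained sketch of both outcomes, mirroring the test above:

    db.foo.drop();
    assert.writeOK(db.foo.insert({a: 1}));

    // Numeric 'a': the projection succeeds.
    assert.commandWorked(db.runCommand(
        {aggregate: 'foo', pipeline: [{$project: {total: {$add: ['$a', 1]}}}], cursor: {}}));

    // Array 'a': $add fails, and errmsg explains why.
    assert.writeOK(db.foo.insert({a: [1, 2]}));
    var res = db.runCommand(
        {aggregate: 'foo', pipeline: [{$project: {total: {$add: ['$a', 1]}}}], cursor: {}});
    assert.commandFailed(res);
    print(res.errmsg);  // "$add only supports numeric or date types, not array"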
diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js
index 33121e61712..c7f443697aa 100644
--- a/jstests/sharding/max_time_ms_sharded.js
+++ b/jstests/sharding/max_time_ms_sharded.js
@@ -186,15 +186,16 @@
// Positive test for "aggregate".
configureMaxTimeAlwaysTimeOut("alwaysOn");
assert.commandFailedWithCode(
- coll.runCommand("aggregate", {pipeline: [], maxTimeMS: 60 * 1000}),
+ coll.runCommand("aggregate", {pipeline: [], cursor: {}, maxTimeMS: 60 * 1000}),
ErrorCodes.ExceededTimeLimit,
"expected aggregate to fail with code " + ErrorCodes.ExceededTimeLimit +
" due to maxTimeAlwaysTimeOut fail point, but instead got: " + tojson(res));
// Negative test for "aggregate".
configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(coll.runCommand("aggregate", {pipeline: [], maxTimeMS: 60 * 1000}),
- "expected aggregate to not hit time limit in mongod");
+ assert.commandWorked(
+ coll.runCommand("aggregate", {pipeline: [], cursor: {}, maxTimeMS: 60 * 1000}),
+ "expected aggregate to not hit time limit in mongod");
// Positive test for "moveChunk".
configureMaxTimeAlwaysTimeOut("alwaysOn");
diff --git a/jstests/sharding/read_pref_cmd.js b/jstests/sharding/read_pref_cmd.js
index b9f275ea962..e8c06f48694 100644
--- a/jstests/sharding/read_pref_cmd.js
+++ b/jstests/sharding/read_pref_cmd.js
@@ -160,12 +160,12 @@ var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secEx
}
// Test on sharded
- cmdTest({aggregate: 'user', pipeline: [{$project: {x: 1}}]},
+ cmdTest({aggregate: 'user', pipeline: [{$project: {x: 1}}], cursor: {}},
true,
formatProfileQuery({aggregate: 'user'}));
// Test on non-sharded
- cmdTest({aggregate: 'mrIn', pipeline: [{$project: {x: 1}}]},
+ cmdTest({aggregate: 'mrIn', pipeline: [{$project: {x: 1}}], cursor: {}},
true,
formatProfileQuery({aggregate: 'mrIn'}));
};
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 4887f61d49b..d548d9523ca 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -99,8 +99,7 @@ bool handleCursorCommand(OperationContext* txn,
invariant(cursor->isAggCursor());
}
- invariant(request.getBatchSize());
- long long batchSize = request.getBatchSize().get();
+ long long batchSize = request.getBatchSize();
// can't use result BSONObjBuilder directly since it won't handle exceptions correctly.
BSONArrayBuilder resultsArray;
@@ -529,33 +528,17 @@ public:
// Unless set to true, the ClientCursor created above will be deleted on block exit.
bool keepCursor = false;
- // Use of the aggregate command without specifying to use a cursor is deprecated.
- // Applications should migrate to using cursors. Cursors are strictly more useful than
- // outputting the results as a single document, since results that fit inside a single
- // BSONObj will also fit inside a single batch.
- //
- // We occasionally log a deprecation warning.
- if (!request.isCursorCommand()) {
- RARELY {
- warning()
- << "Use of the aggregate command without the 'cursor' "
- "option is deprecated. See "
- "http://dochub.mongodb.org/core/aggregate-without-cursor-deprecation.";
- }
- }
-
// If both explain and cursor are specified, explain wins.
if (expCtx->isExplain) {
result << "stages" << Value(pipeline->writeExplainOps());
- } else if (request.isCursorCommand()) {
+ } else {
+ // Cursor must be specified if explain is not.
keepCursor = handleCursorCommand(txn,
origNss.ns(),
pin ? pin->getCursor() : nullptr,
pin ? pin->getCursor()->getExecutor() : exec.get(),
request,
result);
- } else {
- pipeline->run(result);
}
if (!expCtx->isExplain) {
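As the branch above encodes, explain takes precedence when both options are present: the server returns stage plans and never opens a cursor. A hedged shell sketch, with an illustrative collection name:

    // cursor: {} is supplied but ignored because explain wins.
    var res = db.runCommand({
        aggregate: 'foo',
        pipeline: [{$match: {a: 1}}],
        explain: true,
        cursor: {}
    });
    assert.commandWorked(res);
    assert(res.hasOwnProperty('stages'));   // explain output
    assert(!res.hasOwnProperty('cursor'));  // no cursor is returned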
diff --git a/src/mongo/db/pipeline/aggregation_request.cpp b/src/mongo/db/pipeline/aggregation_request.cpp
index c741c32c4d3..57a8f3644a9 100644
--- a/src/mongo/db/pipeline/aggregation_request.cpp
+++ b/src/mongo/db/pipeline/aggregation_request.cpp
@@ -58,7 +58,7 @@ const StringData AggregationRequest::kAllowDiskUseName = "allowDiskUse"_sd;
const long long AggregationRequest::kDefaultBatchSize = 101;
AggregationRequest::AggregationRequest(NamespaceString nss, std::vector<BSONObj> pipeline)
- : _nss(std::move(nss)), _pipeline(std::move(pipeline)) {}
+ : _nss(std::move(nss)), _pipeline(std::move(pipeline)), _batchSize(kDefaultBatchSize) {}
StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(NamespaceString nss,
const BSONObj& cmdObj) {
@@ -85,6 +85,8 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(NamespaceString
kCommandName,
repl::ReadConcernArgs::kReadConcernFieldName};
+ bool hasCursorElem = false;
+
// Parse optional parameters.
for (auto&& elem : cmdObj) {
auto fieldName = elem.fieldNameStringData();
@@ -108,7 +110,7 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(NamespaceString
return status;
}
- request.setCursorCommand(true);
+ hasCursorElem = true;
request.setBatchSize(batchSize);
} else if (kCollationName == fieldName) {
if (elem.type() != BSONType::Object) {
@@ -149,6 +151,14 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(NamespaceString
str::stream() << "unrecognized field '" << elem.fieldName() << "'"};
}
}
+
+ if (!hasCursorElem && !request.isExplain()) {
+ return {ErrorCodes::FailedToParse,
+ str::stream() << "The '" << kCursorName << "' option is required, unless '"
+ << kExplainName
+ << "' is true"};
+ }
+
return request;
}
@@ -165,7 +175,8 @@ Document AggregationRequest::serializeToCommandObj() const {
_bypassDocumentValidation ? Value(true) : Value()},
// Only serialize a collation if one was specified.
{kCollationName, _collation.isEmpty() ? Value() : Value(_collation)},
- {kCursorName, _batchSize ? Value(Document{{kBatchSizeName, _batchSize.get()}}) : Value()}};
+ // Only serialize batchSize when explain is false.
+ {kCursorName, _explain ? Value() : Value(Document{{kBatchSizeName, _batchSize}})}};
}
} // namespace mongo
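With this parse-time check, omitting both cursor and explain is rejected up front rather than silently producing a single-document result. A minimal sketch of the new failure mode, against an illustrative collection:

    // Neither cursor nor explain: the server refuses to parse the request.
    var res = db.runCommand({aggregate: 'foo', pipeline: []});
    assert.commandFailed(res);
    // errmsg names the missing option, e.g.:
    // "The 'cursor' option is required, unless 'explain' is true"

    // Supplying either option satisfies the parser.
    assert.commandWorked(db.runCommand({aggregate: 'foo', pipeline: [], cursor: {}}));
    assert.commandWorked(db.runCommand({aggregate: 'foo', pipeline: [], explain: true}));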
diff --git a/src/mongo/db/pipeline/aggregation_request.h b/src/mongo/db/pipeline/aggregation_request.h
index 3a8844b00e9..4a3ef4c0223 100644
--- a/src/mongo/db/pipeline/aggregation_request.h
+++ b/src/mongo/db/pipeline/aggregation_request.h
@@ -81,7 +81,7 @@ public:
// Getters.
//
- boost::optional<long long> getBatchSize() const {
+ long long getBatchSize() const {
return _batchSize;
}
@@ -96,10 +96,6 @@ public:
return _pipeline;
}
- bool isCursorCommand() const {
- return _cursorCommand;
- }
-
bool isExplain() const {
return _explain;
}
@@ -128,8 +124,7 @@ public:
//
/**
- * Must be either unset or non-negative. Negative batchSize is illegal but batchSize of 0 is
- * allowed.
+ * Negative batchSize is illegal but batchSize of 0 is allowed.
*/
void setBatchSize(long long batchSize) {
uassert(40203, "batchSize must be non-negative", batchSize >= 0);
@@ -140,10 +135,6 @@ public:
_collation = collation.getOwned();
}
- void setCursorCommand(bool isCursorCommand) {
- _cursorCommand = isCursorCommand;
- }
-
void setExplain(bool isExplain) {
_explain = isExplain;
}
@@ -168,9 +159,9 @@ private:
// An unparsed version of the pipeline.
const std::vector<BSONObj> _pipeline;
- // Optional fields.
+ long long _batchSize;
- boost::optional<long long> _batchSize;
+ // Optional fields.
// An owned copy of the user-specified collation object, or an empty object if no collation was
// specified.
@@ -180,6 +171,5 @@ private:
bool _allowDiskUse = false;
bool _fromRouter = false;
bool _bypassDocumentValidation = false;
- bool _cursorCommand = false;
};
} // namespace mongo
diff --git a/src/mongo/db/pipeline/aggregation_request_test.cpp b/src/mongo/db/pipeline/aggregation_request_test.cpp
index acd38732522..4d1a5f33fb6 100644
--- a/src/mongo/db/pipeline/aggregation_request_test.cpp
+++ b/src/mongo/db/pipeline/aggregation_request_test.cpp
@@ -44,6 +44,9 @@
namespace mongo {
namespace {
+const Document kDefaultCursorOptionDocument{
+ {AggregationRequest::kBatchSizeName, AggregationRequest::kDefaultBatchSize}};
+
//
// Parsing
//
@@ -58,8 +61,7 @@ TEST(AggregationRequestTest, ShouldParseAllKnownOptions) {
ASSERT_TRUE(request.shouldAllowDiskUse());
ASSERT_TRUE(request.isFromRouter());
ASSERT_TRUE(request.shouldBypassDocumentValidation());
- ASSERT_TRUE(request.isCursorCommand());
- ASSERT_EQ(request.getBatchSize().get(), 10);
+ ASSERT_EQ(request.getBatchSize(), 10);
ASSERT_BSONOBJ_EQ(request.getCollation(),
BSON("locale"
<< "en_US"));
@@ -75,7 +77,8 @@ TEST(AggregationRequestTest, ShouldOnlySerializeRequiredFieldsIfNoOptionalFields
auto expectedSerialization =
Document{{AggregationRequest::kCommandName, nss.coll()},
- {AggregationRequest::kPipelineName, Value(std::vector<Value>{})}};
+ {AggregationRequest::kPipelineName, Value(std::vector<Value>{})},
+ {AggregationRequest::kCursorName, Value(kDefaultCursorOptionDocument)}};
ASSERT_DOCUMENT_EQ(request.serializeToCommandObj(), expectedSerialization);
}
@@ -90,7 +93,8 @@ TEST(AggregationRequestTest, ShouldNotSerializeOptionalValuesIfEquivalentToDefau
auto expectedSerialization =
Document{{AggregationRequest::kCommandName, nss.coll()},
- {AggregationRequest::kPipelineName, Value(std::vector<Value>{})}};
+ {AggregationRequest::kPipelineName, Value(std::vector<Value>{})},
+ {AggregationRequest::kCursorName, Value(kDefaultCursorOptionDocument)}};
ASSERT_DOCUMENT_EQ(request.serializeToCommandObj(), expectedSerialization);
}
@@ -101,6 +105,7 @@ TEST(AggregationRequestTest, ShouldSerializeOptionalValuesIfSet) {
request.setAllowDiskUse(true);
request.setFromRouter(true);
request.setBypassDocumentValidation(true);
+ request.setBatchSize(10); // batchSize not serialized when explain is true.
const auto collationObj = BSON("locale"
<< "en_US");
request.setCollation(collationObj);
@@ -116,23 +121,25 @@ TEST(AggregationRequestTest, ShouldSerializeOptionalValuesIfSet) {
ASSERT_DOCUMENT_EQ(request.serializeToCommandObj(), expectedSerialization);
}
-TEST(AggregationRequestTest, ShouldSetBatchSizeToDefaultOnEmptyCursorObject) {
+TEST(AggregationRequestTest, ShouldSerializeBatchSizeIfSetAndExplainFalse) {
NamespaceString nss("a.collection");
- const BSONObj inputBson = fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}}");
- auto request = AggregationRequest::parseFromBSON(nss, inputBson);
- ASSERT_OK(request.getStatus());
- ASSERT_TRUE(request.getValue().isCursorCommand());
- ASSERT_TRUE(request.getValue().getBatchSize());
- ASSERT_EQ(request.getValue().getBatchSize().get(), AggregationRequest::kDefaultBatchSize);
+ AggregationRequest request(nss, {});
+ request.setBatchSize(10);
+
+ auto expectedSerialization =
+ Document{{AggregationRequest::kCommandName, nss.coll()},
+ {AggregationRequest::kPipelineName, Value(std::vector<Value>{})},
+ {AggregationRequest::kCursorName,
+ Value(Document({{AggregationRequest::kBatchSizeName, 10}}))}};
+ ASSERT_DOCUMENT_EQ(request.serializeToCommandObj(), expectedSerialization);
}
-TEST(AggregationRequestTest, NoBatchSizeWhenCursorObjectNotSet) {
+TEST(AggregationRequestTest, ShouldSetBatchSizeToDefaultOnEmptyCursorObject) {
NamespaceString nss("a.collection");
- const BSONObj inputBson = fromjson("{pipeline: [{$match: {a: 'abc'}}]}");
+ const BSONObj inputBson = fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}}");
auto request = AggregationRequest::parseFromBSON(nss, inputBson);
ASSERT_OK(request.getStatus());
- ASSERT_FALSE(request.getValue().isCursorCommand());
- ASSERT_FALSE(request.getValue().getBatchSize());
+ ASSERT_EQ(request.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
}
//
@@ -141,41 +148,51 @@ TEST(AggregationRequestTest, NoBatchSizeWhenCursorObjectNotSet) {
TEST(AggregationRequestTest, ShouldRejectNonArrayPipeline) {
NamespaceString nss("a.collection");
- const BSONObj inputBson = fromjson("{pipeline: {}}");
+ const BSONObj inputBson = fromjson("{pipeline: {}, cursor: {}}");
ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
TEST(AggregationRequestTest, ShouldRejectPipelineArrayIfAnElementIsNotAnObject) {
NamespaceString nss("a.collection");
- BSONObj inputBson = fromjson("{pipeline: [4]}");
+ BSONObj inputBson = fromjson("{pipeline: [4], cursor: {}}");
ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
- inputBson = fromjson("{pipeline: [{$match: {a: 'abc'}}, 4]}");
+ inputBson = fromjson("{pipeline: [{$match: {a: 'abc'}}, 4], cursor: {}}");
ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
TEST(AggregationRequestTest, ShouldRejectNonObjectCollation) {
NamespaceString nss("a.collection");
- const BSONObj inputBson = fromjson("{pipeline: [{$match: {a: 'abc'}}], collation: 1}");
+ const BSONObj inputBson =
+ fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, collation: 1}");
ASSERT_NOT_OK(
AggregationRequest::parseFromBSON(NamespaceString("a.collection"), inputBson).getStatus());
}
TEST(AggregationRequestTest, ShouldRejectNonBoolExplain) {
NamespaceString nss("a.collection");
- const BSONObj inputBson = fromjson("{pipeline: [{$match: {a: 'abc'}}], explain: 1}");
+ const BSONObj inputBson =
+ fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, explain: 1}");
ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
TEST(AggregationRequestTest, ShouldRejectNonBoolFromRouter) {
NamespaceString nss("a.collection");
- const BSONObj inputBson = fromjson("{pipeline: [{$match: {a: 'abc'}}], fromRouter: 1}");
+ const BSONObj inputBson =
+ fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, fromRouter: 1}");
ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
TEST(AggregationRequestTest, ShouldRejectNonBoolAllowDiskUse) {
NamespaceString nss("a.collection");
- const BSONObj inputBson = fromjson("{pipeline: [{$match: {a: 'abc'}}], allowDiskUse: 1}");
+ const BSONObj inputBson =
+ fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, allowDiskUse: 1}");
+ ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
+}
+
+TEST(AggregationRequestTest, ShouldRejectNoCursorNoExplain) {
+ NamespaceString nss("a.collection");
+ const BSONObj inputBson = fromjson("{pipeline: [{$match: {a: 'abc'}}]}");
ASSERT_NOT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
@@ -185,27 +202,29 @@ TEST(AggregationRequestTest, ShouldRejectNonBoolAllowDiskUse) {
TEST(AggregationRequestTest, ShouldIgnoreFieldsPrefixedWithDollar) {
NamespaceString nss("a.collection");
- const BSONObj inputBson = fromjson("{pipeline: [{$match: {a: 'abc'}}], $unknown: 1}");
+ const BSONObj inputBson =
+ fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, $unknown: 1}");
ASSERT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
TEST(AggregationRequestTest, ShouldIgnoreWriteConcernOption) {
NamespaceString nss("a.collection");
const BSONObj inputBson =
- fromjson("{pipeline: [{$match: {a: 'abc'}}], writeConcern: 'invalid'}");
+ fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, writeConcern: 'invalid'}");
ASSERT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
TEST(AggregationRequestTest, ShouldIgnoreMaxTimeMsOption) {
NamespaceString nss("a.collection");
- const BSONObj inputBson = fromjson("{pipeline: [{$match: {a: 'abc'}}], maxTimeMS: 'invalid'}");
+ const BSONObj inputBson =
+ fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, maxTimeMS: 'invalid'}");
ASSERT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
TEST(AggregationRequestTest, ShouldIgnoreReadConcernOption) {
NamespaceString nss("a.collection");
const BSONObj inputBson =
- fromjson("{pipeline: [{$match: {a: 'abc'}}], readConcern: 'invalid'}");
+ fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, readConcern: 'invalid'}");
ASSERT_OK(AggregationRequest::parseFromBSON(nss, inputBson).getStatus());
}
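These test changes pin down the new default: an empty cursor object implies a first batch of kDefaultBatchSize (101) documents, with the remainder fetched via getMore. A sketch, seeding a throwaway collection with more than 101 documents:

    db.many.drop();
    var bulk = db.many.initializeUnorderedBulkOp();
    for (var i = 0; i < 150; i++) bulk.insert({_id: i});
    assert.writeOK(bulk.execute());

    // cursor: {} uses the server default first-batch size of 101.
    var res = db.runCommand({aggregate: 'many', pipeline: [], cursor: {}});
    assert.commandWorked(res);
    assert.eq(101, res.cursor.firstBatch.length);

    // An explicit batchSize overrides the default; 0 means an empty first batch.
    res = db.runCommand({aggregate: 'many', pipeline: [], cursor: {batchSize: 0}});
    assert.commandWorked(res);
    assert.eq(0, res.cursor.firstBatch.length);
    assert.neq(0, res.cursor.id);  // cursor stays open for getMore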
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index 8bb745fba13..5c13f710b40 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -310,30 +310,6 @@ void Pipeline::stitch() {
}
}
-void Pipeline::run(BSONObjBuilder& result) {
- // We should not get here in the explain case.
- verify(!pCtx->isExplain);
-
- // the array in which the aggregation results reside
- // cant use subArrayStart() due to error handling
- BSONArrayBuilder resultArray;
- while (auto next = getNext()) {
- // Add the document to the result set.
- BSONObjBuilder documentBuilder(resultArray.subobjStart());
- next->toBson(&documentBuilder);
- documentBuilder.doneFast();
- // Object will be too large, assert. The extra 1KB is for headers.
- uassert(16389,
- str::stream() << "aggregation result exceeds maximum document size ("
- << BSONObjMaxUserSize / (1024 * 1024)
- << "MB)",
- resultArray.len() < BSONObjMaxUserSize - 1024);
- }
-
- resultArray.done();
- result.appendArray("result", resultArray.arr());
-}
-
boost::optional<Document> Pipeline::getNext() {
invariant(!_sources.empty());
auto nextResult = _sources.back()->getNext();
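Removing Pipeline::run also removes the old 16MB ceiling on the whole result set (the uassert 16389 deleted above): each document must still fit in 16MB, but the full result now streams out in batches instead of one BSON array. A hedged sketch of consuming such a result from the shell:

    // The shell's aggregate helper always sends cursor: {} and drives the
    // getMore loop, so the total result size is no longer bounded by one reply.
    var cursor = db.big.aggregate([{$match: {}}]);  // illustrative collection
    var count = 0;
    while (cursor.hasNext()) {
        cursor.next();
        count++;
    }
    print('streamed ' + count + ' documents');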
diff --git a/src/mongo/db/pipeline/pipeline.h b/src/mongo/db/pipeline/pipeline.h
index fe4ca1de424..3aceb6c28f0 100644
--- a/src/mongo/db/pipeline/pipeline.h
+++ b/src/mongo/db/pipeline/pipeline.h
@@ -139,13 +139,6 @@ public:
*/
std::vector<Value> serialize() const;
- /**
- Run the Pipeline on the given source.
-
- @param result builder to write the result to
- */
- void run(BSONObjBuilder& result);
-
/// The initial source is special since it varies between mongos and mongod.
void addInitialSource(boost::intrusive_ptr<DocumentSource> source);
diff --git a/src/mongo/db/query/count_request_test.cpp b/src/mongo/db/query/count_request_test.cpp
index 58835de9453..41cff25449f 100644
--- a/src/mongo/db/query/count_request_test.cpp
+++ b/src/mongo/db/query/count_request_test.cpp
@@ -222,7 +222,7 @@ TEST(CountRequest, ConvertToAggregationSucceeds) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(ar.getValue().isCursorCommand());
+ ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -243,7 +243,7 @@ TEST(CountRequest, ConvertToAggregationWithQueryAndFilterAndLimit) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(ar.getValue().isCursorCommand());
+ ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -267,7 +267,6 @@ TEST(CountRequest, ConvertToAggregationWithExplain) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
ASSERT(ar.getValue().isExplain());
- ASSERT(ar.getValue().isCursorCommand());
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
diff --git a/src/mongo/db/query/parsed_distinct_test.cpp b/src/mongo/db/query/parsed_distinct_test.cpp
index 60c993756a7..1a4f07adb88 100644
--- a/src/mongo/db/query/parsed_distinct_test.cpp
+++ b/src/mongo/db/query/parsed_distinct_test.cpp
@@ -61,7 +61,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationNoQuery) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
ASSERT(!ar.getValue().isExplain());
- ASSERT(ar.getValue().isCursorCommand());
+ ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -92,7 +92,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
ASSERT(!ar.getValue().isExplain());
- ASSERT(ar.getValue().isCursorCommand());
+ ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -124,7 +124,6 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithExplain) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
ASSERT(ar.getValue().isExplain());
- ASSERT(ar.getValue().isCursorCommand());
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index 3ea93b88012..b8e45d810e4 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -1104,7 +1104,7 @@ TEST(QueryRequestTest, ConvertToAggregationSucceeds) {
ASSERT_OK(ar.getStatus());
ASSERT(!ar.getValue().isExplain());
ASSERT(ar.getValue().getPipeline().empty());
- ASSERT(ar.getValue().isCursorCommand());
+ ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
}
@@ -1119,7 +1119,6 @@ TEST(QueryRequestTest, ConvertToAggregationWithExplainSucceeds) {
ASSERT_OK(ar.getStatus());
ASSERT(ar.getValue().isExplain());
ASSERT(ar.getValue().getPipeline().empty());
- ASSERT(ar.getValue().isCursorCommand());
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
}
@@ -1242,7 +1241,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithPipeline) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
ASSERT(!ar.getValue().isExplain());
- ASSERT(ar.getValue().isCursorCommand());
+ ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
@@ -1266,7 +1265,6 @@ TEST(QueryRequestTest, ConvertToAggregationWithBatchSize) {
auto ar = AggregationRequest::parseFromBSON(testns, agg.getValue());
ASSERT_OK(ar.getStatus());
- ASSERT(ar.getValue().isCursorCommand());
ASSERT(!ar.getValue().isExplain());
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_EQ(ar.getValue().getBatchSize(), 4LL);
@@ -1286,7 +1284,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithMaxTimeMS) {
auto ar = AggregationRequest::parseFromBSON(testns, cmdObj);
ASSERT_OK(ar.getStatus());
ASSERT(!ar.getValue().isExplain());
- ASSERT(ar.getValue().isCursorCommand());
+ ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
}
@@ -1301,7 +1299,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithCollationSucceeds) {
ASSERT_OK(ar.getStatus());
ASSERT(!ar.getValue().isExplain());
ASSERT(ar.getValue().getPipeline().empty());
- ASSERT(ar.getValue().isCursorCommand());
+ ASSERT_EQ(ar.getValue().getBatchSize(), AggregationRequest::kDefaultBatchSize);
ASSERT_EQ(ar.getValue().getNamespaceString(), testns);
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSON("f" << 1));
}
diff --git a/src/mongo/db/views/resolved_view.cpp b/src/mongo/db/views/resolved_view.cpp
index d5f4faa7f68..c5d955a08d1 100644
--- a/src/mongo/db/views/resolved_view.cpp
+++ b/src/mongo/db/views/resolved_view.cpp
@@ -83,17 +83,12 @@ StatusWith<BSONObj> ResolvedView::asExpandedViewAggregation(
}
pipelineBuilder.doneFast();
- // The cursor option is always specified regardless of the presence of batchSize.
- if (request.getBatchSize()) {
- BSONObjBuilder batchSizeBuilder(aggregationBuilder.subobjStart("cursor"));
- batchSizeBuilder.append(AggregationRequest::kBatchSizeName, *request.getBatchSize());
- batchSizeBuilder.doneFast();
- } else {
- aggregationBuilder.append("cursor", BSONObj());
- }
-
if (request.isExplain()) {
aggregationBuilder.append("explain", true);
+ } else {
+ BSONObjBuilder batchSizeBuilder(aggregationBuilder.subobjStart("cursor"));
+ batchSizeBuilder.append(AggregationRequest::kBatchSizeName, request.getBatchSize());
+ batchSizeBuilder.doneFast();
}
if (request.shouldBypassDocumentValidation()) {
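After this change, a view's expanded aggregation always carries an explicit cursor document with the effective batchSize, except when the request is an explain. A rough sketch of the rewritten command, with illustrative namespaces:

    // A view 'testview' over 'testcoll'; an incoming
    //   {aggregate: 'testview', pipeline: [], cursor: {}}
    // is rewritten against the backing collection roughly as:
    var expanded = {
        aggregate: 'testcoll',
        pipeline: [],                // view pipeline followed by user pipeline
        cursor: {batchSize: 101}     // kDefaultBatchSize made explicit
    };
    // whereas an explain request keeps 'explain: true' and drops 'cursor'.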
diff --git a/src/mongo/db/views/resolved_view_test.cpp b/src/mongo/db/views/resolved_view_test.cpp
index 2221704e69a..3f991868ccb 100644
--- a/src/mongo/db/views/resolved_view_test.cpp
+++ b/src/mongo/db/views/resolved_view_test.cpp
@@ -44,24 +44,29 @@ namespace {
const NamespaceString viewNss("testdb.testview");
const NamespaceString backingNss("testdb.testcoll");
const std::vector<BSONObj> emptyPipeline;
+const BSONObj kDefaultCursorOptionDocument =
+ BSON(AggregationRequest::kBatchSizeName << AggregationRequest::kDefaultBatchSize);
TEST(ResolvedViewTest, ExpandingCmdObjWithEmptyPipelineOnNoOpViewYieldsEmptyPipeline) {
const ResolvedView resolvedView{backingNss, emptyPipeline};
- BSONObj cmdObj = BSON("aggregate" << viewNss.coll() << "pipeline" << BSONArray());
+ BSONObj cmdObj =
+ BSON("aggregate" << viewNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj());
auto result = resolvedView.asExpandedViewAggregation(cmdObj);
ASSERT_OK(result.getStatus());
- BSONObj expected = BSON(
- "aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj());
+ BSONObj expected =
+ BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
+ << kDefaultCursorOptionDocument);
ASSERT_BSONOBJ_EQ(result.getValue(), expected);
}
TEST(ResolvedViewTest, ExpandingCmdObjWithNonemptyPipelineAppendsToViewPipeline) {
std::vector<BSONObj> viewPipeline{BSON("skip" << 7)};
const ResolvedView resolvedView{backingNss, viewPipeline};
- BSONObj cmdObj =
- BSON("aggregate" << viewNss.coll() << "pipeline" << BSON_ARRAY(BSON("limit" << 3)));
+ BSONObj cmdObj = BSON(
+ "aggregate" << viewNss.coll() << "pipeline" << BSON_ARRAY(BSON("limit" << 3)) << "cursor"
+ << BSONObj());
auto result = resolvedView.asExpandedViewAggregation(cmdObj);
ASSERT_OK(result.getStatus());
@@ -69,7 +74,7 @@ TEST(ResolvedViewTest, ExpandingCmdObjWithNonemptyPipelineAppendsToViewPipeline)
BSONObj expected = BSON("aggregate" << backingNss.coll() << "pipeline"
<< BSON_ARRAY(BSON("skip" << 7) << BSON("limit" << 3))
<< "cursor"
- << BSONObj());
+ << kDefaultCursorOptionDocument);
ASSERT_BSONOBJ_EQ(result.getValue(), expected);
}
@@ -86,8 +91,9 @@ TEST(ResolvedViewTest, ExpandingAggRequestWithEmptyPipelineOnNoOpViewYieldsEmpty
auto result = resolvedView.asExpandedViewAggregation(aggRequest);
ASSERT_OK(result.getStatus());
- BSONObj expected = BSON(
- "aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj());
+ BSONObj expected =
+ BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
+ << kDefaultCursorOptionDocument);
ASSERT_BSONOBJ_EQ(result.getValue(), expected);
}
@@ -104,7 +110,7 @@ TEST(ResolvedViewTest, ExpandingAggRequestWithNonemptyPipelineAppendsToViewPipel
BSONObj expected = BSON("aggregate" << backingNss.coll() << "pipeline"
<< BSON_ARRAY(BSON("skip" << 7) << BSON("limit" << 3))
<< "cursor"
- << BSONObj());
+ << kDefaultCursorOptionDocument);
ASSERT_BSONOBJ_EQ(result.getValue(), expected);
}
@@ -117,9 +123,22 @@ TEST(ResolvedViewTest, ExpandingAggRequestPreservesExplain) {
ASSERT_OK(result.getStatus());
BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj()
+ BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "explain" << true);
+ ASSERT_BSONOBJ_EQ(result.getValue(), expected);
+}
+
+TEST(ResolvedViewTest, ExpandingAggRequestWithCursorAndExplainOnlyPreservesExplain) {
+ const ResolvedView resolvedView{backingNss, emptyPipeline};
+ BSONObj cmdObj =
+ BSON("aggregate" << viewNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj()
<< "explain"
<< true);
+
+ auto result = resolvedView.asExpandedViewAggregation(cmdObj);
+ ASSERT_OK(result.getStatus());
+
+ BSONObj expected =
+ BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "explain" << true);
ASSERT_BSONOBJ_EQ(result.getValue(), expected);
}
@@ -132,7 +151,8 @@ TEST(ResolvedViewTest, ExpandingAggRequestPreservesBypassDocumentValidation) {
ASSERT_OK(result.getStatus());
BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj()
+ BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
+ << kDefaultCursorOptionDocument
<< "bypassDocumentValidation"
<< true);
ASSERT_BSONOBJ_EQ(result.getValue(), expected);
@@ -147,7 +167,8 @@ TEST(ResolvedViewTest, ExpandingAggRequestPreservesAllowDiskUse) {
ASSERT_OK(result.getStatus());
BSONObj expected =
- BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj()
+ BSON("aggregate" << backingNss.coll() << "pipeline" << BSONArray() << "cursor"
+ << kDefaultCursorOptionDocument
<< "allowDiskUse"
<< true);
ASSERT_BSONOBJ_EQ(result.getValue(), expected);