author    Nick Zolnierz <nicholas.zolnierz@mongodb.com>    2020-03-18 15:17:56 -0400
committer Evergreen Agent <no-reply@evergreen.mongodb.com> 2020-03-25 17:40:43 +0000
commit    fe79c4ee1dfc8d49ae06c94a927267712b32b011 (patch)
tree      a5ddce887802b62fb028c5d707186ebb64effe66 /jstests/aggregation
parent    b36c69c5930d25a8f5ae348a2b2fb24f27f925e6 (diff)
download  mongo-fe79c4ee1dfc8d49ae06c94a927267712b32b011.tar.gz
SERVER-46700 Update tests in aggregation suite to avoid spawning mongod/sharded clusters
Diffstat (limited to 'jstests/aggregation')
-rw-r--r--  jstests/aggregation/bugs/cursor_timeout.js                   | 129
-rw-r--r--  jstests/aggregation/bugs/lookup_unwind_getmore.js            | 105
-rw-r--r--  jstests/aggregation/bugs/lookup_unwind_killcursor.js         |  97
-rw-r--r--  jstests/aggregation/bugs/server37750.js                      |  80
-rw-r--r--  jstests/aggregation/bugs/server6179.js                       |  56
-rw-r--r--  jstests/aggregation/bugs/server7781.js                       |  42
-rw-r--r--  jstests/aggregation/mongos_merge.js                          | 511
-rw-r--r--  jstests/aggregation/mongos_slaveok.js                        |  49
-rw-r--r--  jstests/aggregation/pipeline_pass_through_from_mongos.js     | 165
-rw-r--r--  jstests/aggregation/shard_targeting.js                       | 402
-rw-r--r--  jstests/aggregation/sharded_agg_cleanup_on_error.js          | 142
-rw-r--r--  jstests/aggregation/sources/addFields/use_cases.js           |  78
-rw-r--r--  jstests/aggregation/sources/addFields/weather.js             |  88
-rw-r--r--  jstests/aggregation/sources/collStats/shard_host_info.js     |  58
-rw-r--r--  jstests/aggregation/sources/facet/use_cases.js               | 156
-rw-r--r--  jstests/aggregation/sources/merge/exchange_explain.js        | 174
-rw-r--r--  jstests/aggregation/sources/merge/use_cases.js               | 113
-rw-r--r--  jstests/aggregation/sources/replaceRoot/address.js           |  87
-rw-r--r--  jstests/aggregation/testSlave.js                             |  22
19 files changed, 148 insertions, 2406 deletions
diff --git a/jstests/aggregation/bugs/cursor_timeout.js b/jstests/aggregation/bugs/cursor_timeout.js
deleted file mode 100644
index e9b80b0597f..00000000000
--- a/jstests/aggregation/bugs/cursor_timeout.js
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Tests that an aggregation cursor is killed when it is timed out by the ClientCursorMonitor.
- *
- * This test was designed to reproduce SERVER-25585.
- * @tags: [
- * requires_spawning_own_processes,
- * ]
- */
-(function() {
-'use strict';
-
-// Cursor timeout on mongod is handled by a single thread/timer that sleeps for
-// "clientCursorMonitorFrequencySecs" and, when it wakes up, adds the sleep value to each
-// operation's duration, timing out any cursor whose "now() - last accessed" time exceeds the
-// configured timeout. A cursor timeout of 2 seconds with a monitor frequency of 1 second
-// therefore means an effective timeout period of 1 to 2 seconds.
-const cursorTimeoutMs = 2000;
-const cursorMonitorFrequencySecs = 1;
-
-const options = {
- setParameter: {
- internalDocumentSourceCursorBatchSizeBytes: 1,
- // We use the "cursorTimeoutMillis" server parameter to decrease how long it takes for a
- // non-exhausted cursor to time out. We use the "clientCursorMonitorFrequencySecs"
- // server parameter to make the ClientCursorMonitor that cleans up the timed out cursors
- // run more often. The combination of these server parameters reduces the amount of time
- // we need to wait within this test.
- cursorTimeoutMillis: cursorTimeoutMs,
- clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs,
- }
-};
-const conn = MongoRunner.runMongod(options);
-assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
-
-const testDB = conn.getDB('test');
-
-// We use a batch size of 2 to ensure that the mongo shell does not exhaust the cursor on its
-// first batch.
-const batchSize = 2;
-const numMatches = 5;
-
-function assertCursorTimesOut(collName, pipeline) {
- const res = assert.commandWorked(testDB.runCommand({
- aggregate: collName,
- pipeline: pipeline,
- cursor: {
- batchSize: batchSize,
- },
- }));
-
- let serverStatus = assert.commandWorked(testDB.serverStatus());
- const expectedNumTimedOutCursors = serverStatus.metrics.cursor.timedOut + 1;
-
- const cursor = new DBCommandCursor(testDB, res, batchSize);
-
- // Wait until the idle cursor background job has killed the aggregation cursor.
- assert.soon(
- function() {
- serverStatus = assert.commandWorked(testDB.serverStatus());
- return +serverStatus.metrics.cursor.timedOut === expectedNumTimedOutCursors;
- },
- function() {
- return "aggregation cursor failed to time out: " + tojson(serverStatus.metrics.cursor);
- });
-
- assert.eq(0, serverStatus.metrics.cursor.open.total, tojson(serverStatus));
-
- // We attempt to exhaust the aggregation cursor to verify that sending a getMore returns an
- // error due to the cursor being killed.
- let err = assert.throws(function() {
- cursor.itcount();
- });
- assert.eq(ErrorCodes.CursorNotFound, err.code, tojson(err));
-}
-
-assert.commandWorked(testDB.source.insert({local: 1}));
-for (let i = 0; i < numMatches; ++i) {
- assert.commandWorked(testDB.dest.insert({foreign: 1}));
-}
-
-// Test that a regular aggregation cursor is killed when the timeout is reached.
-assertCursorTimesOut('dest', []);
-
-// Test that an aggregation cursor with a $lookup stage is killed when the timeout is reached.
-assertCursorTimesOut('source', [
- {
- $lookup: {
- from: 'dest',
- localField: 'local',
- foreignField: 'foreign',
- as: 'matches',
- }
- },
- {
- $unwind: "$matches",
- },
- ]);
-
-// Test that an aggregation cursor with nested $lookup stages is killed when the timeout is
-// reached.
-assertCursorTimesOut('source', [
- {
- $lookup: {
- from: 'dest',
- let : {local1: "$local"},
- pipeline: [
- {$match: {$expr: {$eq: ["$foreign", "$$local1"]}}},
- {
- $lookup: {
- from: 'source',
- let : {foreign1: "$foreign"},
- pipeline: [{$match: {$expr: {$eq: ["$local", "$$foreign1"]}}}],
- as: 'matches2'
- }
- },
- {
- $unwind: "$matches2",
- },
- ],
- as: 'matches1',
- }
- },
- {
- $unwind: "$matches1",
- },
- ]);
-
-MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/aggregation/bugs/lookup_unwind_getmore.js b/jstests/aggregation/bugs/lookup_unwind_getmore.js
deleted file mode 100644
index c184b5e56d6..00000000000
--- a/jstests/aggregation/bugs/lookup_unwind_getmore.js
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Tests that the server correctly handles when the OperationContext used by the $lookup stage
- * changes as it unwinds the results.
- *
- * This test was designed to reproduce SERVER-22537.
- * @tags: [
- * requires_spawning_own_processes,
- * ]
- */
-(function() {
-'use strict';
-
-const options = {
- setParameter: 'internalDocumentSourceCursorBatchSizeBytes=1'
-};
-const conn = MongoRunner.runMongod(options);
-assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
-
-const testDB = conn.getDB('test');
-
-/**
- * Executes an aggregate with 'options.pipeline' and confirms that 'options.numResults' were
- * returned.
- */
-function runTest(options) {
- // The batchSize must be smaller than the number of documents returned by the $lookup. This
- // ensures that the mongo shell will issue a getMore when unwinding the $lookup results for
- // the same document in the 'source' collection, under a different OperationContext.
- const batchSize = 2;
-
- testDB.source.drop();
- assert.commandWorked(testDB.source.insert({x: 1}));
-
- testDB.dest.drop();
- for (let i = 0; i < 5; ++i) {
- assert.commandWorked(testDB.dest.insert({x: 1}));
- }
-
- const res = assert.commandWorked(testDB.runCommand({
- aggregate: 'source',
- pipeline: options.pipeline,
- cursor: {
- batchSize: batchSize,
- },
- }));
-
- const cursor = new DBCommandCursor(testDB, res, batchSize);
- assert.eq(options.numResults, cursor.itcount());
-}
-
-runTest({
- pipeline: [
- {
- $lookup: {
- from: 'dest',
- localField: 'x',
- foreignField: 'x',
- as: 'matches',
- }
- },
- {
- $unwind: {
- path: '$matches',
- },
- },
- ],
- numResults: 5
- });
-
-runTest({
- pipeline: [
- {
- $lookup: {
- from: 'dest',
- let : {x1: "$x"},
- pipeline: [
- {$match: {$expr: {$eq: ["$$x1", "$x"]}}},
- {
- $lookup: {
- from: "dest",
- as: "matches2",
- let : {x2: "$x"},
- pipeline: [{$match: {$expr: {$eq: ["$$x2", "$x"]}}}]
- }
- },
- {
- $unwind: {
- path: '$matches2',
- },
- },
- ],
- as: 'matches1',
- }
- },
- {
- $unwind: {
- path: '$matches1',
- },
- },
- ],
- numResults: 25
- });
-
-MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/aggregation/bugs/lookup_unwind_killcursor.js b/jstests/aggregation/bugs/lookup_unwind_killcursor.js
deleted file mode 100644
index 829f3b52089..00000000000
--- a/jstests/aggregation/bugs/lookup_unwind_killcursor.js
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Tests that the cursor underlying the $lookup stage is killed when the cursor returned to the
- * client for the aggregation pipeline is killed.
- *
- * This test was designed to reproduce SERVER-24386.
- * @tags: [
- * requires_spawning_own_processes,
- * ]
- */
-(function() {
-'use strict';
-
-const options = {
- setParameter: 'internalDocumentSourceCursorBatchSizeBytes=1'
-};
-const conn = MongoRunner.runMongod(options);
-assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
-
-const testDB = conn.getDB('test');
-
-function runTest(pipeline) {
- // We use a batch size of 2 to ensure that the mongo shell does not exhaust the cursor on
- // its first batch.
- const batchSize = 2;
-
- testDB.source.drop();
- assert.commandWorked(testDB.source.insert({x: 1}));
-
- testDB.dest.drop();
- for (let i = 0; i < 5; ++i) {
- assert.commandWorked(testDB.dest.insert({x: 1}));
- }
-
- const res = assert.commandWorked(testDB.runCommand({
- aggregate: 'source',
- pipeline: pipeline,
- cursor: {
- batchSize: batchSize,
- },
- }));
-
- const cursor = new DBCommandCursor(testDB, res, batchSize);
- cursor.close(); // Closing the cursor will issue the "killCursors" command.
-
- const serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- assert.eq(0, serverStatus.metrics.cursor.open.total, tojson(serverStatus.metrics.cursor));
-}
-
-runTest([
- {
- $lookup: {
- from: 'dest',
- localField: 'x',
- foreignField: 'x',
- as: 'matches',
- }
- },
- {
- $unwind: {
- path: '$matches',
- },
- },
- ]);
-
-runTest([
- {
- $lookup: {
- from: 'dest',
- let : {x1: "$x"},
- pipeline: [
- {$match: {$expr: {$eq: ["$$x1", "$x"]}}},
- {
- $lookup: {
- from: "dest",
- as: "matches2",
- let : {x2: "$x"},
- pipeline: [{$match: {$expr: {$eq: ["$$x2", "$x"]}}}]
- }
- },
- {
- $unwind: {
- path: '$matches2',
- },
- },
- ],
- as: 'matches1',
- }
- },
- {
- $unwind: {
- path: '$matches1',
- },
- },
- ]);
-
-MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/aggregation/bugs/server37750.js b/jstests/aggregation/bugs/server37750.js
deleted file mode 100644
index 902c427c292..00000000000
--- a/jstests/aggregation/bugs/server37750.js
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Confirms that a sharded $sample which employs the DSSampleFromRandomCursor optimization is
- * capable of yielding.
- *
- * @tags: [assumes_read_concern_unchanged, do_not_wrap_aggregations_in_facets, requires_journaling,
- * requires_sharding]
- */
-(function() {
-"use strict";
-
-load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
-// Set up a 2-shard cluster. Configure 'internalQueryExecYieldIterations' on both shards such
-// that operations will yield on each PlanExecutor iteration.
-const st = new ShardingTest({
- name: jsTestName(),
- shards: 2,
- rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}}
-});
-
-const mongosDB = st.s.getDB(jsTestName());
-const mongosColl = mongosDB.test;
-
-// Shard the test collection, split it at {_id: 0}, and move the upper chunk to shard1.
-st.shardColl(mongosColl, {_id: 1}, {_id: 0}, {_id: 0});
-
-// Insert enough documents on each shard to induce the $sample random-cursor optimization.
-for (let i = (-150); i < 150; ++i) {
- assert.commandWorked(mongosColl.insert({_id: i}));
-}
-
-// Run the initial aggregate for the $sample stage.
-const cmdRes = assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{$sample: {size: 3}}],
- comment: "$sample random",
- cursor: {batchSize: 0}
-}));
-assert.eq(cmdRes.cursor.firstBatch.length, 0);
-
-// Force each shard to hang on yield to allow for currentOp capture.
-FixtureHelpers.runCommandOnEachPrimary({
- db: mongosDB.getSiblingDB("admin"),
- cmdObj: {
- configureFailPoint: "setYieldAllLocksHang",
- mode: "alwaysOn",
- data: {namespace: mongosColl.getFullName()}
- }
-});
-
-// Run $currentOp to confirm that the $sample getMore yields on both shards.
-const awaitShell = startParallelShell(() => {
- load("jstests/libs/fixture_helpers.js");
- assert.soon(() => db.getSiblingDB("admin")
- .aggregate([
- {$currentOp: {}},
- {
- $match: {
- "cursor.originatingCommand.comment": "$sample random",
- planSummary: "QUEUED_DATA, MULTI_ITERATOR",
- numYields: {$gt: 0}
- }
- }
- ])
- .itcount() === 2);
- // Release the failpoint and allow the getMores to complete.
- FixtureHelpers.runCommandOnEachPrimary({
- db: db.getSiblingDB("admin"),
- cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
- });
-}, mongosDB.getMongo().port);
-
-// Retrieve the results for the $sample aggregation.
-const sampleCursor = new DBCommandCursor(mongosDB, cmdRes);
-assert.eq(sampleCursor.toArray().length, 3);
-
-// Confirm that the parallel shell completes successfully, and tear down the cluster.
-awaitShell();
-st.stop();
-})();
\ No newline at end of file
diff --git a/jstests/aggregation/bugs/server6179.js b/jstests/aggregation/bugs/server6179.js
deleted file mode 100644
index 065f5b261ee..00000000000
--- a/jstests/aggregation/bugs/server6179.js
+++ /dev/null
@@ -1,56 +0,0 @@
-// SERVER-6179: support for two $groups in sharded agg
-// @tags: [
-// requires_sharding,
-// requires_spawning_own_processes,
-// ]
-(function() {
-'use strict';
-
-var s = new ShardingTest({shards: 2});
-
-assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
-s.ensurePrimaryShard('test', s.shard1.shardName);
-assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {_id: 1}}));
-
-var d = s.getDB("test");
-
-// Insert _id values 0 - 99
-var N = 100;
-
-var bulkOp = d.data.initializeOrderedBulkOp();
-for (var i = 0; i < N; ++i) {
- bulkOp.insert({_id: i, i: i % 10});
-}
-bulkOp.execute();
-
-// Split the data into 3 chunks
-assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 33}}));
-assert.commandWorked(s.s0.adminCommand({split: "test.data", middle: {_id: 66}}));
-
-// Migrate the middle chunk to another shard
-assert.commandWorked(s.s0.adminCommand(
- {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getPrimaryShard("test")).name}));
-
-// Check that we get results rather than an error
-var result = d.data
- .aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
- {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
- {$sort: {_id: 1}})
- .toArray();
-var expected = [
- {"_id": 0, "avg_id": 45},
- {"_id": 1, "avg_id": 46},
- {"_id": 2, "avg_id": 47},
- {"_id": 3, "avg_id": 48},
- {"_id": 4, "avg_id": 49},
- {"_id": 5, "avg_id": 50},
- {"_id": 6, "avg_id": 51},
- {"_id": 7, "avg_id": 52},
- {"_id": 8, "avg_id": 53},
- {"_id": 9, "avg_id": 54}
-];
-
-assert.eq(result, expected);
-
-s.stop();
-})();
diff --git a/jstests/aggregation/bugs/server7781.js b/jstests/aggregation/bugs/server7781.js
index d70662a9d25..462beebe5fd 100644
--- a/jstests/aggregation/bugs/server7781.js
+++ b/jstests/aggregation/bugs/server7781.js
@@ -1,8 +1,4 @@
// SERVER-7781 $geoNear pipeline stage
-// @tags: [
-// requires_sharding,
-// requires_spawning_own_processes,
-// ]
(function() {
'use strict';
@@ -55,32 +51,9 @@ function testGeoNearStageOutput({geoNearSpec, limit, batchSize}) {
// We use this to generate points. Using a single global to avoid resetting RNG in each pass.
var pointMaker = new GeoNearRandomTest(coll);
-function test(db, sharded, indexType) {
+function test(db, indexType) {
db[coll].drop();
- if (sharded) { // sharded setup
- var shards = [];
- var config = db.getSiblingDB("config");
- config.shards.find().forEach(function(shard) {
- shards.push(shard._id);
- });
-
- assert.commandWorked(
- db.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}}));
- for (var i = 1; i < 10; i++) {
- // split at 0.1, 0.2, ... 0.9
- assert.commandWorked(
- db.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}}));
- db.adminCommand({
- moveChunk: db[coll].getFullName(),
- find: {rand: i / 10},
- to: shards[i % shards.length]
- });
- }
-
- assert.eq(config.chunks.count({'ns': db[coll].getFullName()}), 10);
- }
-
// insert points
var numPts = 10 * 1000;
var bulk = db[coll].initializeUnorderedBulkOp();
@@ -117,15 +90,6 @@ function test(db, sharded, indexType) {
});
}
-test(db, false, '2d');
-test(db, false, '2dsphere');
-
-var sharded = new ShardingTest({shards: 3, mongos: 1});
-assert.commandWorked(sharded.s0.adminCommand({enablesharding: "test"}));
-sharded.ensurePrimaryShard('test', sharded.shard1.shardName);
-
-test(sharded.getDB('test'), true, '2d');
-test(sharded.getDB('test'), true, '2dsphere');
-
-sharded.stop();
+test(db, '2d');
+test(db, '2dsphere');
})();
diff --git a/jstests/aggregation/mongos_merge.js b/jstests/aggregation/mongos_merge.js
deleted file mode 100644
index e99dd77eb02..00000000000
--- a/jstests/aggregation/mongos_merge.js
+++ /dev/null
@@ -1,511 +0,0 @@
-/**
- * Tests that split aggregations whose merge pipelines are eligible to run on mongoS do so, and
- * produce the expected results. Stages which are eligible to merge on mongoS include:
- *
- * - Splitpoints whose merge components are non-blocking, e.g. $skip, $limit, $sort, $sample.
- * - Non-splittable streaming stages, e.g. $match, $project, $unwind.
- * - Blocking stages in cases where 'allowDiskUse' is false, e.g. $group, $bucketAuto.
- *
- * Because wrapping these aggregations in a $facet stage will affect how the pipeline can be merged,
- * and will therefore invalidate the results of the test cases below, we tag this test to prevent it
- * running under the 'aggregation_facet_unwind' passthrough.
- *
- * @tags: [
- * do_not_wrap_aggregations_in_facets,
- * requires_sharding,
- * requires_spawning_own_processes,
- * requires_profiling,
- * ]
- */
-
-(function() {
-load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
-load('jstests/libs/geo_near_random.js'); // For GeoNearRandomTest.
-load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
-load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
-
-const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
-
-const mongosDB = st.s0.getDB(jsTestName());
-const mongosColl = mongosDB[jsTestName()];
-const unshardedColl = mongosDB[jsTestName() + "_unsharded"];
-
-const shard0DB = primaryShardDB = st.shard0.getDB(jsTestName());
-const shard1DB = st.shard1.getDB(jsTestName());
-
-assert.commandWorked(mongosDB.dropDatabase());
-
-// Always merge pipelines which cannot merge on mongoS on the primary shard instead, so we know
-// where to check for $mergeCursors.
-assert.commandWorked(
- mongosDB.adminCommand({setParameter: 1, internalQueryAlwaysMergeOnPrimaryShard: true}));
-
-// Enable sharding on the test DB and ensure its primary is shard0.
-assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
-st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
-// Shard the test collection on _id.
-assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
-// We will need to test $geoNear on this collection, so create a 2dsphere index.
-assert.commandWorked(mongosColl.createIndex({geo: "2dsphere"}));
-
-// We will test that $textScore metadata is not propagated to the user, so create a text index.
-assert.commandWorked(mongosColl.createIndex({text: "text"}));
-
-// Split the collection into 4 chunks: [MinKey, -100), [-100, 0), [0, 100), [100, MaxKey).
-assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: -100}}));
-assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 100}}));
-
-// Move the [0, 100) and [100, MaxKey) chunks to shard1.
-assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 50}, to: st.shard1.shardName}));
-assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
-
-// Create a random geo co-ord generator for testing.
-var georng = new GeoNearRandomTest(mongosColl);
-
-// Write 400 documents across the 4 chunks.
-for (let i = -200; i < 200; i++) {
- assert.commandWorked(mongosColl.insert(
- {_id: i, a: [i], b: {redactThisDoc: true}, c: true, geo: georng.mkPt(), text: "txt"}));
- assert.commandWorked(unshardedColl.insert({_id: i, x: i}));
-}
-
-let testNameHistory = new Set();
-
-// Clears system.profile and restarts the profiler on the primary shard. We enable profiling to
-// verify that no $mergeCursors occur during tests where we expect the merge to run on mongoS.
-function startProfiling() {
- assert.commandWorked(primaryShardDB.setProfilingLevel(0));
- primaryShardDB.system.profile.drop();
- assert.commandWorked(primaryShardDB.setProfilingLevel(2));
-}
-
-/**
- * Runs the aggregation specified by 'pipeline', verifying that:
- * - The number of documents returned by the aggregation matches 'expectedCount'.
- * - The merge was performed on a mongoS if 'mergeType' is 'mongos', and on a shard otherwise.
- */
-function assertMergeBehaviour(
- {testName, pipeline, mergeType, batchSize, allowDiskUse, expectedCount}) {
- // Ensure that this test has a unique name.
- assert(!testNameHistory.has(testName));
- testNameHistory.add(testName);
-
- // Create the aggregation options from the given arguments.
- const opts = {
- comment: testName,
- cursor: (batchSize ? {batchSize: batchSize} : {}),
- };
-
- if (allowDiskUse !== undefined) {
- opts.allowDiskUse = allowDiskUse;
- }
-
- // Verify that the explain() output's 'mergeType' field matches our expectation.
- assert.eq(
- assert.commandWorked(mongosColl.explain().aggregate(pipeline, Object.extend({}, opts)))
- .mergeType,
- mergeType);
-
- // Verify that the aggregation returns the expected number of results.
- assert.eq(mongosColl.aggregate(pipeline, opts).itcount(), expectedCount);
-
- // Verify that a $mergeCursors aggregation ran on the primary shard if 'mergeType' is not
- // 'mongos', and that no such aggregation ran otherwise.
- profilerHasNumMatchingEntriesOrThrow({
- profileDB: primaryShardDB,
- numExpectedMatches: (mergeType === "mongos" ? 0 : 1),
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: 1}
- }
- });
-}
-
-/**
- * Throws an assertion if the aggregation specified by 'pipeline' does not produce
- * 'expectedCount' results, or if the merge phase is not performed on the mongoS.
- */
-function assertMergeOnMongoS({testName, pipeline, batchSize, allowDiskUse, expectedCount}) {
- assertMergeBehaviour({
- testName: testName,
- pipeline: pipeline,
- mergeType: "mongos",
- batchSize: (batchSize || 10),
- allowDiskUse: allowDiskUse,
- expectedCount: expectedCount
- });
-}
-
-/**
- * Throws an assertion if the aggregation specified by 'pipeline' does not produce
- * 'expectedCount' results, or if the merge phase was not performed on a shard.
- */
-function assertMergeOnMongoD(
- {testName, pipeline, mergeType, batchSize, allowDiskUse, expectedCount}) {
- assertMergeBehaviour({
- testName: testName,
- pipeline: pipeline,
- mergeType: (mergeType || "anyShard"),
- batchSize: (batchSize || 10),
- allowDiskUse: allowDiskUse,
- expectedCount: expectedCount
- });
-}
-
-/**
- * Runs a series of test cases which will consistently merge on mongoS or mongoD regardless of
- * whether 'allowDiskUse' is true, false or omitted.
- */
-function runTestCasesWhoseMergeLocationIsConsistentRegardlessOfAllowDiskUse(allowDiskUse) {
- // Test that a $match pipeline with an empty merge stage is merged on mongoS.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_match_only",
- pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}],
- allowDiskUse: allowDiskUse,
- expectedCount: 400
- });
-
- // Test that a $sort stage which merges pre-sorted streams is run on mongoS.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_sort_presorted",
- pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sort: {_id: -1}}],
- allowDiskUse: allowDiskUse,
- expectedCount: 400
- });
-
- // Test that $skip is merged on mongoS.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_skip",
- pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sort: {_id: -1}}, {$skip: 300}],
- allowDiskUse: allowDiskUse,
- expectedCount: 100
- });
-
- // Test that $limit is merged on mongoS.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_limit",
- pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$limit: 300}],
- allowDiskUse: allowDiskUse,
- expectedCount: 300
- });
-
- // Test that $sample is merged on mongoS if it is the splitpoint, since this will result in
- // a merging $sort of presorted streams in the merge pipeline.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_sample_splitpoint",
- pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sample: {size: 300}}],
- allowDiskUse: allowDiskUse,
- expectedCount: 300
- });
-
- // Test that $geoNear is merged on mongoS.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_geo_near",
- pipeline:
- [{$geoNear: {near: [0, 0], distanceField: "distance", spherical: true}}, {$limit: 300}],
- allowDiskUse: allowDiskUse,
- expectedCount: 300
- });
-
- // Test that $facet is merged on mongoS if all pipelines are mongoS-mergeable regardless of
- // 'allowDiskUse'.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_facet_all_pipes_eligible_for_mongos",
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {
- $facet: {
- pipe1: [{$match: {_id: {$gt: 0}}}, {$skip: 10}, {$limit: 150}],
- pipe2: [{$match: {_id: {$lt: 0}}}, {$project: {_id: 0, a: 1}}]
- }
- }
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 1
- });
-
- // Test that $facet is merged on mongoD if any pipeline requires a primary shard merge,
- // regardless of 'allowDiskUse'.
- assertMergeOnMongoD({
- testName: "agg_mongos_merge_facet_pipe_needs_primary_shard_disk_use_" + allowDiskUse,
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {
- $facet: {
- pipe1: [{$match: {_id: {$gt: 0}}}, {$skip: 10}, {$limit: 150}],
- pipe2: [
- {$match: {_id: {$lt: 0}}},
- {
- $lookup: {
- from: unshardedColl.getName(),
- localField: "_id",
- foreignField: "_id",
- as: "lookupField"
- }
- }
- ]
- }
- }
- ],
- mergeType: "primaryShard",
- allowDiskUse: allowDiskUse,
- expectedCount: 1
- });
-
- // Test that a pipeline whose merging half can be run on mongos using only the mongos
- // execution machinery returns the correct results.
- // TODO SERVER-30882 Find a way to assert that all stages get absorbed by mongos.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_all_mongos_runnable_skip_and_limit_stages",
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {$sort: {_id: -1}},
- {$skip: 150},
- {$limit: 150},
- {$skip: 5},
- {$limit: 1},
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 1
- });
-
- // Test that a merge pipeline which needs to run on a shard is NOT merged on mongoS
- // regardless of 'allowDiskUse'.
- assertMergeOnMongoD({
- testName: "agg_mongos_merge_primary_shard_disk_use_" + allowDiskUse,
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {$_internalSplitPipeline: {mergeType: "anyShard"}}
- ],
- mergeType: "anyShard",
- allowDiskUse: allowDiskUse,
- expectedCount: 400
- });
-
- // Allow sharded $lookup.
- setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", true);
-
- // Test that $lookup is merged on the primary shard when the foreign collection is
- // unsharded.
- assertMergeOnMongoD({
- testName: "agg_mongos_merge_lookup_unsharded_disk_use_" + allowDiskUse,
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {
- $lookup: {
- from: unshardedColl.getName(),
- localField: "_id",
- foreignField: "_id",
- as: "lookupField"
- }
- }
- ],
- mergeType: "primaryShard",
- allowDiskUse: allowDiskUse,
- expectedCount: 400
- });
-
- // Test that $lookup is merged on mongoS when the foreign collection is sharded.
- assertMergeOnMongoS({
- testName: "agg_mongos_merge_lookup_sharded_disk_use_" + allowDiskUse,
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {
- $lookup: {
- from: mongosColl.getName(),
- localField: "_id",
- foreignField: "_id",
- as: "lookupField"
- }
- }
- ],
- mergeType: "mongos",
- allowDiskUse: allowDiskUse,
- expectedCount: 400
- });
-
- // Disable sharded $lookup.
- setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", false);
-}
-
-/**
- * Runs a series of test cases which will always merge on mongoD when 'allowDiskUse' is true,
- * and on mongoS when 'allowDiskUse' is false or omitted.
- */
-function runTestCasesWhoseMergeLocationDependsOnAllowDiskUse(allowDiskUse) {
- // All test cases should merge on mongoD if allowDiskUse is true, mongoS otherwise.
- const assertMergeOnMongoX = (allowDiskUse ? assertMergeOnMongoD : assertMergeOnMongoS);
-
- // Test that a blocking $sort is only merged on mongoS if 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_blocking_sort_no_disk_use",
- pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sort: {_id: -1}}, {$sort: {a: 1}}],
- allowDiskUse: allowDiskUse,
- expectedCount: 400
- });
-
- // Test that $group is only merged on mongoS if 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_group_allow_disk_use",
- pipeline:
- [{$match: {_id: {$gte: -200, $lte: 200}}}, {$group: {_id: {$mod: ["$_id", 150]}}}],
- allowDiskUse: allowDiskUse,
- expectedCount: 299
- });
-
- // Test that a blocking $sample is only merged on mongoS if 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_blocking_sample_allow_disk_use",
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {$sample: {size: 300}},
- {$sample: {size: 200}}
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 200
- });
-
- // Test that $facet is only merged on mongoS if all pipelines are mongoS-mergeable when
- // 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_facet_allow_disk_use",
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {
- $facet: {
- pipe1: [{$match: {_id: {$gt: 0}}}, {$skip: 10}, {$limit: 150}],
- pipe2: [{$match: {_id: {$lt: 0}}}, {$sort: {a: -1}}]
- }
- }
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 1
- });
-
- // Test that $bucketAuto is only merged on mongoS if 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_bucket_auto_allow_disk_use",
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {$bucketAuto: {groupBy: "$_id", buckets: 10}}
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 10
- });
-
- //
- // Test composite stages.
- //
-
- // Test that $bucket ($group->$sort) is merged on mongoS iff 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_bucket_allow_disk_use",
- pipeline: [
- {$match: {_id: {$gte: -200, $lte: 200}}},
- {$bucket: {groupBy: "$_id", boundaries: [-200, -150, -100, -50, 0, 50, 100, 150, 200]}}
- ],
- allowDiskUse: allowDiskUse,
- expectedCount: 8
- });
-
- // Test that $sortByCount ($group->$sort) is merged on mongoS iff 'allowDiskUse' isn't set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_sort_by_count_allow_disk_use",
- pipeline: [{$match: {_id: {$gte: -200, $lte: 200}}}, {$sortByCount: {$mod: ["$_id", 150]}}],
- allowDiskUse: allowDiskUse,
- expectedCount: 299
- });
-
- // Test that $count ($group->$project) is merged on mongoS iff 'allowDiskUse' is not set.
- assertMergeOnMongoX({
- testName: "agg_mongos_merge_count_allow_disk_use",
- pipeline: [{$match: {_id: {$gte: -150, $lte: 1500}}}, {$count: "doc_count"}],
- allowDiskUse: allowDiskUse,
- expectedCount: 1
- });
-}
-
-// Run all test cases for each potential value of 'allowDiskUse'.
-for (let allowDiskUse of [false, undefined, true]) {
- // Reset the profiler and clear the list of tests that ran on the previous iteration.
- testNameHistory.clear();
- startProfiling();
-
- // Run all test cases.
- runTestCasesWhoseMergeLocationIsConsistentRegardlessOfAllowDiskUse(allowDiskUse);
- runTestCasesWhoseMergeLocationDependsOnAllowDiskUse(allowDiskUse);
-}
-
-// Start a new profiling session before running the final few tests.
-startProfiling();
-
-// Test that merge pipelines containing all mongos-runnable stages produce the expected output.
-assertMergeOnMongoS({
- testName: "agg_mongos_merge_all_mongos_runnable_stages",
- pipeline: [
- {$geoNear: {near: [0, 0], distanceField: "distance", spherical: true}},
- {$sort: {a: 1}},
- {$skip: 150},
- {$limit: 150},
- {$addFields: {d: true}},
- {$unwind: "$a"},
- {$sample: {size: 100}},
- {$project: {c: 0, geo: 0, distance: 0}},
- {$group: {_id: "$_id", doc: {$push: "$$CURRENT"}}},
- {$unwind: "$doc"},
- {$replaceRoot: {newRoot: "$doc"}},
- {$facet: {facetPipe: [{$match: {_id: {$gte: -200, $lte: 200}}}]}},
- {$unwind: "$facetPipe"},
- {$replaceRoot: {newRoot: "$facetPipe"}},
- {
- $redact:
- {$cond: {if: {$eq: ["$redactThisDoc", true]}, then: "$$PRUNE", else: "$$DESCEND"}}
- },
- {
- $match: {
- _id: {$gte: -50, $lte: 100},
- a: {$type: "number", $gte: -50, $lte: 100},
- b: {$exists: false},
- c: {$exists: false},
- d: true,
- geo: {$exists: false},
- distance: {$exists: false},
- text: "txt"
- }
- }
- ],
- expectedCount: 100
-});
-
-// Test that metadata is not propagated to the user when a pipeline which produces metadata
-// fields merges on mongoS.
-const metaDataTests = [
- {pipeline: [{$sort: {_id: -1}}], verifyNoMetaData: (doc) => assert.isnull(doc.$sortKey)},
- {
- pipeline: [{$match: {$text: {$search: "txt"}}}],
- verifyNoMetaData: (doc) => assert.isnull(doc.$textScore)
- },
- {pipeline: [{$sample: {size: 300}}], verifyNoMetaData: (doc) => assert.isnull(doc.$randVal)},
- {
- pipeline: [{$match: {$text: {$search: "txt"}}}, {$sort: {text: 1}}],
- verifyNoMetaData: (doc) =>
- assert.docEq([doc.$textScore, doc.$sortKey], [undefined, undefined])
- }
-];
-
-for (let metaDataTest of metaDataTests) {
- assert.gte(mongosColl.aggregate(metaDataTest.pipeline).itcount(), 300);
- mongosColl.aggregate(metaDataTest.pipeline).forEach(metaDataTest.verifyNoMetaData);
-}
-
-st.stop();
-})();
diff --git a/jstests/aggregation/mongos_slaveok.js b/jstests/aggregation/mongos_slaveok.js
deleted file mode 100644
index f1c235266fc..00000000000
--- a/jstests/aggregation/mongos_slaveok.js
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Tests aggregate command against mongos with slaveOk. For more tests on read preference,
- * please refer to jstests/sharding/read_pref_cmd.js.
- * @tags: [
- * requires_replication,
- * requires_sharding,
- * requires_spawning_own_processes,
- * requires_profiling
- * ]
- */
-(function() {
-load('jstests/replsets/rslib.js');
-
-var NODES = 2;
-
-var doTest = function(st, doSharded) {
- var testDB = st.s.getDB('test');
-
- if (doSharded) {
- testDB.adminCommand({enableSharding: 'test'});
- testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- }
-
- testDB.user.insert({x: 10}, {writeConcern: {w: NODES}});
- testDB.setSlaveOk(true);
-
- var secNode = st.rs0.getSecondary();
- secNode.getDB('test').setProfilingLevel(2);
-
- // wait for mongos to recognize that the slave is up
- awaitRSClientHosts(st.s, secNode, {ok: true});
-
- var res = testDB.runCommand({aggregate: 'user', pipeline: [{$project: {x: 1}}], cursor: {}});
- assert(res.ok, 'aggregate command failed: ' + tojson(res));
-
- var profileQuery = {op: 'command', ns: 'test.user', 'command.aggregate': 'user'};
- var profileDoc = secNode.getDB('test').system.profile.findOne(profileQuery);
-
- assert(profileDoc != null);
- testDB.dropDatabase();
-};
-
-var st = new ShardingTest({shards: {rs0: {oplogSize: 10, nodes: NODES}}});
-
-doTest(st, false);
-doTest(st, true);
-
-st.stop();
-})();
diff --git a/jstests/aggregation/pipeline_pass_through_from_mongos.js b/jstests/aggregation/pipeline_pass_through_from_mongos.js
deleted file mode 100644
index 46805d74372..00000000000
--- a/jstests/aggregation/pipeline_pass_through_from_mongos.js
+++ /dev/null
@@ -1,165 +0,0 @@
-/**
- * Tests to verify that the aggregation pipeline passthrough behaviour works as expected for stages
- * which have sub-pipelines, whose stages may have differing passthrough constraints. This test
- * exercises the fix for SERVER-41290.
- * @tags: [requires_sharding, requires_profiling]
- */
-(function() {
-'use strict';
-
-load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
-
-const st = new ShardingTest({shards: 2});
-const mongosDB = st.s0.getDB(jsTestName());
-assert.commandWorked(st.s0.adminCommand({enableSharding: jsTestName()}));
-st.ensurePrimaryShard(jsTestName(), st.shard0.shardName);
-const mongosColl = mongosDB.test;
-const primaryShard = st.shard0.getDB(jsTestName());
-const shard1DB = st.shard1.getDB(jsTestName());
-
-assert.commandWorked(primaryShard.setProfilingLevel(2));
-assert.commandWorked(shard1DB.setProfilingLevel(2));
-
-// Verify that the $lookup is passed through to the primary shard when all its sub-pipeline
-// stages can be passed through.
-let testName = "sub_pipeline_can_be_passed_through";
-assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [
- {$lookup: {pipeline: [{$match: {a: "val"}}], from: mongosDB.otherColl.getName(), as: "c"}}
- ],
- cursor: {},
- comment: testName
-}));
-profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShard,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
-});
-profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
-});
-
-// Test to verify that the mongoS doesn't pass the pipeline through to the primary shard when
-// $lookup's sub-pipeline has one or more stages which don't allow passthrough. In this
-// sub-pipeline, the $merge stage is not allowed to pass through, which forces the pipeline to
-// be parsed on mongoS. Since $merge is not allowed within a $lookup, the command thus fails on
-// mongoS without ever reaching a shard. This test-case exercises the bug described in
-// SERVER-41290.
-const pipelineForLookup = [
- {
- $lookup: {
- pipeline: [{$match: {a: "val"}}, {$merge: {into: "merge_collection"}}],
- from: mongosDB.otherColl.getName(),
- as: "c",
- }
- },
- ];
-testName = "lookup_with_merge_cannot_be_passed_through";
-assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: pipelineForLookup,
- cursor: {},
- comment: testName
-}),
- 51047);
-profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShard,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
-});
-profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
-});
-
-// Same test as the above with another level of nested $lookup.
-const pipelineForNestedLookup = [{
- $lookup: {
- from: mongosDB.otherColl.getName(),
- as: "field",
- pipeline: [{
- $lookup: {
- pipeline: [{$match: {a: "val"}}, {$merge: {into: "merge_collection"}}],
- from: mongosDB.nested.getName(),
- as: "c",
- }
- }]
- }
- }];
-testName = "nested_lookup_with_merge_cannot_be_passed_through";
-assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: pipelineForNestedLookup,
- cursor: {},
- comment: testName
-}),
- 51047);
-profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShard,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
-});
-profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
-});
-
-// Test to verify that the mongoS doesn't pass the pipeline through to the primary shard when
-// one or more of $facet's sub-pipelines have one or more stages which don't allow passthrough.
-// In this sub-pipeline, the $merge stage is not allowed to pass through, which forces the
-// pipeline to be parsed on mongoS. Since $merge is not allowed within a $facet, the command
-// thus fails on mongoS without ever reaching a shard. This test-case exercises the bug
-// described in SERVER-41290.
-const pipelineForFacet = [
- {
- $facet: {
- field0: [{$match: {a: "val"}}],
- field1: [{$match: {a: "val"}}, {$merge: {into: "merge_collection"}}],
- }
- },
-];
-testName = "facet_with_merge_cannot_be_passed_through";
-assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: pipelineForFacet,
- cursor: {},
- comment: testName
-}),
- 40600);
-profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShard,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
-});
-profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
-});
-
-// Same test as the above with another level of nested $facet.
-const pipelineForNestedFacet = [
- {
- $facet: {
- field0: [{$match: {a: "val"}}],
- field1:
- [{$facet: {field2: [{$match: {a: "val"}}, {$merge: {into: "merge_collection"}}]}}],
- }
- },
-];
-testName = "facet_with_merge_cannot_be_passed_through";
-assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
-    pipeline: pipelineForNestedFacet,
- cursor: {},
- comment: testName
-}),
- 40600);
-profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShard,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
-});
-profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
-});
-
-st.stop();
-})();
diff --git a/jstests/aggregation/shard_targeting.js b/jstests/aggregation/shard_targeting.js
deleted file mode 100644
index 7a913f43967..00000000000
--- a/jstests/aggregation/shard_targeting.js
+++ /dev/null
@@ -1,402 +0,0 @@
-/**
- * Test that aggregations are sent directly to a single shard in the case where the data required by
- * the pipeline's initial query all resides on that shard, and that we correctly back out and
- * re-target in the event that a stale config exception is received.
- *
- * In particular:
- *
- * - If the data required by the aggregation all resides on a single shard (including multi-chunk
- * range $matches), send the entire pipeline to that shard and do not perform a $mergeCursors.
- * - In the case of a stage which requires a primary shard merge, do not split the pipeline or
- * generate a $mergeCursors if the data required by the aggregation all resides on the primary
- * shard.
- *
- * Because wrapping these aggregations in a $facet stage will affect how the pipeline is targeted,
- * and will therefore invalidate the results of the test cases below, we tag this test to prevent it
- * running under the 'aggregation_facet_unwind' passthrough.
- *
- * @tags: [
- * do_not_wrap_aggregations_in_facets,
- * requires_sharding,
- * requires_spawning_own_processes,
- * requires_profiling,
- * ]
- */
-(function() {
-load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
-
-const st = new ShardingTest({shards: 2, mongos: 2, config: 1});
-
-// mongosForAgg will be used to perform all aggregations.
-// mongosForMove does all chunk migrations, leaving mongosForAgg with stale config metadata.
-const mongosForAgg = st.s0;
-const mongosForMove = st.s1;
-
-const mongosDB = mongosForAgg.getDB(jsTestName());
-const mongosColl = mongosDB.test;
-
-const shard0DB = primaryShardDB = st.shard0.getDB(jsTestName());
-const shard1DB = st.shard1.getDB(jsTestName());
-
-// Turn off best-effort recipient metadata refresh post-migration commit on both shards because
-// it creates non-determinism for the profiler.
-assert.commandWorked(st.shard0.getDB('admin').runCommand(
- {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
-assert.commandWorked(st.shard1.getDB('admin').runCommand(
- {configureFailPoint: 'doNotRefreshRecipientAfterCommit', mode: 'alwaysOn'}));
-
-// Turn off automatic shard refresh in mongos when a stale config error is thrown.
-assert.commandWorked(mongosForAgg.getDB('admin').runCommand(
- {configureFailPoint: 'doNotRefreshShardsOnRetargettingError', mode: 'alwaysOn'}));
-
-assert.commandWorked(mongosDB.dropDatabase());
-
-// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
-assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
-st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
-
-// Shard the test collection on _id.
-assert.commandWorked(
- mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
-
-// Split the collection into 4 chunks: [MinKey, -100), [-100, 0), [0, 100), [100, MaxKey).
-assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: -100}}));
-assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
-assert.commandWorked(mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 100}}));
-
-// Move the [0, 100) and [100, MaxKey) chunks to st.shard1.shardName.
-assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 50}, to: st.shard1.shardName}));
-assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
-
-// Write one document into each of the chunks.
-assert.commandWorked(mongosColl.insert({_id: -150}));
-assert.commandWorked(mongosColl.insert({_id: -50}));
-assert.commandWorked(mongosColl.insert({_id: 50}));
-assert.commandWorked(mongosColl.insert({_id: 150}));
-
-const shardExceptions =
- [ErrorCodes.StaleConfig, ErrorCodes.StaleShardVersion, ErrorCodes.StaleEpoch];
-
-// Create an $_internalSplitPipeline stage that forces the merge to occur on the Primary shard.
-const forcePrimaryMerge = [{$_internalSplitPipeline: {mergeType: "primaryShard"}}];
-
-function runAggShardTargetTest({splitPoint}) {
- // Ensure that both mongoS have up-to-date caches, and enable the profiler on both shards.
- assert.commandWorked(mongosForAgg.getDB("admin").runCommand({flushRouterConfig: 1}));
- assert.commandWorked(mongosForMove.getDB("admin").runCommand({flushRouterConfig: 1}));
-
- assert.commandWorked(shard0DB.setProfilingLevel(2));
- assert.commandWorked(shard1DB.setProfilingLevel(2));
-
- //
- // Test cases.
- //
-
- let testName, outColl;
-
- // Test that a range query is passed through if the chunks encompassed by the query all lie
- // on a single shard, in this case st.shard0.shardName.
- testName = "agg_shard_targeting_range_single_shard_all_chunks_on_same_shard";
- assert.eq(mongosColl
- .aggregate([{$match: {_id: {$gte: -150, $lte: -50}}}].concat(splitPoint),
- {comment: testName})
- .itcount(),
- 2);
-
- // We expect one aggregation on shard0, none on shard1, and no $mergeCursors on shard0 (the
- // primary shard).
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard0DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {"command.aggregate": mongosColl.getName(), "command.comment": testName}
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShardDB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: 1}
- }
- });
-
- // Test that a range query with a stage that requires a primary shard merge ($out in this
- // case) is passed through if the chunk ranges encompassed by the query all lie on the
- // primary shard.
- testName = "agg_shard_targeting_range_all_chunks_on_primary_shard_out_no_merge";
- outColl = mongosDB[testName];
-
- assert.commandWorked(mongosDB.runCommand({
- aggregate: mongosColl.getName(),
- pipeline: [{$match: {_id: {$gte: -150, $lte: -50}}}].concat(splitPoint).concat([
- {$out: testName}
- ]),
- comment: testName,
- cursor: {}
- }));
-
- // We expect one aggregation on shard0, none on shard1, and no $mergeCursors on shard0 (the
- // primary shard). We expect some of these commands may fail with staleDBVersion and be
- // retried, so we will ignore those entries in the profiler.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard0DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- errMsg: {$exists: false}
- }
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: shard1DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- errMsg: {$exists: false}
- }
- });
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShardDB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: 1},
- errMsg: {$exists: false}
- }
- });
-
- // Verify that the contents of the $out collection are as expected.
- assert.eq(outColl.find().sort({_id: 1}).toArray(), [{_id: -150}, {_id: -50}]);
-
- // Test that a passthrough will back out and split the pipeline if we try to target a single
- // shard, get a stale config exception, and find that more than one shard is now involved.
- // Move the _id: [-100, 0) chunk from st.shard0.shardName to st.shard1.shardName via
- // mongosForMove.
- assert.commandWorked(mongosForMove.getDB("admin").runCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: -50},
- to: st.shard1.shardName,
- }));
-
- // Run the same aggregation that targeted a single shard via the now-stale mongoS. It should
- // attempt to send the aggregation to st.shard0.shardName, hit a stale config exception,
- // split the pipeline and redispatch. We append an $_internalSplitPipeline stage in order to
- // force a shard merge rather than a mongoS merge.
- testName = "agg_shard_targeting_backout_passthrough_and_split_if_cache_is_stale";
- assert.eq(mongosColl
- .aggregate([{$match: {_id: {$gte: -150, $lte: -50}}}]
- .concat(splitPoint)
- .concat(forcePrimaryMerge),
- {comment: testName})
- .itcount(),
- 2);
-
- // Before the first dispatch:
- // - mongosForMove and st.shard0.shardName (the donor shard) are up to date.
- // - mongosForAgg and st.shard1.shardName are stale. mongosForAgg incorrectly believes that
- // the necessary data is all on st.shard0.shardName.
- //
- // We therefore expect that:
- // - mongosForAgg will throw a stale config error when it attempts to establish a
- // single-shard cursor on st.shard0.shardName (attempt 1).
- // - mongosForAgg will back out, refresh itself, and redispatch to both shards.
- // - st.shard1.shardName will throw a stale config and refresh itself when the split
- // pipeline is sent to it (attempt 2).
- // - mongosForAgg will back out and redispatch (attempt 3).
- // - The aggregation will succeed on the third dispatch.
-
- // We confirm this behaviour via the following profiler results:
-
- // - One aggregation on st.shard0.shardName with a shard version exception (indicating that
- // the mongoS was stale).
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard0DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$in: shardExceptions}
- }
- });
-
- // - One aggregation on st.shard1.shardName with a shard version exception (indicating that
- // the shard was stale).
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard1DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$in: shardExceptions}
- }
- });
-
- // - At most two aggregations on st.shard0.shardName with no stale config exceptions. The
- // first, if present, is an aborted cursor created if the command reaches
- // st.shard0.shardName before st.shard1.shardName throws its stale config exception during
- // attempt 2. The second profiler entry is from the aggregation which succeeded.
- profilerHasAtLeastOneAtMostNumMatchingEntriesOrThrow({
- profileDB: shard0DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$exists: false}
- },
- maxExpectedMatches: 2
- });
-
- // - One aggregation on st.shard1.shardName with no stale config exception.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard1DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$exists: false}
- }
- });
-
- // - One $mergeCursors aggregation on primary st.shard0.shardName, since we eventually
- // target both shards after backing out the passthrough and splitting the pipeline.
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: primaryShardDB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: true}
- }
- });
-
- // Move the _id: [-100, 0) chunk back from st.shard1.shardName to st.shard0.shardName via
- // mongosForMove. Shard0 and mongosForAgg are now stale.
- assert.commandWorked(mongosForMove.getDB("admin").runCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: -50},
- to: st.shard0.shardName,
- _waitForDelete: true
- }));
-
- // Run the same aggregation via the now-stale mongoS. It should split the pipeline, hit a
- // stale config exception, and reset to the original single-shard pipeline upon refresh. We
- // append an $_internalSplitPipeline stage in order to force a shard merge rather than a
- // mongoS merge.
- testName = "agg_shard_targeting_backout_split_pipeline_and_reassemble_if_cache_is_stale";
- assert.eq(mongosColl
- .aggregate([{$match: {_id: {$gte: -150, $lte: -50}}}]
- .concat(splitPoint)
- .concat(forcePrimaryMerge),
- {comment: testName})
- .itcount(),
- 2);
-
- // Before the first dispatch:
- // - mongosForMove and st.shard1.shardName (the donor shard) are up to date.
- // - mongosForAgg and st.shard0.shardName are stale. mongosForAgg incorrectly believes that
- // the necessary data is spread across both shards.
- //
- // We therefore expect that:
- // - mongosForAgg will throw a stale config error when it attempts to establish a cursor on
- // st.shard1.shardName (attempt 1).
- // - mongosForAgg will back out, refresh itself, and redispatch to st.shard0.shardName.
- // - st.shard0.shardName will throw a stale config and refresh itself when the pipeline is
- // sent to it (attempt 2).
- // - mongosForAgg will back out, and redispatch (attempt 3).
- // - The aggregation will succeed on the third dispatch.
-
- // We confirm this behaviour via the following profiler results:
-
- // - One aggregation on st.shard1.shardName with a shard version exception (indicating that
- // the mongoS was stale).
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard1DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$in: shardExceptions}
- }
- });
-
- // - One aggregation on st.shard0.shardName with a shard version exception (indicating that
- // the shard was stale).
- profilerHasSingleMatchingEntryOrThrow({
- profileDB: shard0DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$in: shardExceptions}
- }
- });
-
- // - At most two aggregations on st.shard0.shardName with no stale config exceptions. The
- // first, if present, is an aborted cursor created if the command reaches
- // st.shard0.shardName before st.shard1.shardName throws its stale config exception during
- // attempt 1. The second profiler entry is the aggregation which succeeded.
- profilerHasAtLeastOneAtMostNumMatchingEntriesOrThrow({
- profileDB: shard0DB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: false},
- errCode: {$exists: false}
- },
- maxExpectedMatches: 2
- });
-
- // No $mergeCursors aggregation on primary st.shard0.shardName, since after backing out the
- // split pipeline we eventually target only st.shard0.shardName.
- profilerHasZeroMatchingEntriesOrThrow({
- profileDB: primaryShardDB,
- filter: {
- "command.aggregate": mongosColl.getName(),
- "command.comment": testName,
- "command.pipeline.$mergeCursors": {$exists: true}
- }
- });
-
- // Clean up the test run by dropping the $out collection and resetting the profiler.
- assert(outColl.drop());
-
- assert.commandWorked(shard0DB.setProfilingLevel(0));
- assert.commandWorked(shard1DB.setProfilingLevel(0));
-
- assert(shard0DB.system.profile.drop());
- assert(shard1DB.system.profile.drop());
-}
-
-// Run tests with a variety of split points, testing the pipeline split and re-assembly logic in
-// cases where the merge pipeline is empty, where the split stage is moved from the shard to the
-// merge pipeline ($facet, $lookup), and where there are both shard and merge versions of the
-// split source ($sort, $group, $limit). Each test case will ultimately produce the same output.
-runAggShardTargetTest({splitPoint: []});
-runAggShardTargetTest({splitPoint: [{$sort: {_id: 1}}]});
-runAggShardTargetTest({splitPoint: [{$group: {_id: "$_id"}}]});
-runAggShardTargetTest({splitPoint: [{$limit: 4}]});
-runAggShardTargetTest({
- splitPoint: [
- {$facet: {facetPipe: [{$match: {_id: {$gt: MinKey}}}]}},
- {$unwind: "$facetPipe"},
- {$replaceRoot: {newRoot: "$facetPipe"}}
- ]
-});
-runAggShardTargetTest({
- splitPoint: [
- {
- $lookup: {
- from: "dummycoll",
- localField: "dummyfield",
- foreignField: "dummyfield",
- as: "lookupRes"
- }
- },
- {$project: {lookupRes: 0}}
- ]
- });
-
-st.stop();
-})();
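The merge-forcing mechanism that the deleted shard_targeting.js test leaned on can be summarized in a short sketch. This is illustrative only: 'coll' is a placeholder for a sharded collection handle, and $_internalSplitPipeline is an internal stage intended for testing rather than production pipelines.

// Force the split pipeline to merge on the primary shard instead of on mongoS; swapping the
// mergeType to "mongos" forces a mongoS merge instead.
const forcePrimaryMerge = [{$_internalSplitPipeline: {mergeType: "primaryShard"}}];
const count = coll.aggregate([{$match: {_id: {$gte: -150, $lte: -50}}}].concat(forcePrimaryMerge))
                  .itcount();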
diff --git a/jstests/aggregation/sharded_agg_cleanup_on_error.js b/jstests/aggregation/sharded_agg_cleanup_on_error.js
deleted file mode 100644
index 1a57fffe018..00000000000
--- a/jstests/aggregation/sharded_agg_cleanup_on_error.js
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
- * Test that when a sharded aggregation errors on just one shard, cursors on all other shards are
- * cleaned up correctly.
- *
- * Must be banned from suites that use a sharding fixture, since this test starts its own sharded
- * cluster. Must be banned in the $facet passthrough, since that suite changes the pipeline
- * splitting and merging behavior expected by this test.
- * @tags: [requires_sharding,do_not_wrap_aggregations_in_facets]
- */
-(function() {
-"use strict";
-
-// For assertMergeFailsForAllModesWithCode.
-load("jstests/aggregation/extras/merge_helpers.js");
-load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
-
-const kFailPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
-const kFailpointOptions = {
- shouldCheckForInterrupt: true
-};
-
-const st = new ShardingTest({shards: 2});
-const kDBName = "test";
-const kDivideByZeroErrCode = 16608;
-const mongosDB = st.s.getDB(kDBName);
-const shard0DB = st.shard0.getDB(kDBName);
-const shard1DB = st.shard1.getDB(kDBName);
-
-let coll = mongosDB.sharded_agg_cleanup_on_error;
-
-for (let i = 0; i < 10; i++) {
- assert.commandWorked(coll.insert({_id: i}));
-}
-
-st.shardColl(coll, {_id: 1}, {_id: 5}, {_id: 6}, kDBName, false);
-st.ensurePrimaryShard(kDBName, st.shard0.name);
-
-function assertFailsAndCleansUpCursors({pipeline, errCode}) {
- let cmdRes = mongosDB.runCommand(
- {aggregate: coll.getName(), pipeline: pipeline, cursor: {batchSize: 0}});
- assert.commandWorked(cmdRes);
- assert.neq(0, cmdRes.cursor.id);
- assert.eq(coll.getFullName(), cmdRes.cursor.ns);
- assert.eq(0, cmdRes.cursor.firstBatch.length);
-
- cmdRes = mongosDB.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()});
- assert.commandFailedWithCode(cmdRes, errCode);
-
- // Neither mongos nor the shards should leave cursors open. By the time we get here, the
- // cursor which was hanging on shard 1 will have been marked interrupted, but isn't
- // guaranteed to be deleted yet. Thus, we use an assert.soon().
- assert.eq(mongosDB.serverStatus().metrics.cursor.open.total, 0);
- assert.eq(shard0DB.serverStatus().metrics.cursor.open.total, 0);
- assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.pinned == 0);
-}
-
-try {
- // Set up a fail point which causes getMore to hang on shard 1.
- assert.commandWorked(shard1DB.adminCommand(
- {configureFailPoint: kFailPointName, mode: "alwaysOn", data: kFailpointOptions}));
-
- // Issue an aggregation that will fail during a getMore on shard 0, and make sure that
- // this correctly kills the hanging cursor on shard 1. Use $_internalSplitPipeline to ensure
- // that this pipeline merges on mongos.
- assertFailsAndCleansUpCursors({
- pipeline: [
- {$project: {out: {$divide: ["$_id", 0]}}},
- {$_internalSplitPipeline: {mergeType: "mongos"}}
- ],
- errCode: kDivideByZeroErrCode
- });
-
- // Repeat the test above, but this time use $_internalSplitPipeline to force the merge to
- // take place on shard 0.
- assertFailsAndCleansUpCursors({
- pipeline: [
- {$project: {out: {$divide: ["$_id", 0]}}},
- {$_internalSplitPipeline: {mergeType: "primaryShard"}}
- ],
- errCode: kDivideByZeroErrCode
- });
-} finally {
- assert.commandWorked(shard1DB.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
-}
-
-// Test that aggregations which fail to establish a merging shard cursor also clean up the open
-// shard cursors.
-try {
- // Enable the failpoint to fail on establishing a merging shard cursor.
- assert.commandWorked(mongosDB.adminCommand({
- configureFailPoint: "shardedAggregateFailToEstablishMergingShardCursor",
- mode: "alwaysOn"
- }));
-
- // Run an aggregation which requires a merging shard pipeline. This should fail because of
- // the failpoint.
- assertErrorCode(coll, [{$out: "target"}], ErrorCodes.FailPointEnabled);
-
- // Neither mongos nor the shards should leave cursors open.
- assert.eq(mongosDB.serverStatus().metrics.cursor.open.total, 0);
- assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.total == 0);
- assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.total == 0);
-
-} finally {
- assert.commandWorked(mongosDB.adminCommand(
- {configureFailPoint: "shardedAggregateFailToEstablishMergingShardCursor", mode: "off"}));
-}
-
-// Test that aggregations involving $exchange correctly clean up the producer cursors.
-try {
- assert.commandWorked(mongosDB.adminCommand({
- configureFailPoint: "shardedAggregateFailToDispatchExchangeConsumerPipeline",
- mode: "alwaysOn"
- }));
-
- // Run an aggregation which is eligible for $exchange. This should assert because of
- // the failpoint. Add a $group stage to force an exchange-eligible split of the pipeline
- // before the $merge. Without the $group we won't use the exchange optimization and instead
- // will send the $merge to each shard.
- st.shardColl(mongosDB.target, {_id: 1}, {_id: 0}, {_id: 1}, kDBName, false);
-
- assertMergeFailsForAllModesWithCode({
- source: coll,
- target: mongosDB.target,
- prevStages: [{$group: {_id: "$fakeShardKey"}}],
- errorCodes: ErrorCodes.FailPointEnabled
- });
-
- // Neither mongos nor the shards should leave cursors open.
- assert.eq(mongosDB.serverStatus().metrics.cursor.open.total, 0);
- assert.soon(() => shard0DB.serverStatus().metrics.cursor.open.total == 0);
- assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.total == 0);
-
-} finally {
- assert.commandWorked(mongosDB.adminCommand({
- configureFailPoint: "shardedAggregateFailToDispatchExchangeConsumerPipeline",
- mode: "off"
- }));
-}
-
-st.stop();
-})();
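The cursor-cleanup checks in the deleted test all reduce to the same pattern: enable a failpoint, run the operation that is expected to fail, verify that no cursors remain open, and always disable the failpoint afterwards. A minimal sketch of that pattern, assuming 'mongosDB' and 'shard1DB' handles like those in the test above:

try {
    // Hang getMore on shard 1 so that a failure elsewhere must clean up its pinned cursor.
    assert.commandWorked(shard1DB.adminCommand({
        configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch",
        mode: "alwaysOn",
        data: {shouldCheckForInterrupt: true}
    }));
    // ... run the aggregation and getMore that are expected to fail ...
    // No cursors should remain open; the interrupted cursor may take a moment to be reaped.
    assert.eq(mongosDB.serverStatus().metrics.cursor.open.total, 0);
    assert.soon(() => shard1DB.serverStatus().metrics.cursor.open.pinned == 0);
} finally {
    assert.commandWorked(shard1DB.adminCommand(
        {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}));
}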
diff --git a/jstests/aggregation/sources/addFields/use_cases.js b/jstests/aggregation/sources/addFields/use_cases.js
index 34703a75907..4df220e95ca 100644
--- a/jstests/aggregation/sources/addFields/use_cases.js
+++ b/jstests/aggregation/sources/addFields/use_cases.js
@@ -2,10 +2,6 @@
* $addFields can be used to add fixed and computed fields to documents while preserving the
* original document. Verify that using $addFields and adding computed fields in a $project yield
* the same result.
- * @tags: [
- * requires_sharding,
- * requires_spawning_own_processes,
- * ]
*/
(function() {
@@ -14,54 +10,38 @@
// For arrayEq.
load("jstests/aggregation/extras/utils.js");
-const dbName = "test";
const collName = jsTest.name();
+const coll = db.getCollection(collName);
+coll.drop();
-function doExecutionTest(conn) {
- const coll = conn.getDB(dbName).getCollection(collName);
- coll.drop();
-
- // Insert a bunch of documents of the form above.
- const nDocs = 10;
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert({"_id": i, "2i": i * 2, "3i": i * 3}));
- }
-
- // Add the minimum, maximum, and average temperatures, and make sure that doing the same
- // with addFields yields the correct answer.
- // First compute with $project, since we know all the fields in this document.
- let projectPipe = [{
- $project: {
- "2i": 1,
- "3i": 1,
- "6i^2": {"$multiply": ["$2i", "$3i"]},
- // _id is implicitly included.
- }
- }];
- let correct = coll.aggregate(projectPipe).toArray();
-
- // Then compute the same results using $addFields.
- let addFieldsPipe = [{
- $addFields: {
- "6i^2": {"$multiply": ["$2i", "$3i"]},
- // All other fields are implicitly included.
- }
- }];
- let addFieldsResult = coll.aggregate(addFieldsPipe).toArray();
-
- // Then assert they are the same.
- assert(arrayEq(addFieldsResult, correct),
- "$addFields does not work the same as a $project with computed and included fields");
+const nDocs = 10;
+for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({"_id": i, "2i": i * 2, "3i": i * 3}));
}
-// Test against the standalone started by resmoke.py.
-let conn = db.getMongo();
-doExecutionTest(conn);
-print("Success! Standalone execution use case test for $addFields passed.");
+// Add a computed field ("6i^2") and make sure that computing it with $addFields yields the same
+// answer as including it in a $project.
+// First compute with $project, since we know all the fields in this document.
+let projectPipe = [{
+ $project: {
+ "2i": 1,
+ "3i": 1,
+ "6i^2": {"$multiply": ["$2i", "$3i"]},
+ // _id is implicitly included.
+ }
+}];
+let correct = coll.aggregate(projectPipe).toArray();
+
+// Then compute the same results using $addFields.
+let addFieldsPipe = [{
+ $addFields: {
+ "6i^2": {"$multiply": ["$2i", "$3i"]},
+ // All other fields are implicitly included.
+ }
+}];
+let addFieldsResult = coll.aggregate(addFieldsPipe).toArray();
-// Test against a sharded cluster.
-let st = new ShardingTest({shards: 2});
-doExecutionTest(st.s0);
-st.stop();
-print("Success! Sharding use case test for $addFields passed.");
+// Then assert they are the same.
+assert(arrayEq(addFieldsResult, correct),
+ "$addFields does not work the same as a $project with computed and included fields");
}());
diff --git a/jstests/aggregation/sources/addFields/weather.js b/jstests/aggregation/sources/addFields/weather.js
index feb57c9a8a6..6b440bf5f66 100644
--- a/jstests/aggregation/sources/addFields/weather.js
+++ b/jstests/aggregation/sources/addFields/weather.js
@@ -2,10 +2,6 @@
* $addFields can be used to add fixed and computed fields to documents while preserving the
* original document. Verify that using $addFields and adding computed fields in a $project yield
* the same result. Use the sample case of computing weather metadata.
- * @tags: [
- * requires_sharding,
- * requires_spawning_own_processes,
- * ]
*/
(function() {
@@ -14,11 +10,17 @@
// For arrayEq.
load("jstests/aggregation/extras/utils.js");
-const dbName = "test";
const collName = jsTest.name();
+const coll = db.getCollection(collName);
+coll.drop();
Random.setRandomSeed();
+const nDocs = 10;
+for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert(generateRandomDocument()));
+}
+
/**
* Helper to generate an array of specified length of numbers in the specified range.
*/
@@ -49,56 +51,34 @@ function generateRandomDocument() {
};
}
-function doExecutionTest(conn) {
- const coll = conn.getDB(dbName).getCollection(collName);
- coll.drop();
-
- // Insert a bunch of documents of the form above.
- const nDocs = 10;
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert(generateRandomDocument()));
+// Add the minimum, maximum, and average temperatures, and make sure that doing the same
+// with addFields yields the correct answer.
+// First compute with $project, since we know all the fields in this document.
+let projectWeatherPipe = [{
+ $project: {
+ "month": 1,
+ "day": 1,
+ "temperatures": 1,
+ "minTemp": {"$min": "$temperatures"},
+ "maxTemp": {"$max": "$temperatures"},
+ "average": {"$avg": "$temperatures"},
+ // _id is implicitly included.
}
+}];
+let correctWeather = coll.aggregate(projectWeatherPipe).toArray();
- // Add the minimum, maximum, and average temperatures, and make sure that doing the same
- // with addFields yields the correct answer.
- // First compute with $project, since we know all the fields in this document.
- let projectWeatherPipe = [{
- $project: {
- "month": 1,
- "day": 1,
- "temperatures": 1,
- "minTemp": {"$min": "$temperatures"},
- "maxTemp": {"$max": "$temperatures"},
- "average": {"$avg": "$temperatures"},
- // _id is implicitly included.
- }
- }];
- let correctWeather = coll.aggregate(projectWeatherPipe).toArray();
-
- // Then compute the same results using $addFields.
- let addFieldsWeatherPipe = [{
- $addFields: {
- "minTemp": {"$min": "$temperatures"},
- "maxTemp": {"$max": "$temperatures"},
- "average": {"$avg": "$temperatures"},
- // All other fields are implicitly included.
- }
- }];
- let addFieldsResult = coll.aggregate(addFieldsWeatherPipe).toArray();
-
- // Then assert they are the same.
- assert(arrayEq(addFieldsResult, correctWeather),
- "$addFields does not work the same as a $project with computed and included fields");
-}
-
-// Test against the standalone started by resmoke.py.
-let conn = db.getMongo();
-doExecutionTest(conn);
-print("Success! Standalone execution weather test for $addFields passed.");
+// Then compute the same results using $addFields.
+let addFieldsWeatherPipe = [{
+ $addFields: {
+ "minTemp": {"$min": "$temperatures"},
+ "maxTemp": {"$max": "$temperatures"},
+ "average": {"$avg": "$temperatures"},
+ // All other fields are implicitly included.
+ }
+}];
+let addFieldsResult = coll.aggregate(addFieldsWeatherPipe).toArray();
-// Test against a sharded cluster.
-let st = new ShardingTest({shards: 2});
-doExecutionTest(st.s0);
-st.stop();
-print("Success! Sharding weather test for $addFields passed.");
+// Then assert they are the same.
+assert(arrayEq(addFieldsResult, correctWeather),
+ "$addFields does not work the same as a $project with computed and included fields");
}());
diff --git a/jstests/aggregation/sources/collStats/shard_host_info.js b/jstests/aggregation/sources/collStats/shard_host_info.js
deleted file mode 100644
index ced3f9bb47f..00000000000
--- a/jstests/aggregation/sources/collStats/shard_host_info.js
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Verifies that the $collStats aggregation stage includes the shard and hostname for each output
- * document when run via mongoS, and that the former is absent when run on a non-shard mongoD.
- * @tags: [
- * requires_sharding,
- * requires_spawning_own_processes,
- * ]
- */
-(function() {
-"use strict";
-
-// Test mongoD behaviour using the standalone started by resmoke.py.
-let testDB = db.getSiblingDB(jsTestName());
-let testColl = testDB.test;
-
-// getHostName() doesn't include port, db.getMongo().host is 127.0.0.1:<port>
-const hostName = (getHostName() + ":" + db.getMongo().host.split(":")[1]);
-
-// Test that the shard field is absent and the host field is present when run on mongoD.
-assert.eq(testColl
- .aggregate([
- {$collStats: {latencyStats: {histograms: true}}},
- {$group: {_id: {shard: "$shard", host: "$host"}}}
- ])
- .toArray(),
- [{_id: {host: hostName}}]);
-
-// Test that both shard and hostname are present for $collStats results on a sharded cluster.
-const st = new ShardingTest({name: jsTestName(), shards: 2});
-
-testDB = st.s.getDB(jsTestName());
-testColl = testDB.test;
-
-assert.commandWorked(testDB.dropDatabase());
-
-// Enable sharding on the test database.
-assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
-
-// Shard 'testColl' on {_id: 'hashed'}. This will automatically presplit the collection and
-// place chunks on each shard.
-assert.commandWorked(
- testDB.adminCommand({shardCollection: testColl.getFullName(), key: {_id: "hashed"}}));
-
-// Group $collStats result by $shard and $host to confirm that both fields are present.
-assert.eq(testColl
- .aggregate([
- {$collStats: {latencyStats: {histograms: true}}},
- {$group: {_id: {shard: "$shard", host: "$host"}}},
- {$sort: {_id: 1}}
- ])
- .toArray(),
- [
- {_id: {shard: st.shard0.shardName, host: st.rs0.getPrimary().host}},
- {_id: {shard: st.shard1.shardName, host: st.rs1.getPrimary().host}},
- ]);
-
-st.stop();
-})();
diff --git a/jstests/aggregation/sources/facet/use_cases.js b/jstests/aggregation/sources/facet/use_cases.js
index acf79f16f32..e1507d2c2c0 100644
--- a/jstests/aggregation/sources/facet/use_cases.js
+++ b/jstests/aggregation/sources/facet/use_cases.js
@@ -1,9 +1,5 @@
/**
* Tests some practical use cases of the $facet stage.
- * @tags: [
- * requires_sharding,
- * requires_spawning_own_processes,
- * ]
*/
(function() {
"use strict";
@@ -13,7 +9,6 @@ load("jstests/libs/discover_topology.js"); // For findData
const dbName = "test";
const collName = jsTest.name();
-const testNs = dbName + "." + collName;
Random.setRandomSeed();
@@ -52,8 +47,8 @@ function generateRandomDocument(docId) {
* Inserts 'nDocs' documents into collection given by 'dbName' and 'collName'. Documents will
* have _ids in the range [0, nDocs).
*/
-function populateData(conn, nDocs) {
- var coll = conn.getDB(dbName).getCollection(collName);
+function populateData(nDocs) {
+ var coll = db.getCollection(collName);
coll.remove({}); // Don't drop the collection, since it might be sharded.
var bulk = coll.initializeUnorderedBulkOp();
@@ -64,106 +59,51 @@ function populateData(conn, nDocs) {
assert.commandWorked(bulk.execute());
}
-function doExecutionTest(conn) {
- var coll = conn.getDB(dbName).getCollection(collName);
- //
- // Compute the most common manufacturers, and the number of TVs in each price range.
- //
-
- // First compute each separately, to make sure we have the correct results.
- const manufacturerPipe = [
- {$sortByCount: "$manufacturer"},
- // Sort by count and then by _id in case there are two manufacturers with an equal
- // count.
- {$sort: {count: -1, _id: 1}},
- ];
- const bucketedPricePipe = [
- {
- $bucket: {groupBy: "$price", boundaries: [0, 500, 1000, 1500, 2000], default: 2000},
- },
- {$sort: {count: -1}}
- ];
- const automaticallyBucketedPricePipe = [{$bucketAuto: {groupBy: "$price", buckets: 5}}];
-
- const mostCommonManufacturers = coll.aggregate(manufacturerPipe).toArray();
- const numTVsBucketedByPriceRange = coll.aggregate(bucketedPricePipe).toArray();
- const numTVsAutomaticallyBucketedByPriceRange =
- coll.aggregate(automaticallyBucketedPricePipe).toArray();
-
- const facetPipe = [{
- $facet: {
- manufacturers: manufacturerPipe,
- bucketedPrices: bucketedPricePipe,
- autoBucketedPrices: automaticallyBucketedPricePipe
- }
- }];
-
- // Then compute the results using $facet.
- const facetResult = coll.aggregate(facetPipe).toArray();
- assert.eq(facetResult.length, 1);
- const facetManufacturers = facetResult[0].manufacturers;
- const facetBucketedPrices = facetResult[0].bucketedPrices;
- const facetAutoBucketedPrices = facetResult[0].autoBucketedPrices;
-
- // Then assert they are the same.
- assert.eq(facetManufacturers, mostCommonManufacturers);
- assert.eq(facetBucketedPrices, numTVsBucketedByPriceRange);
- assert.eq(facetAutoBucketedPrices, numTVsAutomaticallyBucketedByPriceRange);
-}
-
-// Test against the standalone started by resmoke.py.
const nDocs = 1000 * 10;
-const conn = db.getMongo();
-populateData(conn, nDocs);
-doExecutionTest(conn);
-
-// Test against a sharded cluster.
-const st = new ShardingTest({shards: 2});
-populateData(st.s0, nDocs);
-doExecutionTest(st.s0);
-
-const shardedDBName = "sharded";
-const shardedCollName = "collection";
-const shardedColl = st.getDB(shardedDBName).getCollection(shardedCollName);
-const unshardedColl = st.getDB(shardedDBName).getCollection(collName);
-
-assert.commandWorked(st.admin.runCommand({enableSharding: shardedDBName}));
-assert.commandWorked(
- st.admin.runCommand({shardCollection: shardedColl.getFullName(), key: {_id: 1}}));
-
-// Test $lookup inside a $facet stage on a sharded collection.
-// Enable sharded $lookup.
-setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", true);
-assert.commandWorked(unshardedColl.runCommand({
- aggregate: unshardedColl.getName(),
- pipeline: [{
- $facet: {
- a: [{
- $lookup:
- {from: shardedCollName, localField: "_id", foreignField: "_id", as: "results"}
- }]
- }
- }],
- cursor: {}
-}));
-// Disable sharded $lookup.
-setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", false);
-
-// Then run the assertions against a sharded collection.
-assert.commandWorked(st.admin.runCommand({enableSharding: dbName}));
-assert.commandWorked(st.admin.runCommand({shardCollection: testNs, key: {_id: 1}}));
-
-// Make sure there is a chunk on each shard, so that our aggregations are targeted to multiple
-// shards.
-assert.commandWorked(st.admin.runCommand({split: testNs, middle: {_id: nDocs / 2}}));
-assert.commandWorked(
- st.admin.runCommand({moveChunk: testNs, find: {_id: 0}, to: st.shard0.shardName}));
-assert.commandWorked(
- st.admin.runCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: st.shard1.shardName}));
-
-doExecutionTest(st.s0);
-
-st.stop();
+populateData(nDocs);
+const coll = db.getCollection(collName);
+
+//
+// Compute the most common manufacturers, and the number of TVs in each price range.
+//
+
+// First compute each separately, to make sure we have the correct results.
+const manufacturerPipe = [
+ {$sortByCount: "$manufacturer"},
+ // Sort by count and then by _id in case there are two manufacturers with an equal
+ // count.
+ {$sort: {count: -1, _id: 1}},
+];
+const bucketedPricePipe = [
+ {
+ $bucket: {groupBy: "$price", boundaries: [0, 500, 1000, 1500, 2000], default: 2000},
+ },
+ {$sort: {count: -1}}
+];
+const automaticallyBucketedPricePipe = [{$bucketAuto: {groupBy: "$price", buckets: 5}}];
+
+const mostCommonManufacturers = coll.aggregate(manufacturerPipe).toArray();
+const numTVsBucketedByPriceRange = coll.aggregate(bucketedPricePipe).toArray();
+const numTVsAutomaticallyBucketedByPriceRange =
+ coll.aggregate(automaticallyBucketedPricePipe).toArray();
+
+const facetPipe = [{
+ $facet: {
+ manufacturers: manufacturerPipe,
+ bucketedPrices: bucketedPricePipe,
+ autoBucketedPrices: automaticallyBucketedPricePipe
+ }
+}];
+
+// Then compute the results using $facet.
+const facetResult = coll.aggregate(facetPipe).toArray();
+assert.eq(facetResult.length, 1);
+const facetManufacturers = facetResult[0].manufacturers;
+const facetBucketedPrices = facetResult[0].bucketedPrices;
+const facetAutoBucketedPrices = facetResult[0].autoBucketedPrices;
+
+// Then assert they are the same.
+assert.eq(facetManufacturers, mostCommonManufacturers);
+assert.eq(facetBucketedPrices, numTVsBucketedByPriceRange);
+assert.eq(facetAutoBucketedPrices, numTVsAutomaticallyBucketedByPriceRange);
}());
diff --git a/jstests/aggregation/sources/merge/exchange_explain.js b/jstests/aggregation/sources/merge/exchange_explain.js
deleted file mode 100644
index 23bed99973d..00000000000
--- a/jstests/aggregation/sources/merge/exchange_explain.js
+++ /dev/null
@@ -1,174 +0,0 @@
-/**
- * Test $merge and exchange with explain.
- *
- * @tags: [requires_sharding]
- */
-load('jstests/aggregation/extras/utils.js');
-
-(function() {
-"use strict";
-
-const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
-
-const mongosDB = st.s.getDB("test_db");
-
-const inColl = mongosDB["inColl"];
-const targetCollRange = mongosDB["targetCollRange"];
-const targetCollRangeOtherField = mongosDB["targetCollRangeOtherField"];
-const targetCollHash = mongosDB["targetCollHash"];
-
-const numDocs = 1000;
-
-function runExplainQuery(targetColl) {
- return inColl.explain("allPlansExecution").aggregate([
- {$group: {_id: "$a", a: {$avg: "$a"}}},
- {
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
- }
- ]);
-}
-
-function runRealQuery(targetColl) {
- return inColl.aggregate([
- {$group: {_id: "$a", a: {$avg: "$a"}}},
- {
- $merge: {
- into: {
- db: targetColl.getDB().getName(),
- coll: targetColl.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
- }
- ]);
-}
-
-function getExchangeSpec(explain) {
- assert(explain.hasOwnProperty("splitPipeline"), tojson(explain));
- assert(explain.splitPipeline.hasOwnProperty("exchange"), tojson(explain));
-
- return explain.splitPipeline.exchange;
-}
-
-// Shard the input collection.
-st.shardColl(inColl, {a: 1}, {a: 500}, {a: 500}, mongosDB.getName());
-
-// Insert some data to the input collection.
-let bulk = inColl.initializeUnorderedBulkOp();
-for (let i = 0; i < numDocs; i++) {
- bulk.insert({a: i}, {b: [0, 1, 2, 3, i]});
-}
-assert.commandWorked(bulk.execute());
-
-// Shard the output collections.
-st.shardColl(targetCollRange, {_id: 1}, {_id: 500}, {_id: 500}, mongosDB.getName());
-st.shardColl(targetCollRangeOtherField, {b: 1}, {b: 500}, {b: 500}, mongosDB.getName());
-st.shardColl(targetCollHash, {_id: "hashed"}, false, false, mongosDB.getName());
-
-// Run the explain. We expect to see the range based exchange here.
-let explain = runExplainQuery(targetCollRange);
-
-// Make sure we see the exchange in the explain output.
-assert.eq(explain.mergeType, "exchange", tojson(explain));
-let exchangeSpec = getExchangeSpec(explain);
-assert.eq(exchangeSpec.policy, "keyRange");
-assert.eq(exchangeSpec.key, {_id: 1});
-
-// Run the real query.
-runRealQuery(targetCollRange);
-let results = targetCollRange.aggregate([{'$count': "count"}]).next().count;
-assert.eq(results, numDocs);
-
-// Rerun the same query with the hash based exchange.
-explain = runExplainQuery(targetCollHash);
-
-// Make sure we see the exchange in the explain output.
-assert.eq(explain.mergeType, "exchange", tojson(explain));
-exchangeSpec = getExchangeSpec(explain);
-assert.eq(exchangeSpec.policy, "keyRange");
-assert.eq(exchangeSpec.key, {_id: "hashed"});
-
-// Run the real query.
-runRealQuery(targetCollHash);
-results = targetCollHash.aggregate([{'$count': "count"}]).next().count;
-assert.eq(results, numDocs);
-
-// This should fail because the "on" field ('b' in this case, the shard key of the target
-// collection) cannot be an array.
-assertErrorCode(inColl,
- [{
- $merge: {
- into: {
- db: targetCollRangeOtherField.getDB().getName(),
- coll: targetCollRangeOtherField.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
- }],
- 51132);
-
-// Turn off the exchange and rerun the query.
-assert.commandWorked(mongosDB.adminCommand({setParameter: 1, internalQueryDisableExchange: 1}));
-explain = runExplainQuery(targetCollRange);
-
-// Make sure there is no exchange.
-assert.eq(explain.mergeType, "anyShard", tojson(explain));
-assert(explain.hasOwnProperty("splitPipeline"), tojson(explain));
-assert(!explain.splitPipeline.hasOwnProperty("exchange"), tojson(explain));
-
-// This should fail in the same way as before, even though we are not running the exchange.
-assertErrorCode(inColl,
- [{
- $merge: {
- into: {
- db: targetCollRangeOtherField.getDB().getName(),
- coll: targetCollRangeOtherField.getName(),
- },
- whenMatched: "replace",
- whenNotMatched: "insert"
- }
- }],
- 51132);
-
-// SERVER-38349 Make sure mongos rejects specifying exchange directly.
-assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: inColl.getName(),
- pipeline: [],
- cursor: {},
- exchange: {
- policy: "keyRange",
- bufferSize: NumberInt(1024),
- boundaries: [{_id: 0}],
- consumers: NumberInt(2),
- consumerIds: [NumberInt(0), NumberInt(1)]
- }
-}),
- 51028);
-
-assert.commandFailedWithCode(mongosDB.runCommand({
- aggregate: inColl.getName(),
- pipeline: [{
- $merge: {into: targetCollRange.getName(), whenMatched: "replace", whenNotMatched: "insert"}
- }],
- cursor: {},
- exchange: {
- policy: "keyRange",
- bufferSize: NumberInt(1024),
- boundaries: [{_id: 0}],
- consumers: NumberInt(2),
- consumerIds: [NumberInt(0), NumberInt(1)]
- }
-}),
- 51028);
-
-st.stop();
-}());
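The explain checks in the deleted exchange test boil down to reading the exchange specification out of the split pipeline. A minimal sketch, assuming an 'inColl' handle and a 'pipeline' ending in $merge as in the test above:

// When mongoS plans to use the exchange optimization, the spec is reported under
// splitPipeline.exchange and mergeType is "exchange".
const explainOut = inColl.explain("allPlansExecution").aggregate(pipeline);
if (explainOut.mergeType === "exchange") {
    const exchangeSpec = explainOut.splitPipeline.exchange;
    assert.eq(exchangeSpec.policy, "keyRange");
}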
diff --git a/jstests/aggregation/sources/merge/use_cases.js b/jstests/aggregation/sources/merge/use_cases.js
deleted file mode 100644
index 6c1c71b9419..00000000000
--- a/jstests/aggregation/sources/merge/use_cases.js
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Tests a practical use case for $merge from a collection of samples to an hourly rollup output
- * collection.
- *
- * @tags: [requires_sharding]
- */
-(function() {
-"use strict";
-
-Random.setRandomSeed();
-
-const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
-
-const mongosDB = st.s.getDB("use_cases");
-
-const metricsColl = mongosDB["metrics"];
-const rollupColl = mongosDB["rollup"];
-
-function incDateByMinutes(date, mins) {
- return new Date(date.getTime() + (60 * 1000 * mins));
-}
-
-// Inserts 'nSamples' worth of random data starting at 'date'.
-function insertRandomData(coll, date, nSamples) {
- let ticksSum = 0, tempSum = 0;
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nSamples; i++) {
- const randTick = Random.randInt(100);
- const randTemp = Random.randInt(100);
- ticksSum += randTick;
- tempSum += randTemp;
- bulk.insert(
- {_id: incDateByMinutes(date, i * (60 / nSamples)), ticks: randTick, temp: randTemp});
- }
- assert.commandWorked(bulk.execute());
-
- return [ticksSum, tempSum];
-}
-
-// Runs a $merge aggregate on the metrics collection to the rollup collection, grouping by hour,
-// summing the ticks, and averaging the temps.
-function runAggregate({startDate, whenMatchedMode, whenNotMatchedMode}) {
- metricsColl.aggregate([
- {$match: {_id: {$gte: startDate}}},
- {
- $group: {
- _id: {$dateToString: {format: "%Y-%m-%dT%H", date: "$_id"}},
- ticks: {$sum: "$ticks"},
- avgTemp: {$avg: "$temp"},
- }
- },
- {
- $merge: {
- into: {db: rollupColl.getDB().getName(), coll: rollupColl.getName()},
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }
- ]);
-}
-
-// Shard the metrics (source) collection on _id, which is the date of the sample.
-const hourZero = new ISODate("2018-08-15T00:00:00.000Z");
-const hourOne = incDateByMinutes(hourZero, 60);
-st.shardColl(metricsColl, {_id: 1}, {_id: hourOne}, {_id: hourOne}, mongosDB.getName());
-
-// Insert sample documents into the metrics collection.
-const samplesPerHour = 10;
-let [ticksSum, tempSum] = insertRandomData(metricsColl, hourZero, samplesPerHour);
-
-runAggregate({startDate: hourZero, whenMatchedMode: "fail", whenNotMatchedMode: "insert"});
-
-// Verify the results of the $merge in the rollup collection.
-let res = rollupColl.find().sort({_id: 1});
-assert.eq([{_id: "2018-08-15T00", ticks: ticksSum, avgTemp: tempSum / samplesPerHour}],
- res.toArray());
-
-// Insert another hour's worth of data, and verify that the $merge will append the result to the
-// output collection.
-[ticksSum, tempSum] = insertRandomData(metricsColl, hourOne, samplesPerHour);
-
-runAggregate({startDate: hourOne, whenMatchedMode: "fail", whenNotMatchedMode: "insert"});
-
-res = rollupColl.find().sort({_id: 1}).toArray();
-assert.eq(2, res.length);
-assert.eq(res[1], {_id: "2018-08-15T01", ticks: ticksSum, avgTemp: tempSum / samplesPerHour});
-
-// Whoops, there was a mistake in the last hour of data. Let's re-run the aggregation and update
-// the rollup collection using the "replace" mode.
-assert.commandWorked(metricsColl.update({_id: hourOne}, {$inc: {ticks: 10}}));
-ticksSum += 10;
-
-runAggregate({startDate: hourOne, whenMatchedMode: "replace", whenNotMatchedMode: "insert"});
-
-res = rollupColl.find().sort({_id: 1}).toArray();
-assert.eq(2, res.length);
-assert.eq(res[1], {_id: "2018-08-15T01", ticks: ticksSum, avgTemp: tempSum / samplesPerHour});
-
-// Shard the output collection into 2 chunks, and make the split hour 6.
-const hourSix = incDateByMinutes(hourZero, 60 * 6);
-st.shardColl(rollupColl, {_id: 1}, {_id: hourSix}, {_id: hourSix}, mongosDB.getName());
-
-// Insert hour 6 data into the metrics collection and re-run the aggregation.
-[ticksSum, tempSum] = insertRandomData(metricsColl, hourSix, samplesPerHour);
-
-runAggregate({startDate: hourSix, whenMatchedMode: "fail", whenNotMatchedMode: "insert"});
-
-res = rollupColl.find().sort({_id: 1}).toArray();
-assert.eq(3, res.length, tojson(res));
-assert.eq(res[2], {_id: "2018-08-15T06", ticks: ticksSum, avgTemp: tempSum / samplesPerHour});
-
-st.stop();
-}());
diff --git a/jstests/aggregation/sources/replaceRoot/address.js b/jstests/aggregation/sources/replaceRoot/address.js
index 051c693110f..224691477d5 100644
--- a/jstests/aggregation/sources/replaceRoot/address.js
+++ b/jstests/aggregation/sources/replaceRoot/address.js
@@ -1,9 +1,5 @@
/**
* $replaceRoot can be used to extract parts of a document; here we test a simple address case.
- * @tags: [
- * requires_sharding,
- * requires_spawning_own_processes,
- * ]
*/
(function() {
@@ -12,9 +8,6 @@
// For arrayEq.
load("jstests/aggregation/extras/utils.js");
-const dbName = "test";
-const collName = jsTest.name();
-
Random.setRandomSeed();
/**
@@ -51,54 +44,42 @@ function generateRandomDocument() {
};
}
-function doExecutionTest(conn) {
- const coll = conn.getDB(dbName).getCollection(collName);
- coll.drop();
-
- // Insert a bunch of documents of the form above.
- const nDocs = 10;
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; i++) {
- bulk.insert(generateRandomDocument());
- }
- assert.commandWorked(bulk.execute());
-
- // Extract the contents of the address field, and make sure that doing the same
- // with replaceRoot yields the correct answer.
- // First compute each separately, since we know all of the fields in the address,
- // to make sure we have the correct results.
- let addressPipe = [{
- $project: {
- "_id": 0,
- "number": "$address.number",
- "street": "$address.street",
- "city": "$address.city",
- "zip": "$address.zip"
- }
- }];
- let correctAddresses = coll.aggregate(addressPipe).toArray();
-
- // Then compute the same results using $replaceRoot.
- let replaceWithResult = coll.aggregate([
- {$replaceRoot: {newRoot: "$address"}},
- {$sort: {city: 1, zip: 1, street: 1, number: 1}}
- ])
- .toArray();
+const dbName = "test";
+const collName = jsTest.name();
+const coll = db.getCollection(collName);
+coll.drop();
- // Then assert they are the same.
- assert(
- arrayEq(replaceWithResult, correctAddresses),
- "$replaceRoot does not work the same as $project-ing the relevant fields to the top level");
+// Insert a bunch of documents of the form above.
+const nDocs = 10;
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < nDocs; i++) {
+ bulk.insert(generateRandomDocument());
}
+assert.commandWorked(bulk.execute());
+
+// Extract the contents of the address field, and make sure that doing the same
+// with replaceRoot yields the correct answer.
+// First compute each separately, since we know all of the fields in the address,
+// to make sure we have the correct results.
+let addressPipe = [{
+ $project: {
+ "_id": 0,
+ "number": "$address.number",
+ "street": "$address.street",
+ "city": "$address.city",
+ "zip": "$address.zip"
+ }
+}];
+let correctAddresses = coll.aggregate(addressPipe).toArray();
-// Test against the standalone started by resmoke.py.
-let conn = db.getMongo();
-doExecutionTest(conn);
-print("Success! Standalone execution test for $replaceRoot passed.");
+// Then compute the same results using $replaceRoot.
+let replaceWithResult = coll.aggregate([
+ {$replaceRoot: {newRoot: "$address"}},
+ {$sort: {city: 1, zip: 1, street: 1, number: 1}}
+ ])
+ .toArray();
-// Test against a sharded cluster.
-let st = new ShardingTest({shards: 2});
-doExecutionTest(st.s0);
-st.stop();
-print("Success! Sharding test for $replaceRoot passed.");
+// Then assert they are the same.
+assert(arrayEq(replaceWithResult, correctAddresses),
+ "$replaceRoot does not work the same as $project-ing the relevant fields to the top level");
}());
diff --git a/jstests/aggregation/testSlave.js b/jstests/aggregation/testSlave.js
deleted file mode 100644
index 1b3d302be95..00000000000
--- a/jstests/aggregation/testSlave.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// This test just makes sure that aggregation is possible on a secondary node.
-// @tags: [
-// requires_replication,
-// requires_spawning_own_processes,
-// ]
-var replTest = new ReplSetTest({name: 'aggTestSlave', nodes: 2});
-var nodes = replTest.startSet();
-replTest.initiate();
-replTest.awaitReplication();
-
-var primary = replTest.getPrimary().getDB('test');
-var secondary = replTest.getSecondary().getDB('test');
-
-var options = {writeConcern: {w: 2}};
-primary.agg.insert({}, options);
-primary.agg.insert({}, options);
-primary.agg.insert({}, options);
-
-var res = secondary.agg.aggregate({$group: {_id: null, count: {$sum: 1}}});
-assert.eq(res.toArray(), [{_id: null, count: 3}]);
-
-replTest.stopSet();