author     Nick Zolnierz <nicholas.zolnierz@mongodb.com>  2020-03-18 15:17:56 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-03-25 17:40:43 +0000
commit     fe79c4ee1dfc8d49ae06c94a927267712b32b011 (patch)
tree       a5ddce887802b62fb028c5d707186ebb64effe66
parent     b36c69c5930d25a8f5ae348a2b2fb24f27f925e6 (diff)
download   mongo-fe79c4ee1dfc8d49ae06c94a927267712b32b011.tar.gz
SERVER-46700 Update tests in aggregation suite to avoid spawning mongod/sharded clusters
-rw-r--r--  buildscripts/resmokeconfig/suites/aggregation.yml  1
-rw-r--r--  buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml  4
-rw-r--r--  buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml  4
-rw-r--r--  buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml  8
-rw-r--r--  buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml  7
-rw-r--r--  jstests/aggregation/bugs/server7781.js  42
-rw-r--r--  jstests/aggregation/sources/addFields/use_cases.js  78
-rw-r--r--  jstests/aggregation/sources/addFields/weather.js  88
-rw-r--r--  jstests/aggregation/sources/facet/use_cases.js  156
-rw-r--r--  jstests/aggregation/sources/replaceRoot/address.js  87
-rw-r--r--  jstests/aggregation/testSlave.js  22
-rw-r--r--  jstests/noPassthrough/agg_cursor_timeout.js (renamed from jstests/aggregation/bugs/cursor_timeout.js)  3
-rw-r--r--  jstests/noPassthrough/lookup_unwind_getmore.js (renamed from jstests/aggregation/bugs/lookup_unwind_getmore.js)  3
-rw-r--r--  jstests/noPassthrough/lookup_unwind_killcursor.js (renamed from jstests/aggregation/bugs/lookup_unwind_killcursor.js)  3
-rw-r--r--  jstests/noPassthrough/shard_host_info.js (renamed from jstests/aggregation/sources/collStats/shard_host_info.js)  9
-rw-r--r--  jstests/sharding/agg_mongos_merge.js (renamed from jstests/aggregation/mongos_merge.js)  8
-rw-r--r--  jstests/sharding/agg_mongos_slaveok.js (renamed from jstests/aggregation/mongos_slaveok.js)  1
-rw-r--r--  jstests/sharding/agg_shard_targeting.js (renamed from jstests/aggregation/shard_targeting.js)  0
-rw-r--r--  jstests/sharding/exchange_explain.js (renamed from jstests/aggregation/sources/merge/exchange_explain.js)  0
-rw-r--r--  jstests/sharding/lookup.js  12
-rw-r--r--  jstests/sharding/merge_use_cases.js (renamed from jstests/aggregation/sources/merge/use_cases.js)  0
-rw-r--r--  jstests/sharding/pipeline_pass_through_from_mongos.js (renamed from jstests/aggregation/pipeline_pass_through_from_mongos.js)  0
-rw-r--r--  jstests/sharding/server37750.js (renamed from jstests/aggregation/bugs/server37750.js)  0
-rw-r--r--  jstests/sharding/server6179.js (renamed from jstests/aggregation/bugs/server6179.js)  0
-rw-r--r--  jstests/sharding/sharded_agg_cleanup_on_error.js (renamed from jstests/aggregation/sharded_agg_cleanup_on_error.js)  0
25 files changed, 167 insertions, 369 deletions
diff --git a/buildscripts/resmokeconfig/suites/aggregation.yml b/buildscripts/resmokeconfig/suites/aggregation.yml
index 523b65ad0fa..38ae5965b47 100644
--- a/buildscripts/resmokeconfig/suites/aggregation.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation.yml
@@ -14,6 +14,7 @@ executor:
config:
shell_options:
readMode: commands
+ eval: load("jstests/libs/override_methods/detect_spawning_own_mongod.js");
hooks:
- class: ValidateCollections
- class: CleanEveryN
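
Note: the override loaded above is jstests/libs/override_methods/detect_spawning_own_mongod.js. As a minimal sketch of the mechanism (the patched entry points and message text here are assumptions for illustration, not a quote of that file), it swaps out the shell's fixture constructors so any test in the suite that tries to spawn its own processes fails immediately:

(function() {
    'use strict';
    // Replace a fixture entry point with a function that throws, so an
    // offending test fails fast instead of silently starting extra processes.
    const forbid = (name) => function() {
        throw new Error("Detected a " + name + " call in a test run from a passthrough" +
                        " suite; move the test under jstests/noPassthrough/ or" +
                        " jstests/sharding/ instead.");
    };
    MongoRunner.runMongod = forbid("MongoRunner.runMongod()");
    ShardingTest = forbid("ShardingTest()");
    ReplSetTest = forbid("ReplSetTest()");
})();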
diff --git a/buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml
index 85013bdfd16..41bc0530e59 100644
--- a/buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml
@@ -6,10 +6,6 @@ selector:
exclude_files:
- jstests/aggregation/extras/*.js
- jstests/aggregation/data/*.js
- # The following tests start their own mongod.
- - jstests/aggregation/bugs/cursor_timeout.js
- - jstests/aggregation/bugs/lookup_unwind_getmore.js
- - jstests/aggregation/bugs/lookup_unwind_killcursor.js
# TODO: Remove when SERVER-23229 is fixed.
- jstests/aggregation/bugs/groupMissing.js
# Mongos does not support runtimeConstants.
diff --git a/buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml b/buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml
index 1b0874ba4d9..9597b8980d1 100644
--- a/buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml
@@ -6,10 +6,6 @@ selector:
exclude_files:
- jstests/aggregation/extras/*.js
- jstests/aggregation/data/*.js
- # The following tests start their own mongod.
- - jstests/aggregation/bugs/cursor_timeout.js
- - jstests/aggregation/bugs/lookup_unwind_getmore.js
- - jstests/aggregation/bugs/lookup_unwind_killcursor.js
# TODO: Remove when SERVER-23229 is fixed.
- jstests/aggregation/bugs/groupMissing.js
# TODO SERVER-32309: Enable once $lookup with pipeline supports sharded foreign collections.
diff --git a/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml
index e552fb10f0b..9b48c8969aa 100644
--- a/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml
@@ -6,20 +6,12 @@ selector:
exclude_files:
- jstests/aggregation/extras/*.js
- jstests/aggregation/data/*.js
- # Blocked by SERVER-37191
- - jstests/aggregation/sources/merge/exchange_explain.js
# This test specifies a $out stage not as the last stage in the aggregation pipeline, causing a
# non-local readConcern to erroneously be sent with the command.
- jstests/aggregation/sources/out/required_last_position.js
# These tests fail due to the inability to specify a writeConcern when secondaryThrottle is not
# set as part of the moveChunk command.
- - jstests/aggregation/mongos_merge.js
- - jstests/aggregation/shard_targeting.js
- jstests/aggregation/sources/facet/use_cases.js
- # These test fail because afterOpTime is required to guarantee a secondary has advanced its
- # majority-committed snapshot.
- - jstests/aggregation/mongos_slaveok.js
- - jstests/aggregation/testSlave.js
exclude_with_any_tags:
##
# The next three tags correspond to the special errors thrown by the
diff --git a/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml
index f6150caf75d..7902d33e4d0 100644
--- a/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml
@@ -6,13 +6,6 @@ selector:
exclude_files:
- jstests/aggregation/extras/*.js
- jstests/aggregation/data/*.js
- # The following tests start their own ReplSetTest.
- - jstests/aggregation/testSlave.js
- - jstests/aggregation/sources/changeStream/*.js
- # The following tests start their own mongod.
- - jstests/aggregation/bugs/cursor_timeout.js
- - jstests/aggregation/bugs/lookup_unwind_getmore.js
- - jstests/aggregation/bugs/lookup_unwind_killcursor.js
# This test makes assertions about whether aggregations will need to spill to disk, which assumes
# all the data is located on a single shard.
- jstests/aggregation/spill_to_disk.js
diff --git a/jstests/aggregation/bugs/server7781.js b/jstests/aggregation/bugs/server7781.js
index d70662a9d25..462beebe5fd 100644
--- a/jstests/aggregation/bugs/server7781.js
+++ b/jstests/aggregation/bugs/server7781.js
@@ -1,8 +1,4 @@
// SERVER-7781 $geoNear pipeline stage
-// @tags: [
-// requires_sharding,
-// requires_spawning_own_processes,
-// ]
(function() {
'use strict';
@@ -55,32 +51,9 @@ function testGeoNearStageOutput({geoNearSpec, limit, batchSize}) {
// We use this to generate points. Using a single global to avoid reseting RNG in each pass.
var pointMaker = new GeoNearRandomTest(coll);
-function test(db, sharded, indexType) {
+function test(db, indexType) {
db[coll].drop();
- if (sharded) { // sharded setup
- var shards = [];
- var config = db.getSiblingDB("config");
- config.shards.find().forEach(function(shard) {
- shards.push(shard._id);
- });
-
- assert.commandWorked(
- db.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}}));
- for (var i = 1; i < 10; i++) {
- // split at 0.1, 0.2, ... 0.9
- assert.commandWorked(
- db.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}}));
- db.adminCommand({
- moveChunk: db[coll].getFullName(),
- find: {rand: i / 10},
- to: shards[i % shards.length]
- });
- }
-
- assert.eq(config.chunks.count({'ns': db[coll].getFullName()}), 10);
- }
-
// insert points
var numPts = 10 * 1000;
var bulk = db[coll].initializeUnorderedBulkOp();
@@ -117,15 +90,6 @@ function test(db, sharded, indexType) {
});
}
-test(db, false, '2d');
-test(db, false, '2dsphere');
-
-var sharded = new ShardingTest({shards: 3, mongos: 1});
-assert.commandWorked(sharded.s0.adminCommand({enablesharding: "test"}));
-sharded.ensurePrimaryShard('test', sharded.shard1.shardName);
-
-test(sharded.getDB('test'), true, '2d');
-test(sharded.getDB('test'), true, '2dsphere');
-
-sharded.stop();
+test(db, '2d');
+test(db, '2dsphere');
})();
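
For context, the shape of the $geoNear stage server7781.js exercises, shown against a hypothetical collection (the test runs it with both a 2d and a 2dsphere index; $geoNear must be the first stage of the pipeline):

// Illustration only; 'points' and 'loc' are placeholder names.
db.points.createIndex({loc: "2dsphere"});
db.points.aggregate([{
    $geoNear: {
        near: {type: "Point", coordinates: [0, 0]},
        distanceField: "dist",
        spherical: true
    }
}]);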
diff --git a/jstests/aggregation/sources/addFields/use_cases.js b/jstests/aggregation/sources/addFields/use_cases.js
index 34703a75907..4df220e95ca 100644
--- a/jstests/aggregation/sources/addFields/use_cases.js
+++ b/jstests/aggregation/sources/addFields/use_cases.js
@@ -2,10 +2,6 @@
* $addFields can be used to add fixed and computed fields to documents while preserving the
* original document. Verify that using $addFields and adding computed fields in a $project yield
* the same result.
- * @tags: [
- * requires_sharding,
- * requires_spawning_own_processes,
- * ]
*/
(function() {
@@ -14,54 +10,38 @@
// For arrayEq.
load("jstests/aggregation/extras/utils.js");
-const dbName = "test";
const collName = jsTest.name();
+const coll = db.getCollection(collName);
+coll.drop();
-function doExecutionTest(conn) {
- const coll = conn.getDB(dbName).getCollection(collName);
- coll.drop();
-
- // Insert a bunch of documents of the form above.
- const nDocs = 10;
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert({"_id": i, "2i": i * 2, "3i": i * 3}));
- }
-
- // Add the minimum, maximum, and average temperatures, and make sure that doing the same
- // with addFields yields the correct answer.
- // First compute with $project, since we know all the fields in this document.
- let projectPipe = [{
- $project: {
- "2i": 1,
- "3i": 1,
- "6i^2": {"$multiply": ["$2i", "$3i"]},
- // _id is implicitly included.
- }
- }];
- let correct = coll.aggregate(projectPipe).toArray();
-
- // Then compute the same results using $addFields.
- let addFieldsPipe = [{
- $addFields: {
- "6i^2": {"$multiply": ["$2i", "$3i"]},
- // All other fields are implicitly included.
- }
- }];
- let addFieldsResult = coll.aggregate(addFieldsPipe).toArray();
-
- // Then assert they are the same.
- assert(arrayEq(addFieldsResult, correct),
- "$addFields does not work the same as a $project with computed and included fields");
+const nDocs = 10;
+for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({"_id": i, "2i": i * 2, "3i": i * 3}));
}
-// Test against the standalone started by resmoke.py.
-let conn = db.getMongo();
-doExecutionTest(conn);
-print("Success! Standalone execution use case test for $addFields passed.");
+// Add the minimum, maximum, and average temperatures, and make sure that doing the same
+// with addFields yields the correct answer.
+// First compute with $project, since we know all the fields in this document.
+let projectPipe = [{
+ $project: {
+ "2i": 1,
+ "3i": 1,
+ "6i^2": {"$multiply": ["$2i", "$3i"]},
+ // _id is implicitly included.
+ }
+}];
+let correct = coll.aggregate(projectPipe).toArray();
+
+// Then compute the same results using $addFields.
+let addFieldsPipe = [{
+ $addFields: {
+ "6i^2": {"$multiply": ["$2i", "$3i"]},
+ // All other fields are implicitly included.
+ }
+}];
+let addFieldsResult = coll.aggregate(addFieldsPipe).toArray();
-// Test against a sharded cluster.
-let st = new ShardingTest({shards: 2});
-doExecutionTest(st.s0);
-st.stop();
-print("Success! Sharding use case test for $addFields passed.");
+// Then assert they are the same.
+assert(arrayEq(addFieldsResult, correct),
+ "$addFields does not work the same as a $project with computed and included fields");
}());
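
For reference, the equivalence this test asserts, reduced to a single hypothetical document (the collection name is illustrative only):

// Both pipelines produce {_id: 0, "2i": 2, "3i": 3, "6i^2": 6}: $addFields
// keeps every existing field implicitly, while $project must list them.
db.example.insert({_id: 0, "2i": 2, "3i": 3});
db.example.aggregate([{$addFields: {"6i^2": {$multiply: ["$2i", "$3i"]}}}]);
db.example.aggregate(
    [{$project: {"2i": 1, "3i": 1, "6i^2": {$multiply: ["$2i", "$3i"]}}}]);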
diff --git a/jstests/aggregation/sources/addFields/weather.js b/jstests/aggregation/sources/addFields/weather.js
index feb57c9a8a6..6b440bf5f66 100644
--- a/jstests/aggregation/sources/addFields/weather.js
+++ b/jstests/aggregation/sources/addFields/weather.js
@@ -2,10 +2,6 @@
* $addFields can be used to add fixed and computed fields to documents while preserving the
* original document. Verify that using $addFields and adding computed fields in a $project yield
* the same result. Use the sample case of computing weather metadata.
- * @tags: [
- * requires_sharding,
- * requires_spawning_own_processes,
- * ]
*/
(function() {
@@ -14,11 +10,17 @@
// For arrayEq.
load("jstests/aggregation/extras/utils.js");
-const dbName = "test";
const collName = jsTest.name();
+const coll = db.getCollection(collName);
+coll.drop();
Random.setRandomSeed();
+const nDocs = 10;
+for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert(generateRandomDocument()));
+}
+
/**
* Helper to generate an array of specified length of numbers in the specified range.
*/
@@ -49,56 +51,34 @@ function generateRandomDocument() {
};
}
-function doExecutionTest(conn) {
- const coll = conn.getDB(dbName).getCollection(collName);
- coll.drop();
-
- // Insert a bunch of documents of the form above.
- const nDocs = 10;
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert(generateRandomDocument()));
+// Add the minimum, maximum, and average temperatures, and make sure that doing the same
+// with addFields yields the correct answer.
+// First compute with $project, since we know all the fields in this document.
+let projectWeatherPipe = [{
+ $project: {
+ "month": 1,
+ "day": 1,
+ "temperatures": 1,
+ "minTemp": {"$min": "$temperatures"},
+ "maxTemp": {"$max": "$temperatures"},
+ "average": {"$avg": "$temperatures"},
+ // _id is implicitly included.
}
+}];
+let correctWeather = coll.aggregate(projectWeatherPipe).toArray();
- // Add the minimum, maximum, and average temperatures, and make sure that doing the same
- // with addFields yields the correct answer.
- // First compute with $project, since we know all the fields in this document.
- let projectWeatherPipe = [{
- $project: {
- "month": 1,
- "day": 1,
- "temperatures": 1,
- "minTemp": {"$min": "$temperatures"},
- "maxTemp": {"$max": "$temperatures"},
- "average": {"$avg": "$temperatures"},
- // _id is implicitly included.
- }
- }];
- let correctWeather = coll.aggregate(projectWeatherPipe).toArray();
-
- // Then compute the same results using $addFields.
- let addFieldsWeatherPipe = [{
- $addFields: {
- "minTemp": {"$min": "$temperatures"},
- "maxTemp": {"$max": "$temperatures"},
- "average": {"$avg": "$temperatures"},
- // All other fields are implicitly included.
- }
- }];
- let addFieldsResult = coll.aggregate(addFieldsWeatherPipe).toArray();
-
- // Then assert they are the same.
- assert(arrayEq(addFieldsResult, correctWeather),
- "$addFields does not work the same as a $project with computed and included fields");
-}
-
-// Test against the standalone started by resmoke.py.
-let conn = db.getMongo();
-doExecutionTest(conn);
-print("Success! Standalone execution weather test for $addFields passed.");
+// Then compute the same results using $addFields.
+let addFieldsWeatherPipe = [{
+ $addFields: {
+ "minTemp": {"$min": "$temperatures"},
+ "maxTemp": {"$max": "$temperatures"},
+ "average": {"$avg": "$temperatures"},
+ // All other fields are implicitly included.
+ }
+}];
+let addFieldsResult = coll.aggregate(addFieldsWeatherPipe).toArray();
-// Test against a sharded cluster.
-let st = new ShardingTest({shards: 2});
-doExecutionTest(st.s0);
-st.stop();
-print("Success! Sharding weather test for $addFields passed.");
+// Then assert they are the same.
+assert(arrayEq(addFieldsResult, correctWeather),
+ "$addFields does not work the same as a $project with computed and included fields");
}());
diff --git a/jstests/aggregation/sources/facet/use_cases.js b/jstests/aggregation/sources/facet/use_cases.js
index acf79f16f32..e1507d2c2c0 100644
--- a/jstests/aggregation/sources/facet/use_cases.js
+++ b/jstests/aggregation/sources/facet/use_cases.js
@@ -1,9 +1,5 @@
/**
* Tests some practical use cases of the $facet stage.
- * @tags: [
- * requires_sharding,
- * requires_spawning_own_processes,
- * ]
*/
(function() {
"use strict";
@@ -13,7 +9,6 @@ load("jstests/libs/discover_topology.js"); // For findData
const dbName = "test";
const collName = jsTest.name();
-const testNs = dbName + "." + collName;
Random.setRandomSeed();
@@ -52,8 +47,8 @@ function generateRandomDocument(docId) {
* Inserts 'nDocs' documents into collection given by 'dbName' and 'collName'. Documents will
* have _ids in the range [0, nDocs).
*/
-function populateData(conn, nDocs) {
- var coll = conn.getDB(dbName).getCollection(collName);
+function populateData(nDocs) {
+ var coll = db.getCollection(collName);
coll.remove({}); // Don't drop the collection, since it might be sharded.
var bulk = coll.initializeUnorderedBulkOp();
@@ -64,106 +59,51 @@ function populateData(conn, nDocs) {
assert.commandWorked(bulk.execute());
}
-function doExecutionTest(conn) {
- var coll = conn.getDB(dbName).getCollection(collName);
- //
- // Compute the most common manufacturers, and the number of TVs in each price range.
- //
-
- // First compute each separately, to make sure we have the correct results.
- const manufacturerPipe = [
- {$sortByCount: "$manufacturer"},
- // Sort by count and then by _id in case there are two manufacturers with an equal
- // count.
- {$sort: {count: -1, _id: 1}},
- ];
- const bucketedPricePipe = [
- {
- $bucket: {groupBy: "$price", boundaries: [0, 500, 1000, 1500, 2000], default: 2000},
- },
- {$sort: {count: -1}}
- ];
- const automaticallyBucketedPricePipe = [{$bucketAuto: {groupBy: "$price", buckets: 5}}];
-
- const mostCommonManufacturers = coll.aggregate(manufacturerPipe).toArray();
- const numTVsBucketedByPriceRange = coll.aggregate(bucketedPricePipe).toArray();
- const numTVsAutomaticallyBucketedByPriceRange =
- coll.aggregate(automaticallyBucketedPricePipe).toArray();
-
- const facetPipe = [{
- $facet: {
- manufacturers: manufacturerPipe,
- bucketedPrices: bucketedPricePipe,
- autoBucketedPrices: automaticallyBucketedPricePipe
- }
- }];
-
- // Then compute the results using $facet.
- const facetResult = coll.aggregate(facetPipe).toArray();
- assert.eq(facetResult.length, 1);
- const facetManufacturers = facetResult[0].manufacturers;
- const facetBucketedPrices = facetResult[0].bucketedPrices;
- const facetAutoBucketedPrices = facetResult[0].autoBucketedPrices;
-
- // Then assert they are the same.
- assert.eq(facetManufacturers, mostCommonManufacturers);
- assert.eq(facetBucketedPrices, numTVsBucketedByPriceRange);
- assert.eq(facetAutoBucketedPrices, numTVsAutomaticallyBucketedByPriceRange);
-}
-
-// Test against the standalone started by resmoke.py.
const nDocs = 1000 * 10;
-const conn = db.getMongo();
-populateData(conn, nDocs);
-doExecutionTest(conn);
-
-// Test against a sharded cluster.
-const st = new ShardingTest({shards: 2});
-populateData(st.s0, nDocs);
-doExecutionTest(st.s0);
-
-const shardedDBName = "sharded";
-const shardedCollName = "collection";
-const shardedColl = st.getDB(shardedDBName).getCollection(shardedCollName);
-const unshardedColl = st.getDB(shardedDBName).getCollection(collName);
-
-assert.commandWorked(st.admin.runCommand({enableSharding: shardedDBName}));
-assert.commandWorked(
- st.admin.runCommand({shardCollection: shardedColl.getFullName(), key: {_id: 1}}));
-
-// Test $lookup inside a $facet stage on a sharded collection.
-// Enable sharded $lookup.
-setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", true);
-assert.commandWorked(unshardedColl.runCommand({
- aggregate: unshardedColl.getName(),
- pipeline: [{
- $facet: {
- a: [{
- $lookup:
- {from: shardedCollName, localField: "_id", foreignField: "_id", as: "results"}
- }]
- }
- }],
- cursor: {}
-}));
-// Disable sharded $lookup.
-setParameterOnAllHosts(
- DiscoverTopology.findNonConfigNodes(st.s), "internalQueryAllowShardedLookup", false);
-
-// Then run the assertions against a sharded collection.
-assert.commandWorked(st.admin.runCommand({enableSharding: dbName}));
-assert.commandWorked(st.admin.runCommand({shardCollection: testNs, key: {_id: 1}}));
-
-// Make sure there is a chunk on each shard, so that our aggregations are targeted to multiple
-// shards.
-assert.commandWorked(st.admin.runCommand({split: testNs, middle: {_id: nDocs / 2}}));
-assert.commandWorked(
- st.admin.runCommand({moveChunk: testNs, find: {_id: 0}, to: st.shard0.shardName}));
-assert.commandWorked(
- st.admin.runCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: st.shard1.shardName}));
-
-doExecutionTest(st.s0);
-
-st.stop();
+populateData(nDocs);
+const coll = db.getCollection(collName);
+
+//
+// Compute the most common manufacturers, and the number of TVs in each price range.
+//
+
+// First compute each separately, to make sure we have the correct results.
+const manufacturerPipe = [
+ {$sortByCount: "$manufacturer"},
+ // Sort by count and then by _id in case there are two manufacturers with an equal
+ // count.
+ {$sort: {count: -1, _id: 1}},
+];
+const bucketedPricePipe = [
+ {
+ $bucket: {groupBy: "$price", boundaries: [0, 500, 1000, 1500, 2000], default: 2000},
+ },
+ {$sort: {count: -1}}
+];
+const automaticallyBucketedPricePipe = [{$bucketAuto: {groupBy: "$price", buckets: 5}}];
+
+const mostCommonManufacturers = coll.aggregate(manufacturerPipe).toArray();
+const numTVsBucketedByPriceRange = coll.aggregate(bucketedPricePipe).toArray();
+const numTVsAutomaticallyBucketedByPriceRange =
+ coll.aggregate(automaticallyBucketedPricePipe).toArray();
+
+const facetPipe = [{
+ $facet: {
+ manufacturers: manufacturerPipe,
+ bucketedPrices: bucketedPricePipe,
+ autoBucketedPrices: automaticallyBucketedPricePipe
+ }
+}];
+
+// Then compute the results using $facet.
+const facetResult = coll.aggregate(facetPipe).toArray();
+assert.eq(facetResult.length, 1);
+const facetManufacturers = facetResult[0].manufacturers;
+const facetBucketedPrices = facetResult[0].bucketedPrices;
+const facetAutoBucketedPrices = facetResult[0].autoBucketedPrices;
+
+// Then assert they are the same.
+assert.eq(facetManufacturers, mostCommonManufacturers);
+assert.eq(facetBucketedPrices, numTVsBucketedByPriceRange);
+assert.eq(facetAutoBucketedPrices, numTVsAutomaticallyBucketedByPriceRange);
}());
diff --git a/jstests/aggregation/sources/replaceRoot/address.js b/jstests/aggregation/sources/replaceRoot/address.js
index 051c693110f..224691477d5 100644
--- a/jstests/aggregation/sources/replaceRoot/address.js
+++ b/jstests/aggregation/sources/replaceRoot/address.js
@@ -1,9 +1,5 @@
/**
* $replaceRoot can be used to extract parts of a document; here we test a simple address case.
- * @tags: [
- * requires_sharding,
- * requires_spawning_own_processes,
- * ]
*/
(function() {
@@ -12,9 +8,6 @@
// For arrayEq.
load("jstests/aggregation/extras/utils.js");
-const dbName = "test";
-const collName = jsTest.name();
-
Random.setRandomSeed();
/**
@@ -51,54 +44,42 @@ function generateRandomDocument() {
};
}
-function doExecutionTest(conn) {
- const coll = conn.getDB(dbName).getCollection(collName);
- coll.drop();
-
- // Insert a bunch of documents of the form above.
- const nDocs = 10;
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; i++) {
- bulk.insert(generateRandomDocument());
- }
- assert.commandWorked(bulk.execute());
-
- // Extract the contents of the address field, and make sure that doing the same
- // with replaceRoot yields the correct answer.
- // First compute each separately, since we know all of the fields in the address,
- // to make sure we have the correct results.
- let addressPipe = [{
- $project: {
- "_id": 0,
- "number": "$address.number",
- "street": "$address.street",
- "city": "$address.city",
- "zip": "$address.zip"
- }
- }];
- let correctAddresses = coll.aggregate(addressPipe).toArray();
-
- // Then compute the same results using $replaceRoot.
- let replaceWithResult = coll.aggregate([
- {$replaceRoot: {newRoot: "$address"}},
- {$sort: {city: 1, zip: 1, street: 1, number: 1}}
- ])
- .toArray();
+const dbName = "test";
+const collName = jsTest.name();
+const coll = db.getCollection(collName);
+coll.drop();
- // Then assert they are the same.
- assert(
- arrayEq(replaceWithResult, correctAddresses),
- "$replaceRoot does not work the same as $project-ing the relevant fields to the top level");
+// Insert a bunch of documents of the form above.
+const nDocs = 10;
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < nDocs; i++) {
+ bulk.insert(generateRandomDocument());
}
+assert.commandWorked(bulk.execute());
+
+// Extract the contents of the address field, and make sure that doing the same
+// with replaceRoot yields the correct answer.
+// First compute each separately, since we know all of the fields in the address,
+// to make sure we have the correct results.
+let addressPipe = [{
+ $project: {
+ "_id": 0,
+ "number": "$address.number",
+ "street": "$address.street",
+ "city": "$address.city",
+ "zip": "$address.zip"
+ }
+}];
+let correctAddresses = coll.aggregate(addressPipe).toArray();
-// Test against the standalone started by resmoke.py.
-let conn = db.getMongo();
-doExecutionTest(conn);
-print("Success! Standalone execution test for $replaceRoot passed.");
+// Then compute the same results using $replaceRoot.
+let replaceWithResult = coll.aggregate([
+ {$replaceRoot: {newRoot: "$address"}},
+ {$sort: {city: 1, zip: 1, street: 1, number: 1}}
+ ])
+ .toArray();
-// Test against a sharded cluster.
-let st = new ShardingTest({shards: 2});
-doExecutionTest(st.s0);
-st.stop();
-print("Success! Sharding test for $replaceRoot passed.");
+// Then assert they are the same.
+assert(arrayEq(replaceWithResult, correctAddresses),
+ "$replaceRoot does not work the same as $project-ing the relevant fields to the top level");
}());
diff --git a/jstests/aggregation/testSlave.js b/jstests/aggregation/testSlave.js
deleted file mode 100644
index 1b3d302be95..00000000000
--- a/jstests/aggregation/testSlave.js
+++ /dev/null
@@ -1,22 +0,0 @@
-// This test just make sure that aggregation is possible on a secondary node.
-// @tags: [
-// requires_replication,
-// requires_spawning_own_processes,
-// ]
-var replTest = new ReplSetTest({name: 'aggTestSlave', nodes: 2});
-var nodes = replTest.startSet();
-replTest.initiate();
-replTest.awaitReplication();
-
-var primary = replTest.getPrimary().getDB('test');
-var secondary = replTest.getSecondary().getDB('test');
-
-var options = {writeConcern: {w: 2}};
-primary.agg.insert({}, options);
-primary.agg.insert({}, options);
-primary.agg.insert({}, options);
-
-var res = secondary.agg.aggregate({$group: {_id: null, count: {$sum: 1}}});
-assert.eq(res.toArray(), [{_id: null, count: 3}]);
-
-replTest.stopSet();
diff --git a/jstests/aggregation/bugs/cursor_timeout.js b/jstests/noPassthrough/agg_cursor_timeout.js
index e9b80b0597f..ff5d7001374 100644
--- a/jstests/aggregation/bugs/cursor_timeout.js
+++ b/jstests/noPassthrough/agg_cursor_timeout.js
@@ -2,9 +2,6 @@
* Tests that an aggregation cursor is killed when it is timed out by the ClientCursorMonitor.
*
* This test was designed to reproduce SERVER-25585.
- * @tags: [
- * requires_spawning_own_processes,
- * ]
*/
(function() {
'use strict';
diff --git a/jstests/aggregation/bugs/lookup_unwind_getmore.js b/jstests/noPassthrough/lookup_unwind_getmore.js
index c184b5e56d6..8e86dba0b37 100644
--- a/jstests/aggregation/bugs/lookup_unwind_getmore.js
+++ b/jstests/noPassthrough/lookup_unwind_getmore.js
@@ -3,9 +3,6 @@
* changes as it unwinds the results.
*
* This test was designed to reproduce SERVER-22537.
- * @tags: [
- * requires_spawning_own_processes,
- * ]
*/
(function() {
'use strict';
diff --git a/jstests/aggregation/bugs/lookup_unwind_killcursor.js b/jstests/noPassthrough/lookup_unwind_killcursor.js
index 829f3b52089..0e3e547b920 100644
--- a/jstests/aggregation/bugs/lookup_unwind_killcursor.js
+++ b/jstests/noPassthrough/lookup_unwind_killcursor.js
@@ -3,9 +3,6 @@
* client for the aggregation pipeline is killed.
*
* This test was designed to reproduce SERVER-24386.
- * @tags: [
- * requires_spawning_own_processes,
- * ]
*/
(function() {
'use strict';
diff --git a/jstests/aggregation/sources/collStats/shard_host_info.js b/jstests/noPassthrough/shard_host_info.js
index ced3f9bb47f..6913d589deb 100644
--- a/jstests/aggregation/sources/collStats/shard_host_info.js
+++ b/jstests/noPassthrough/shard_host_info.js
@@ -3,18 +3,17 @@
* document when run via mongoS, and that the former is absent when run on a non-shard mongoD.
* @tags: [
* requires_sharding,
- * requires_spawning_own_processes,
* ]
*/
(function() {
"use strict";
-// Test mongoD behaviour using the standalone started by resmoke.py.
-let testDB = db.getSiblingDB(jsTestName());
+const conn = MongoRunner.runMongod();
+let testDB = conn.getDB(jsTestName());
let testColl = testDB.test;
// getHostName() doesn't include port, db.getMongo().host is 127.0.0.1:<port>
-const hostName = (getHostName() + ":" + db.getMongo().host.split(":")[1]);
+const hostName = (getHostName() + ":" + testDB.getMongo().host.split(":")[1]);
// Test that the shard field is absent and the host field is present when run on mongoD.
assert.eq(testColl
@@ -25,6 +24,8 @@ assert.eq(testColl
.toArray(),
[{_id: {host: hostName}}]);
+MongoRunner.stopMongod(conn);
+
// Test that both shard and hostname are present for $collStats results on a sharded cluster.
const st = new ShardingTest({name: jsTestName(), shards: 2});
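
For context, the pipeline shape this test runs, against an illustrative namespace: $collStats with an empty spec emits one metadata document per host serving the collection, carrying a 'host' field on mongoD and additionally a 'shard' field when run through mongoS, which is what the test's assertions check.

// Grouping on {shard, host} makes the result independent of data size;
// a missing 'shard' field simply drops out of the group key on mongoD.
db.test.aggregate([
    {$collStats: {}},
    {$group: {_id: {shard: "$shard", host: "$host"}}}
]);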
diff --git a/jstests/aggregation/mongos_merge.js b/jstests/sharding/agg_mongos_merge.js
index e99dd77eb02..f36e81b6005 100644
--- a/jstests/aggregation/mongos_merge.js
+++ b/jstests/sharding/agg_mongos_merge.js
@@ -6,14 +6,8 @@
* - Non-splittable streaming stages, e.g. $match, $project, $unwind.
* - Blocking stages in cases where 'allowDiskUse' is false, e.g. $group, $bucketAuto.
*
- * Because wrapping these aggregations in a $facet stage will affect how the pipeline can be merged,
- * and will therefore invalidate the results of the test cases below, we tag this test to prevent it
- * running under the 'aggregation_facet_unwind' passthrough.
- *
* @tags: [
- * do_not_wrap_aggregations_in_facets,
* requires_sharding,
- * requires_spawning_own_processes,
* requires_profiling,
* ]
*/
@@ -66,7 +60,7 @@ assert.commandWorked(mongosDB.adminCommand(
{moveChunk: mongosColl.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
// Create a random geo co-ord generator for testing.
-var georng = new GeoNearRandomTest(mongosColl);
+var georng = new GeoNearRandomTest(mongosColl, mongosDB);
// Write 400 documents across the 4 chunks.
for (let i = -200; i < 200; i++) {
diff --git a/jstests/aggregation/mongos_slaveok.js b/jstests/sharding/agg_mongos_slaveok.js
index f1c235266fc..287902092bc 100644
--- a/jstests/aggregation/mongos_slaveok.js
+++ b/jstests/sharding/agg_mongos_slaveok.js
@@ -4,7 +4,6 @@
* @tags: [
* requires_replication,
* requires_sharding,
- * requires_spawning_own_processes,
* requires_profiling
* ]
*/
diff --git a/jstests/aggregation/shard_targeting.js b/jstests/sharding/agg_shard_targeting.js
index 7a913f43967..7a913f43967 100644
--- a/jstests/aggregation/shard_targeting.js
+++ b/jstests/sharding/agg_shard_targeting.js
diff --git a/jstests/aggregation/sources/merge/exchange_explain.js b/jstests/sharding/exchange_explain.js
index 23bed99973d..23bed99973d 100644
--- a/jstests/aggregation/sources/merge/exchange_explain.js
+++ b/jstests/sharding/exchange_explain.js
diff --git a/jstests/sharding/lookup.js b/jstests/sharding/lookup.js
index 82a8c63624b..e010c0996e5 100644
--- a/jstests/sharding/lookup.js
+++ b/jstests/sharding/lookup.js
@@ -641,6 +641,18 @@ err = assert.throws(() => sourceColl
{allowDiskUse: true})
.itcount());
assert.eq(err.code, 28769);
+err = assert.throws(
+ () => sourceColl
+ .aggregate(
+ [{$facet: {
+ a: {
+ $lookup:
+ {localField: "a", foreignField: "b", from: fromColl.getName(), as: "same"}
+ }
+ }}],
+ {allowDiskUse: true})
+ .itcount());
+assert.eq(err.code, 40170);
st.stop();
}());
diff --git a/jstests/aggregation/sources/merge/use_cases.js b/jstests/sharding/merge_use_cases.js
index 6c1c71b9419..6c1c71b9419 100644
--- a/jstests/aggregation/sources/merge/use_cases.js
+++ b/jstests/sharding/merge_use_cases.js
diff --git a/jstests/aggregation/pipeline_pass_through_from_mongos.js b/jstests/sharding/pipeline_pass_through_from_mongos.js
index 46805d74372..46805d74372 100644
--- a/jstests/aggregation/pipeline_pass_through_from_mongos.js
+++ b/jstests/sharding/pipeline_pass_through_from_mongos.js
diff --git a/jstests/aggregation/bugs/server37750.js b/jstests/sharding/server37750.js
index 902c427c292..902c427c292 100644
--- a/jstests/aggregation/bugs/server37750.js
+++ b/jstests/sharding/server37750.js
diff --git a/jstests/aggregation/bugs/server6179.js b/jstests/sharding/server6179.js
index 065f5b261ee..065f5b261ee 100644
--- a/jstests/aggregation/bugs/server6179.js
+++ b/jstests/sharding/server6179.js
diff --git a/jstests/aggregation/sharded_agg_cleanup_on_error.js b/jstests/sharding/sharded_agg_cleanup_on_error.js
index 1a57fffe018..1a57fffe018 100644
--- a/jstests/aggregation/sharded_agg_cleanup_on_error.js
+++ b/jstests/sharding/sharded_agg_cleanup_on_error.js