summaryrefslogtreecommitdiff
path: root/jstests/sharding
diff options
context:
space:
mode:
authorEddie Louie <eddie.louie@mongodb.com>2017-10-17 15:47:56 -0400
committerEddie Louie <eddie.louie@mongodb.com>2017-10-18 02:21:38 -0400
commit553dff7f3b96b95883b406c7486a04877fbfb094 (patch)
treee6121ed4c3c269d4a0b58671e3ba60d72a7b4952 /jstests/sharding
parent046a5a01c1bc6eeb05852bed9981cbc457802a00 (diff)
downloadmongo-553dff7f3b96b95883b406c7486a04877fbfb094.tar.gz
SERVER-31579 Tag specific sharding tests as being resource intensive
Diffstat (limited to 'jstests/sharding')
-rw-r--r--jstests/sharding/auth.js11
-rw-r--r--jstests/sharding/auto_rebalance_parallel_replica_sets.js4
-rw-r--r--jstests/sharding/autosplit_heuristics.js12
-rw-r--r--jstests/sharding/bouncing_count.js8
-rw-r--r--jstests/sharding/bulk_shard_insert.js10
-rw-r--r--jstests/sharding/commands_that_write_accept_wc_configRS.js10
-rw-r--r--jstests/sharding/commands_that_write_accept_wc_shards.js8
-rw-r--r--jstests/sharding/delete_during_migrate.js12
-rw-r--r--jstests/sharding/large_chunk.js9
-rw-r--r--jstests/sharding/migrateBig_balancer.js6
-rw-r--r--jstests/sharding/move_chunk_wc.js7
-rw-r--r--jstests/sharding/read_pref.js4
-rw-r--r--jstests/sharding/read_pref_cmd.js6
-rw-r--r--jstests/sharding/recovering_slaveok.js11
-rw-r--r--jstests/sharding/remove2.js10
-rw-r--r--jstests/sharding/sharding_balance4.js8
-rw-r--r--jstests/sharding/sharding_migrate_cursor1.js9
-rw-r--r--jstests/sharding/sharding_rs1.js8
-rw-r--r--jstests/sharding/stale_mongos_updates_and_removes.js4
-rw-r--r--jstests/sharding/top_chunk_autosplit.js5
-rw-r--r--jstests/sharding/zbigMapReduce.js10
21 files changed, 142 insertions, 30 deletions
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index f598c43da8a..8a87be3e68f 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -1,5 +1,12 @@
-// Tests administrative sharding operations and map-reduce work or fail as expected, when key-based
-// authentication is used
+/**
+ * Tests administrative sharding operations and map-reduce work or fail as expected, when key-based
+ * authentication is used
+ *
+ * This test is labeled resource intensive because its total io_write is 30MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger. Its total io_write is 630MB compared to a median
+ * of 135MB in mmapv1.
+ * @tags: [resource_intensive]
+ */
(function() {
'use strict';
load("jstests/replsets/rslib.js");
diff --git a/jstests/sharding/auto_rebalance_parallel_replica_sets.js b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
index a5a6d1bde09..15215f3e38f 100644
--- a/jstests/sharding/auto_rebalance_parallel_replica_sets.js
+++ b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
@@ -1,5 +1,9 @@
/**
* Tests that the cluster is balanced in parallel in one balancer round (replica sets).
+ *
+ * This test is labeled resource intensive because its total io_write is 900MB compared to a median
+ * of 135MB across all sharding tests in mmapv1.
+ * @tags: [resource_intensive]
*/
(function() {
'use strict';
diff --git a/jstests/sharding/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
index 6b7f0e2b290..46daff22e57 100644
--- a/jstests/sharding/autosplit_heuristics.js
+++ b/jstests/sharding/autosplit_heuristics.js
@@ -1,7 +1,11 @@
-//
-// Tests autosplitting heuristics, and that the heuristic counting of chunk sizes
-// works as expected even after splitting.
-//
+/**
+ * Tests autosplitting heuristics, and that the heuristic counting of chunk sizes
+ * works as expected even after splitting.
+ *
+ * This test is labeled resource intensive because its total io_write is 53MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger.
+ * @tags: [resource_intensive]
+ */
(function() {
'use strict';
diff --git a/jstests/sharding/bouncing_count.js b/jstests/sharding/bouncing_count.js
index d73190f4744..ec2451adcba 100644
--- a/jstests/sharding/bouncing_count.js
+++ b/jstests/sharding/bouncing_count.js
@@ -1,4 +1,10 @@
-// Tests whether new sharding is detected on insert by mongos
+/**
+ * Tests whether new sharding is detected on insert by mongos
+ *
+ * This test is labeled resource intensive because its total io_write is 650MB compared to a median
+ * of 135MB across all sharding tests in mmapv1.
+ * @tags: [resource_intensive]
+ */
(function() {
'use strict';
diff --git a/jstests/sharding/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js
index 6d36e5fda93..e8f0c2d6bf0 100644
--- a/jstests/sharding/bulk_shard_insert.js
+++ b/jstests/sharding/bulk_shard_insert.js
@@ -1,5 +1,11 @@
-// Test bulk inserts running alongside the auto-balancer. Ensures that they do not conflict with each
-// other.
+/**
 * Test bulk inserts running alongside the auto-balancer. Ensures that they do not conflict with each
+ * other.
+ *
+ * This test is labeled resource intensive because its total io_write is 106MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger.
+ * @tags: [resource_intensive]
+ */
(function() {
'use strict';
diff --git a/jstests/sharding/commands_that_write_accept_wc_configRS.js b/jstests/sharding/commands_that_write_accept_wc_configRS.js
index 89c273c77e1..ac4932f290b 100644
--- a/jstests/sharding/commands_that_write_accept_wc_configRS.js
+++ b/jstests/sharding/commands_that_write_accept_wc_configRS.js
@@ -1,6 +1,3 @@
-load('jstests/libs/write_concern_util.js');
-load('jstests/multiVersion/libs/auth_helpers.js');
-
/**
* This file tests that commands that do writes accept a write concern in a sharded cluster. This
* test defines various database commands and what they expect to be true before and after the fact.
@@ -9,7 +6,14 @@ load('jstests/multiVersion/libs/auth_helpers.js');
* replication between nodes to make sure the write concern is actually being waited for. This only
* tests commands that get sent to config servers and must have w: majority specified. If these
* commands fail, they should return an actual error, not just a writeConcernError.
+ *
+ * This test is labeled resource intensive because its total io_write is 70MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger. Its total io_write is 1900MB compared to a median
+ * of 135MB in mmapv1.
+ * @tags: [resource_intensive]
*/
+load('jstests/libs/write_concern_util.js');
+load('jstests/multiVersion/libs/auth_helpers.js');
(function() {
"use strict";
diff --git a/jstests/sharding/commands_that_write_accept_wc_shards.js b/jstests/sharding/commands_that_write_accept_wc_shards.js
index 6e9b8c95b33..4ea13819064 100644
--- a/jstests/sharding/commands_that_write_accept_wc_shards.js
+++ b/jstests/sharding/commands_that_write_accept_wc_shards.js
@@ -1,12 +1,16 @@
-load('jstests/libs/write_concern_util.js');
-
/**
* This file tests that commands that do writes accept a write concern in a sharded cluster. This
* test defines various database commands and what they expect to be true before and after the fact.
* It then runs the commands with an invalid writeConcern and a valid writeConcern and
* ensures that they succeed and fail appropriately. This only tests functions that aren't run
* on config servers.
+ *
+ * This test is labeled resource intensive because its total io_write is 58MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger. Its total io_write is 4200MB compared to a median
+ * of 135MB in mmapv1.
+ * @tags: [resource_intensive]
*/
+load('jstests/libs/write_concern_util.js');
(function() {
"use strict";
diff --git a/jstests/sharding/delete_during_migrate.js b/jstests/sharding/delete_during_migrate.js
index cabd936b7e1..2e7f6220d75 100644
--- a/jstests/sharding/delete_during_migrate.js
+++ b/jstests/sharding/delete_during_migrate.js
@@ -1,6 +1,12 @@
-// Test migrating a big chunk while deletions are happening within that chunk. Test is slightly
-// non-deterministic, since removes could happen before migrate starts. Protect against that by
-// making chunk very large.
+/**
+ * Test migrating a big chunk while deletions are happening within that chunk. Test is slightly
+ * non-deterministic, since removes could happen before migrate starts. Protect against that by
+ * making chunk very large.
+ *
+ * This test is labeled resource intensive because its total io_write is 88MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger.
+ * @tags: [resource_intensive]
+ */
(function() {
'use strict';
diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js
index 6fe2041004d..1f224fc211a 100644
--- a/jstests/sharding/large_chunk.js
+++ b/jstests/sharding/large_chunk.js
@@ -1,4 +1,11 @@
-// Where we test operations dealing with large chunks
+/**
+ * Where we test operations dealing with large chunks
+ *
+ * This test is labeled resource intensive because its total io_write is 220MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger. Its total io_write is 1160MB compared to a median
+ * of 135MB in mmapv1.
+ * @tags: [resource_intensive]
+ */
(function() {
'use strict';
diff --git a/jstests/sharding/migrateBig_balancer.js b/jstests/sharding/migrateBig_balancer.js
index 03e98fa5493..ed4b792c915 100644
--- a/jstests/sharding/migrateBig_balancer.js
+++ b/jstests/sharding/migrateBig_balancer.js
@@ -1,3 +1,9 @@
+/**
+ * This test is labeled resource intensive because its total io_write is 95MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger. Its total io_write is 1086MB compared to a median
+ * of 135MB in mmapv1.
+ * @tags: [resource_intensive]
+ */
(function() {
"use strict";
diff --git a/jstests/sharding/move_chunk_wc.js b/jstests/sharding/move_chunk_wc.js
index 85ae3400652..0cc8c163b6e 100644
--- a/jstests/sharding/move_chunk_wc.js
+++ b/jstests/sharding/move_chunk_wc.js
@@ -1,5 +1,3 @@
-load('jstests/libs/write_concern_util.js');
-
/**
 * This command tests that moveChunk gives a proper response when the writeConcern cannot be met.
* The test creates a sharded cluster with shards and config servers of different sizes to see how
@@ -8,7 +6,12 @@ load('jstests/libs/write_concern_util.js');
* It then passes a writeConcern too high for the to shard and sees that it fails. It then passes
* a writeConcern too high for the from shard and sees that that fails. moveChunk does not yield
* a writeConcernError. It should simply fail when the writeConcern is not met on the shards.
+ *
+ * This test is labeled resource intensive because its total io_write is 617MB compared to a median
+ * of 135MB across all sharding tests in mmapv1.
+ * @tags: [resource_intensive]
*/
+load('jstests/libs/write_concern_util.js');
(function() {
"use strict";
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index 2ca144099f0..0640c347871 100644
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -1,6 +1,10 @@
/**
* Integration test for read preference and tagging. The more comprehensive unit test can be found
* in dbtests/replica_set_monitor_test.cpp.
+ *
+ * This test is labeled resource intensive because its total io_write is 706MB compared to a median
+ * of 135MB across all sharding tests in mmapv1.
+ * @tags: [resource_intensive]
*/
// Checking UUID consistency involves talking to a shard node, which in this test is shutdown
diff --git a/jstests/sharding/read_pref_cmd.js b/jstests/sharding/read_pref_cmd.js
index 10b8a883507..555615fb5df 100644
--- a/jstests/sharding/read_pref_cmd.js
+++ b/jstests/sharding/read_pref_cmd.js
@@ -1,3 +1,9 @@
+/**
+ * This test is labeled resource intensive because its total io_write is 47MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger. Its total io_write is 1540MB compared to a median
+ * of 135MB in mmapv1.
+ * @tags: [resource_intensive]
+ */
load("jstests/replsets/rslib.js");
var NODE_COUNT = 2;
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
index 082d4ad6a1f..8287cdd3e14 100644
--- a/jstests/sharding/recovering_slaveok.js
+++ b/jstests/sharding/recovering_slaveok.js
@@ -1,6 +1,11 @@
-// This tests that slaveOk'd queries in sharded setups get correctly routed when a slave goes into
-// RECOVERING state, and don't break
-
+/**
+ * This tests that slaveOk'd queries in sharded setups get correctly routed when a slave goes into
+ * RECOVERING state, and don't break
+ *
+ * This test is labeled resource intensive because its total io_write is 748MB compared to a median
+ * of 135MB across all sharding tests in mmapv1.
+ * @tags: [resource_intensive]
+ */
(function() {
'use strict';
load("jstests/replsets/rslib.js");
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index da609e50577..05f2b1e0368 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -1,5 +1,11 @@
-// Test that removing and re-adding shard works correctly.
-
+/**
+ * Test that removing and re-adding shard works correctly.
+ *
+ * This test is labeled resource intensive because its total io_write is 59MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger. Its total io_write is 918MB compared to a median
+ * of 135MB in mmapv1.
+ * @tags: [resource_intensive]
+ */
load("jstests/replsets/rslib.js");
// The UUID consistency check uses connections to shards cached on the ShardingTest object, but this
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index 23905ffcb2d..e5c6e1056b5 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -1,4 +1,10 @@
-// Check that doing updates done during a migrate all go to the right place
+/**
+ * Check that doing updates done during a migrate all go to the right place
+ *
+ * This test is labeled resource intensive because its total io_write is 36MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger.
+ * @tags: [resource_intensive]
+ */
(function() {
var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}});
diff --git a/jstests/sharding/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js
index 584181cdce2..71858ef833d 100644
--- a/jstests/sharding/sharding_migrate_cursor1.js
+++ b/jstests/sharding/sharding_migrate_cursor1.js
@@ -1,4 +1,11 @@
-// SERVER-2068
+/**
+ * SERVER-2068
+ *
+ * This test is labeled resource intensive because its total io_write is 131MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger. Its total io_write is 1230MB compared to a median
+ * of 135MB in mmapv1.
+ * @tags: [resource_intensive]
+ */
(function() {
var chunkSize = 25;
diff --git a/jstests/sharding/sharding_rs1.js b/jstests/sharding/sharding_rs1.js
index d617666dd55..07af5b03862 100644
--- a/jstests/sharding/sharding_rs1.js
+++ b/jstests/sharding/sharding_rs1.js
@@ -1,4 +1,10 @@
-// tests sharding with replica sets
+/**
+ * tests sharding with replica sets
+ *
+ * This test is labeled resource intensive because its total io_write is 798MB compared to a median
+ * of 135MB across all sharding tests in mmapv1.
+ * @tags: [resource_intensive]
+ */
(function() {
'use strict';
diff --git a/jstests/sharding/stale_mongos_updates_and_removes.js b/jstests/sharding/stale_mongos_updates_and_removes.js
index bacea1f695a..a58d1656590 100644
--- a/jstests/sharding/stale_mongos_updates_and_removes.js
+++ b/jstests/sharding/stale_mongos_updates_and_removes.js
@@ -10,6 +10,10 @@
* versioned, because only one shard can be originally targeted for a point query on the shard key.
*
* All other multi updates and removes are sent to all shards and unversioned.
+ *
+ * This test is labeled resource intensive because its total io_write is 31MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger.
+ * @tags: [resource_intensive]
*/
// Create a new sharded collection with numDocs documents, with two docs sharing each shard key
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index 834eb8c55b7..f3f7486d098 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -1,3 +1,8 @@
+/**
+ * This test is labeled resource intensive because its total io_write is 90MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger.
+ * @tags: [resource_intensive]
+ */
function shardSetup(shardConfig, dbName, collName) {
var st = new ShardingTest(shardConfig);
var db = st.getDB(dbName);
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index 6c2c6cc56bf..cfa67f2d16f 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -1,5 +1,11 @@
-// This test is skipped on 32-bit platforms
-
+/**
+ * This test is skipped on 32-bit platforms
+ *
+ * This test is labeled resource intensive because its total io_write is 625MB compared to a median
+ * of 5MB across all sharding tests in wiredTiger. Its total io_write is 3387MB compared to a median
+ * of 135MB in mmapv1.
+ * @tags: [resource_intensive]
+ */
function setupTest() {
var s = new ShardingTest({
shards: 2,