author     Pierlauro Sciarelli <pierlauro.sciarelli@mongodb.com>  2022-06-09 07:49:49 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>       2022-06-09 08:55:50 +0000
commit     9f7a67bbca67241ce555d4799da4f4136ba8f9e6 (patch)
tree       52e272e107ca69d34fb1ed2795cb6371e001404a /jstests
parent     df4b732eefe0f7a5a58f6f23baa2b31b45e2a38c (diff)
SERVER-66754 Review tests disabled because expecting initial chunks split
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/sharding/shard_max_size.js (renamed from jstests/sharding/sharding_balance2.js) |  10
-rw-r--r--  jstests/sharding/sharding_balance3.js                                                    |  84
-rw-r--r--  jstests/sharding/sharding_migrate_cursor1.js                                             |  98
3 files changed, 2 insertions(+), 190 deletions(-)
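All three tests below carried the same early-exit guard, which this commit either deletes along with its test or, for the surviving shard_max_size.js, removes together with the chunk-count assertion that made it necessary. A minimal standalone sketch of that guard pattern (the two-shard fixture options here are illustrative, not taken from any one of these tests):

(function() {
'use strict';

load("jstests/libs/feature_flag_util.js");

var st = new ShardingTest({shards: 2, mongos: 1});

// Bail out early when the autosplitter is gone: these tests asserted on
// chunk counts produced by the initial split, which no longer happens.
if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'), 'NoMoreAutoSplitter')) {
    jsTestLog("Skipping as featureFlagNoMoreAutoSplitter is enabled");
    st.stop();
    return;
}

// ... test body ...

st.stop();
})();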
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/shard_max_size.js
index bfb57e1dec6..35d4fa70e22 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/shard_max_size.js
@@ -1,5 +1,7 @@
/**
* Test the maxSize setting for the addShard command.
+ *
+ * @tags: [does_not_support_stepdowns]
*/
(function() {
'use strict';
@@ -19,13 +21,6 @@ var s = new ShardingTest({
}
});
-// TODO SERVER-66754 review tests disabled because expecting initial chunks split
-if (FeatureFlagUtil.isEnabled(s.configRS.getPrimary().getDB('admin'), 'NoMoreAutoSplitter')) {
- jsTestLog("Skipping as featureFlagNoMoreAutoSplitter is enabled");
- s.stop();
- return;
-}
-
var db = s.getDB("test");
var names = s.getConnNames();
@@ -49,7 +44,6 @@ while (inserted < (40 * 1024 * 1024)) {
assert.commandWorked(bulk.execute());
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
-assert.gt(findChunksUtil.countChunksForNs(s.config, "test.foo"), 10);
var getShardSize = function(conn) {
var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
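The hunk above cuts off inside getShardSize; in the surviving test this helper reports a shard's total on-disk size so the maxSize assertions can compare the two shards. A sketch completing it, assuming it returns the standard totalSize field of the listDatabases reply:

// Total bytes used by all databases on this shard, per listDatabases.
var getShardSize = function(conn) {
    var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
    assert.commandWorked(listDatabases);
    return listDatabases.totalSize;
};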
diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
deleted file mode 100644
index b13751bd537..00000000000
--- a/jstests/sharding/sharding_balance3.js
+++ /dev/null
@@ -1,84 +0,0 @@
-// Waits for the balancer to run once, then stops it and checks that it is no longer running.
-
-(function() {
-
-load("jstests/sharding/libs/find_chunks_util.js");
-load("jstests/libs/feature_flag_util.js");
-
-var s = new ShardingTest({
- name: "slow_sharding_balance3",
- shards: 2,
- mongos: 1,
- other: {
- chunkSize: 1,
- enableBalancer: true,
- shardOptions:
- {setParameter: {internalQueryMaxBlockingSortMemoryUsageBytes: 32 * 1024 * 1024}}
- }
-});
-
-// TODO SERVER-66754 review tests disabled because expecting initial chunks split
-if (FeatureFlagUtil.isEnabled(s.configRS.getPrimary().getDB('admin'), 'NoMoreAutoSplitter')) {
- jsTestLog("Skipping as featureFlagNoMoreAutoSplitter is enabled");
- s.stop();
- return;
-}
-
-s.adminCommand({enablesharding: "test"});
-s.ensurePrimaryShard('test', s.shard1.shardName);
-
-s.config.settings.find().forEach(printjson);
-
-db = s.getDB("test");
-
-bigString = "";
-while (bigString.length < 10000)
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-
-inserted = 0;
-num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while (inserted < (40 * 1024 * 1024)) {
- bulk.insert({_id: num++, s: bigString});
- inserted += bigString.length;
-}
-assert.commandWorked(bulk.execute());
-
-s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-assert.lt(20, findChunksUtil.countChunksForNs(s.config, "test.foo"), "setup2");
-
-function diff1() {
- var x = s.chunkCounts("foo");
- printjson(x);
- return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) -
- Math.min(x[s.shard0.shardName], x[s.shard1.shardName]);
-}
-
-assert.lt(10, diff1());
-
-// Wait for balancer to kick in.
-var initialDiff = diff1();
-assert.soon(function() {
- return diff1() != initialDiff;
-}, "Balancer did not kick in", 5 * 60 * 1000, 1000);
-
-print("* A");
-print("disabling the balancer");
-s.stopBalancer();
-s.config.settings.find().forEach(printjson);
-print("* B");
-
-print(diff1());
-
-var currDiff = diff1();
-var waitTime = 0;
-var startTime = Date.now();
-while (waitTime < (1000 * 60)) {
- // Wait for 60 seconds to ensure balancer did not run
- assert.eq(currDiff, diff1(), "balance with stopped flag should not have happened");
- sleep(5000);
- waitTime = Date.now() - startTime;
-}
-
-s.stop();
-})();
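The heart of the deleted test is its diff1 helper: the chunk-count spread between the two shards, which should change while the balancer runs and hold steady once it is stopped. A condensed sketch of that check (assumes st is a running two-shard ShardingTest with test.foo sharded into many chunks):

// Chunk-count spread across the two shards; it shrinks while the balancer
// migrates chunks and must stay frozen after stopBalancer().
function chunkDiff(st) {
    var counts = st.chunkCounts("foo");
    return Math.max(counts[st.shard0.shardName], counts[st.shard1.shardName]) -
        Math.min(counts[st.shard0.shardName], counts[st.shard1.shardName]);
}

var initialDiff = chunkDiff(st);
assert.soon(() => chunkDiff(st) != initialDiff, "Balancer did not kick in", 5 * 60 * 1000, 1000);

st.stopBalancer();
var frozenDiff = chunkDiff(st);
sleep(60 * 1000);  // A stopped balancer must not move anything in the meantime.
assert.eq(frozenDiff, chunkDiff(st), "balance with stopped flag should not have happened");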
diff --git a/jstests/sharding/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js
deleted file mode 100644
index c0ef8d391fd..00000000000
--- a/jstests/sharding/sharding_migrate_cursor1.js
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * SERVER-2068
- *
- * This test is labeled resource intensive because its total io_write is 131MB compared to a median
- * of 5MB across all sharding tests in wiredTiger.
- * @tags: [resource_intensive]
- */
-(function() {
-
-load("jstests/libs/feature_flag_util.js");
-
-var chunkSize = 25;
-
-var s = new ShardingTest(
- {name: "migrate_cursor1", shards: 2, mongos: 1, other: {chunkSize: chunkSize}});
-// TODO SERVER-66754 review tests disabled because expecting initial chunks split
-if (FeatureFlagUtil.isEnabled(s.configRS.getPrimary().getDB('admin'), 'NoMoreAutoSplitter')) {
- jsTestLog("Skipping as featureFlagNoMoreAutoSplitter is enabled");
- s.stop();
- return;
-}
-
-s.adminCommand({enablesharding: "test"});
-db = s.getDB("test");
-s.ensurePrimaryShard('test', s.shard1.shardName);
-t = db.foo;
-
-bigString = "";
-stringSize = 1024;
-
-while (bigString.length < stringSize)
- bigString += "asdasdas";
-
-stringSize = bigString.length;
-docsPerChunk = Math.ceil((chunkSize * 1024 * 1024) / (stringSize - 12));
-numChunks = 5;
-numDocs = 20 * docsPerChunk;
-
-print("stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs);
-
-var bulk = t.initializeUnorderedBulkOp();
-for (var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, s: bigString});
-}
-assert.commandWorked(bulk.execute());
-
-s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-
-assert.lt(numChunks, s.config.chunks.find().count(), "initial 1");
-
-primary = s.getPrimaryShard("test").getDB("test").foo;
-secondaryName = s.getOther(primary.name);
-secondary = secondaryName.getDB("test").foo;
-
-assert.eq(numDocs, primary.count(), "initial 2");
-assert.eq(0, secondary.count(), "initial 3");
-assert.eq(numDocs, t.count(), "initial 4");
-
-x = primary.find({_id: {$lt: 500}}).batchSize(2);
-x.next(); // 1. Create an open cursor
-
-print("start moving chunks...");
-
-// 2. Move chunk from s0 to s1 without waiting for deletion.
-// Command returns, but the deletion on s0 will block due to the open cursor.
-s.adminCommand({moveChunk: "test.foo", find: {_id: 0}, to: secondaryName.name});
-
-// 3. Start second moveChunk command from s0 to s1.
-// This moveChunk should not observe the above deletion as a 'mod', transfer it to s1 and cause
-// deletion on s1.
-// This moveChunk will wait for deletion.
-join = startParallelShell(
- "db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " +
- docsPerChunk * 3 + " } , to : '" + secondaryName.name + "', _waitForDelete: true } )");
-assert.soon(function() {
- return db.x.count() > 0;
-}, "XXX", 30000, 1);
-
-// 4. Close the cursor to enable chunk deletion.
-print("itcount: " + x.itcount());
-
-x = null;
-for (i = 0; i < 5; i++)
- gc();
-
-print("cursor should be gone");
-
-// 5. Waiting for the second moveChunk to finish its deletion.
-// Note the deletion for the first moveChunk may not be finished.
-join();
-
-// assert.soon( function(){ return numDocs == t.count(); } , "at end 1" )
-// 6. Check the total number of docs on both shards to make sure no doc is lost.
-// Use itcount() to ignore orphan documents.
-assert.eq(numDocs, t.find().itcount(), "at end 2");
-
-s.stop();
-})();
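One detail of the deleted test worth spelling out: its final check uses itcount() instead of count() because the first migration's range deletion may still be pending, leaving orphaned documents on the donor shard. count() answers from shard metadata and can include those orphans, while find().itcount() iterates the results through mongos, where orphans are filtered out. A minimal illustration of the distinction, assuming the same test.foo collection mid-migration:

// Metadata-based count: may include orphans left by an unfinished migration.
var metadataCount = db.foo.count();
// Iterated count: streams documents through mongos, which filters orphans.
var iteratedCount = db.foo.find().itcount();
// The iterated count is the trustworthy one; metadata can only overcount here.
assert.gte(metadataCount, iteratedCount);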