author      Pierlauro Sciarelli <pierlauro.sciarelli@mongodb.com>    2022-02-15 10:34:50 +0000
committer   Evergreen Agent <no-reply@evergreen.mongodb.com>         2022-02-15 11:17:53 +0000
commit      8dccb4a975ef179ab64da323de73341710a4bded (patch)
tree        b2c4c79093f8bb5e0cd2099a98ae5eaf6c280d69
parent      5c8c37fa88f99a1ff1fb0df7d1fcaaa8990feeb3 (diff)
download    mongo-8dccb4a975ef179ab64da323de73341710a4bded.tar.gz
SERVER-62553 FSM workload with chunk migrations for different collections
-rw-r--r--   jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js   104
1 file changed, 104 insertions, 0 deletions
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js b/jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js
new file mode 100644
index 00000000000..c9eb00ac7c7
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js
@@ -0,0 +1,104 @@
+'use strict';
+
+/**
+ * Perform continuous moveChunk on multiple collections/databases.
+ *
+ * @tags: [
+ * requires_sharding,
+ * assumes_balancer_off,
+ * does_not_support_add_remove_shards,
+ * ]
+ */
+load('jstests/concurrency/fsm_libs/extend_workload.js');
+load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js');
+
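+// Each workload thread operates over a 3 x 3 grid of namespaces (db0..db2 x
+// collA..collC); threadCount below is set to dbNames.length * collNames.length,
+// and every thread owns its own partition in every namespace.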
+const dbNames = ['db0', 'db1', 'db2'];
+const collNames = ['collA', 'collB', 'collC'];
+
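+// Runs `fn` with TestData.skipRetryOnNetworkError temporarily set to true, so
+// that network errors (e.g. during a shard primary stepdown) surface to the
+// caller instead of being transparently retried by the shell. The previous
+// value of the flag is restored whether or not `fn` throws.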
+const withSkipRetryOnNetworkError = (fn) => {
+ const previousSkipRetryOnNetworkError = TestData.skipRetryOnNetworkError;
+ TestData.skipRetryOnNetworkError = true;
+
+    let res;
+    try {
+        res = fn();
+    } finally {
+        TestData.skipRetryOnNetworkError = previousSkipRetryOnNetworkError;
+    }
+
+ return res;
+};
+
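+// In stepdown suites the shell's automatic network retries are disabled and
+// `fn` is instead retried manually via assert.soonNoExcept until it succeeds;
+// outside stepdown suites `fn` is simply invoked once.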
+const runWithManualRetriesIfInStepdownSuite = (fn) => {
+ if (TestData.runningWithShardStepdowns) {
+ var result = undefined;
+ assert.soonNoExcept(() => {
+ result = withSkipRetryOnNetworkError(fn);
+ return true;
+ });
+ return result;
+ } else {
+ return fn();
+ }
+};
+
+var $config = extendWorkload($config, function($config, $super) {
+ $config.threadCount = dbNames.length * collNames.length;
+ $config.iterations = 64;
+
+ // Number of documents per partition. (One chunk per partition and one partition per thread).
+ $config.data.partitionSize = 100;
+
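+    // Each moveChunk iteration targets a randomly chosen database/collection
+    // pair, then delegates to the base workload's moveChunk state.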
+ $config.states.moveChunk = function moveChunk(db, collName, connCache) {
+ const dbName = dbNames[Random.randInt(dbNames.length)];
+ db = db.getSiblingDB(dbName);
+ collName = collNames[Random.randInt(collNames.length)];
+ $super.states.moveChunk.apply(this, [db, collName, connCache]);
+ };
+
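+    // Run the base workload's init state once per namespace so this thread
+    // gets a dedicated partition (one chunk) in each of them. In stepdown
+    // suites the calls are forced to run outside of a transaction and are
+    // retried manually on network errors.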
+ $config.states.init = function init(db, collName, connCache) {
+ for (var i = 0; i < dbNames.length; i++) {
+ const dbName = dbNames[i];
+ db = db.getSiblingDB(dbName);
+ for (var j = 0; j < collNames.length; j++) {
+ collName = collNames[j];
+            if (TestData.runningWithShardStepdowns) {
+                fsm.forceRunningOutsideTransaction(this);
+            }
+            runWithManualRetriesIfInStepdownSuite(() => {
+                $super.states.init.apply(this, [db, collName, connCache]);
+            });
+ }
+ }
+ };
+
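+    // After a single init, every thread loops in the moveChunk state for the
+    // remaining iterations.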
+ $config.transitions = {
+ init: {moveChunk: 1.0},
+ moveChunk: {moveChunk: 1.0},
+ };
+
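+    // Create the databases (with primary shards assigned round-robin across
+    // the cluster) and shard every collection before delegating per-collection
+    // setup to the base workload.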
+ $config.setup = function setup(db, collName, cluster) {
+ const shards = Object.keys(cluster.getSerializedCluster().shards);
+ const numShards = shards.length;
+ // Initialize `dbNames.length` databases
+ for (var i = 0; i < dbNames.length; i++) {
+ const dbName = dbNames[i];
+ db = db.getSiblingDB(dbName);
+            assertAlways.commandWorked(
+                db.adminCommand({enablesharding: dbName, primaryShard: shards[i % numShards]}));
+ // Initialize `collNames.length` sharded collections per db
+ for (var j = 0; j < collNames.length; j++) {
+ collName = collNames[j];
+ const ns = dbName + '.' + collName;
+ assertAlways.commandWorked(
+ db.adminCommand({shardCollection: ns, key: this.shardKey}));
+ $super.setup.apply(this, [db, collName, cluster]);
+ }
+ }
+ };
+
+ return $config;
+});
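
A workload file like this is not executed directly; it runs under resmoke as
part of a concurrency suite. As a rough sketch (the suite name below is an
assumption, not taken from this commit), a local run would look like:

    python buildscripts/resmoke.py run \
        --suites=concurrency_sharded_replication \
        jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js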