summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSpencer T Brody <spencer@mongodb.com>2016-02-02 19:37:01 -0500
committerSpencer T Brody <spencer@mongodb.com>2016-02-04 14:57:52 -0500
commita818421d4f60b61ef81830af396deb1a3bb998de (patch)
treec5fe88919404bb93c00cbdcd547f20f0af1d9910
parentd24254aa15beeb2a93b696b36e40d4def40d4f3f (diff)
downloadmongo-a818421d4f60b61ef81830af396deb1a3bb998de.tar.gz
SERVER-22297 Add framework for writing tests that perform CSRS upgrade, convert existing tests to use it
-rw-r--r--jstests/libs/csrs_upgrade_util.js240
-rw-r--r--jstests/sharding/csrs_upgrade.js191
-rw-r--r--jstests/sharding/csrs_upgrade_during_migrate.js201
3 files changed, 317 insertions, 315 deletions
diff --git a/jstests/libs/csrs_upgrade_util.js b/jstests/libs/csrs_upgrade_util.js
new file mode 100644
index 00000000000..106a9847088
--- /dev/null
+++ b/jstests/libs/csrs_upgrade_util.js
@@ -0,0 +1,240 @@
+/**
+* This file defines a class, CSRSUpgradeCoordinator, which contains logic for spinning up a
+* sharded cluster using SCCC config servers and for upgrading that cluster to CSRS.
+* Include this file and use the CSRSUpgradeCoordinator class in any targeted jstests of csrs
+* upgrade behavior.
+*/
+
+load("jstests/replsets/rslib.js");
+
+var CSRSUpgradeCoordinator = function() {
+"use strict";
+
+var testDBName = jsTestName();
+var dataCollectionName = testDBName + ".data";
+var csrsName = jsTestName() + "-csrs";
+var numCsrsMembers;
+var st;
+var shardConfigs;
+var csrsConfig;
+var csrs;
+var csrs0Opts;
+
+this.getTestDBName = function() {
+ return testDBName;
+};
+
+this.getDataCollectionName = function() {
+ return dataCollectionName;
+};
+
+/**
+ * Returns an array of connections to the CSRS nodes.
+ */
+this.getCSRSNodes = function() {
+ return csrs;
+};
+
+/**
+ * Returns the replica set name of the config server replica set.
+ */
+this.getCSRSName = function() {
+ return csrsName;
+};
+
+/**
+ * Returns a copy of the options used for starting a mongos in the coordinator's cluster.
+ */
+this.getMongosConfig = function() {
+ var sconfig = Object.extend({}, st.s0.fullOptions, /* deep */ true);
+ delete sconfig.port;
+ return sconfig;
+};
+
+this.getMongos = function(n) {
+ return st._mongos[n];
+};
+
+this.getShardName = function(n) {
+ return shardConfigs[n]._id;
+};
+
+/**
+ * Returns the ShardingTest fixture backing this CSRSUpgradeCoordinator.
+ */
+this.getShardingTestFixture = function() {
+ return st;
+};
+
+/**
+ * Private helper method for waiting for a given node to return ismaster:true in its ismaster
+ * command response.
+ */
+var _waitUntilMaster = function (dnode) {
+ var isMasterReply;
+ assert.soon(function () {
+ isMasterReply = dnode.adminCommand({ismaster: 1});
+ return isMasterReply.ismaster;
+ }, function () {
+ return "Expected " + dnode.name + " to respond ismaster:true, but got " +
+ tojson(isMasterReply);
+ });
+};
+
+/**
+* Sets up the underlying sharded cluster in SCCC mode, and shards the test collection on _id.
+*/
+this.setupSCCCCluster = function() {
+ if (TestData.storageEngine == "wiredTiger" || TestData.storageEngine == "") {
+ // TODO(schwerin): SERVER-19739 Support testing CSRS with storage engines other than wired
+ // tiger, when such other storage engines support majority read concern.
+ numCsrsMembers = 3;
+ } else {
+ numCsrsMembers = 4;
+ }
+
+ jsTest.log("Setting up SCCC sharded cluster");
+
+ st = new ShardingTest({name: "csrsUpgrade",
+ mongos: 2,
+ rs: { nodes: 3 },
+ shards: 2,
+ nopreallocj: true,
+ other: {
+ sync: true,
+ enableBalancer: false,
+ useHostname: true
+ }});
+
+ shardConfigs = st.s0.getCollection("config.shards").find().toArray();
+ assert.eq(2, shardConfigs.length);
+
+ jsTest.log("Enabling sharding on " + testDBName + " and making " + this.getShardName(0) +
+ " the primary shard");
+ assert.commandWorked(st.s0.adminCommand({enablesharding: testDBName}));
+ st.ensurePrimaryShard(testDBName, this.getShardName(0));
+
+ jsTest.log("Creating a sharded collection " + dataCollectionName);
+ assert.commandWorked(st.s0.adminCommand({shardcollection: dataCollectionName,
+ key: { _id: 1 }
+ }));
+};
+
+/**
+ * Restarts the first config server as a single node replica set, while still leaving the cluster
+ * operating in SCCC mode.
+ */
+this.restartFirstConfigAsReplSet = function() {
+ jsTest.log("Restarting " + st.c0.name + " as a standalone replica set");
+ csrsConfig = {
+ _id: csrsName,
+ version: 1,
+ configsvr: true,
+ members: [ { _id: 0, host: st.c0.name }]
+ };
+ assert.commandWorked(st.c0.adminCommand({replSetInitiate: csrsConfig}));
+ csrs = [];
+ csrs0Opts = Object.extend({}, st.c0.fullOptions, /* deep */ true);
+ csrs0Opts.restart = true; // Don't clean the data files from the old c0.
+ csrs0Opts.replSet = csrsName;
+ csrs0Opts.configsvrMode = "sccc";
+ MongoRunner.stopMongod(st.c0);
+ csrs.push(MongoRunner.runMongod(csrs0Opts));
+ _waitUntilMaster(csrs[0]);
+};
+
+/**
+ * Starts up the new members of the config server replica set as non-voting, priority zero nodes.
+ */
+this.startNewCSRSNodes = function() {
+ jsTest.log("Starting new CSRS nodes");
+ for (var i = 1; i < numCsrsMembers; ++i) {
+ csrs.push(MongoRunner.runMongod({replSet: csrsName,
+ configsvr: "",
+ storageEngine: "wiredTiger"
+ }));
+ csrsConfig.members.push({ _id: i, host: csrs[i].name, votes: 0, priority: 0 });
+ }
+ csrsConfig.version = 2;
+ jsTest.log("Adding non-voting members to csrs set: " + tojson(csrsConfig));
+ assert.commandWorked(csrs[0].adminCommand({replSetReconfig: csrsConfig}));
+};
+
+this.waitUntilConfigsCaughtUp = function() {
+ waitUntilAllNodesCaughtUp(csrs, 60000);
+};
+
+/**
+ * Stops one of the SCCC config servers, thus disabling changes to cluster metadata and preventing
+ * any further writes to the config servers until the upgrade to CSRS is completed.
+ */
+this.shutdownOneSCCCNode = function() {
+ // Only shut down one of the SCCC config servers to avoid any period without any config servers
+ // online.
+ jsTest.log("Shutting down third SCCC config server node");
+ MongoRunner.stopMongod(st.c2);
+};
+
+/**
+ * Allows all CSRS members to vote, in preparation for switching fully to CSRS mode.
+ */
+this.allowAllCSRSNodesToVote = function() {
+ csrsConfig.members.forEach(function (member) { member.votes = 1; member.priority = 1;});
+ csrsConfig.version = 3;
+ jsTest.log("Allowing all csrs members to vote: " + tojson(csrsConfig));
+ assert.commandWorked(csrs[0].adminCommand({replSetReconfig: csrsConfig}));
+};
+
+/**
+ * Restarts the first member of the config server replica set without the --configsvrMode flag,
+ * marking the official switchover from SCCC to CSRS mode. If the first config server doesn't
+ * support readCommitted, waits for it to automatically go into the REMOVED state. Finally,
+ * it shuts down the one remaining SCCC config server node now that it is no longer needed.
+ */
+this.switchToCSRSMode = function() {
+ jsTest.log("Restarting " + csrs[0].name + " in csrs mode");
+ delete csrs0Opts.configsvrMode;
+ try {
+ csrs[0].adminCommand({replSetStepDown: 60});
+ } catch (e) {} // Expected
+ MongoRunner.stopMongod(csrs[0]);
+ csrs[0] = MongoRunner.runMongod(csrs0Opts);
+ var csrsStatus;
+ assert.soon(function () {
+ csrsStatus = csrs[0].adminCommand({replSetGetStatus: 1});
+ if (csrsStatus.members[0].stateStr == "STARTUP" ||
+ csrsStatus.members[0].stateStr == "STARTUP2" ||
+ csrsStatus.members[0].stateStr == "RECOVERING") {
+ // Make sure first node is fully online or else mongoses still in SCCC mode might not
+ // find any node online to talk to.
+ return false;
+ }
+
+ var i;
+ for (i = 0; i < csrsStatus.members.length; ++i) {
+ if (csrsStatus.members[i].name == csrs[0].name) {
+ var supportsCommitted =
+ csrs[0].getDB("admin").serverStatus().storageEngine.supportsCommittedReads;
+ var stateIsRemoved = csrsStatus.members[i].stateStr == "REMOVED";
+ // If the storage engine supports committed reads, it shouldn't go into REMOVED
+ // state, but if it does not then it should.
+ if (supportsCommitted) {
+ assert(!stateIsRemoved);
+ } else if (!stateIsRemoved) {
+ return false;
+ }
+ }
+ if (csrsStatus.members[i].stateStr == "PRIMARY") {
+ return csrs[i].adminCommand({ismaster: 1}).ismaster;
+ }
+ }
+ return false;
+ }, function() {
+ return "No primary or non-WT engine not removed in " + tojson(csrsStatus);
+ });
+
+ jsTest.log("Shutting down final SCCC config server now that upgrade is complete");
+ MongoRunner.stopMongod(st.c1);
+};
+
+}; \ No newline at end of file
diff --git a/jstests/sharding/csrs_upgrade.js b/jstests/sharding/csrs_upgrade.js
index 31fbe5a1952..09f8db67258 100644
--- a/jstests/sharding/csrs_upgrade.js
+++ b/jstests/sharding/csrs_upgrade.js
@@ -18,23 +18,13 @@
* @tags: [requires_persistence]
*/
-load("jstests/replsets/rslib.js");
+load("jstests/libs/csrs_upgrade_util.js");
var st;
(function() {
"use strict";
- var testDBName = jsTestName();
- var dataCollectionName = testDBName + ".data";
- var csrsName = jsTestName() + "-csrs";
- var numCsrsMembers;
- if (TestData.storageEngine == "wiredTiger" || TestData.storageEngine == "") {
- // TODO(schwerin): SERVER-19739 Support testing CSRS with storage engines other than wired
- // tiger, when such other storage engines support majority read concern.
- numCsrsMembers = 3;
- } else {
- numCsrsMembers = 4;
- }
+ var coordinator = new CSRSUpgradeCoordinator();
var nextSplit = 0;
@@ -44,7 +34,8 @@ var st;
var runNextSplit = function (snode) {
var splitPoint = nextSplit;
nextSplit += 10;
- return snode.adminCommand({split: dataCollectionName, middle: { _id: splitPoint }});
+ return snode.adminCommand({split: coordinator.getDataCollectionName(),
+ middle: { _id: splitPoint }});
};
/**
@@ -60,24 +51,13 @@ var st;
assert.commandWorked(runNextSplit(snode));
// Check that basic crud ops work.
- var dataColl = snode.getCollection(dataCollectionName);
+ var dataColl = snode.getCollection(coordinator.getDataCollectionName());
assert.eq(40, dataColl.find().itcount());
assert.writeOK(dataColl.insert({_id: 100, x: 1}));
assert.writeOK(dataColl.update({_id: 100}, {$inc: {x: 1}}));
assert.writeOK(dataColl.remove({x:2}));
};
- var waitUntilMaster = function (dnode) {
- var isMasterReply;
- assert.soon(function () {
- isMasterReply = dnode.adminCommand({ismaster: 1});
- return isMasterReply.ismaster;
- }, function () {
- return "Expected " + dnode.name + " to respond ismaster:true, but got " +
- tojson(isMasterReply);
- });
- };
-
/**
* Runs a config.version read, then splits the data collection and expects the read to succed
* and the split to fail.
@@ -88,43 +68,17 @@ var st;
assert.commandFailed(runNextSplit(snode));
};
- jsTest.log("Setting up SCCC sharded cluster")
- st = new ShardingTest({
- name: "csrsUpgrade",
- mongos: 2,
- rs: { nodes: 3 },
- shards: 2,
- nopreallocj: true,
- other: {
- sync: true,
- enableBalancer: false,
- useHostname: true
- }
- });
-
- var shardConfigs = st.s0.getCollection("config.shards").find().toArray();
- assert.eq(2, shardConfigs.length);
- var shard0Name = shardConfigs[0]._id;
-
- jsTest.log("Enabling sharding on " + testDBName + " and making " + shard0Name +
- " the primary shard");
- assert.commandWorked(st.s0.adminCommand({enablesharding: testDBName}));
- st.ensurePrimaryShard(testDBName, shard0Name);
+ coordinator.setupSCCCCluster();
- jsTest.log("Creating a sharded collection " + dataCollectionName);
- assert.commandWorked(st.s0.adminCommand({
- shardcollection: dataCollectionName,
- key: { _id: 1 }
- }));
- assert.commandWorked(runNextSplit(st.s0));
- assert.commandWorked(st.s0.adminCommand({
- moveChunk: dataCollectionName,
+ assert.commandWorked(runNextSplit(coordinator.getMongos(0)));
+ assert.commandWorked(coordinator.getMongos(0).adminCommand({
+ moveChunk: coordinator.getDataCollectionName(),
find: { _id: 0 },
- to: shardConfigs[1]._id
+ to: coordinator.getShardName(1)
}));
- jsTest.log("Inserting data into " + dataCollectionName);
- st.s1.getCollection(dataCollectionName).insert(
+ jsTest.log("Inserting data into " + coordinator.getDataCollectionName());
+ coordinator.getMongos(1).getCollection(coordinator.getDataCollectionName()).insert(
(function () {
var result = [];
var i;
@@ -134,107 +88,38 @@ var st;
return result;
}()));
- jsTest.log("Restarting " + st.c0.name + " as a standalone replica set");
- var csrsConfig = {
- _id: csrsName,
- version: 1,
- configsvr: true,
- members: [ { _id: 0, host: st.c0.name }]
- };
- assert.commandWorked(st.c0.adminCommand({replSetInitiate: csrsConfig}));
- var csrs = [];
- var csrs0Opts = Object.extend({}, st.c0.fullOptions, /* deep */ true);
- csrs0Opts.restart = true; // Don't clean the data files from the old c0.
- csrs0Opts.replSet = csrsName;
- csrs0Opts.configsvrMode = "sccc";
- MongoRunner.stopMongod(st.c0);
- csrs.push(MongoRunner.runMongod(csrs0Opts));
- waitUntilMaster(csrs[0]);
-
- assertOpsWork(st.s0, "using SCCC protocol when first config server is a 1-node replica set");
-
- jsTest.log("Starting new CSRS nodes");
- for (var i = 1; i < numCsrsMembers; ++i) {
- csrs.push(MongoRunner.runMongod({
- replSet: csrsName,
- configsvr: "",
- storageEngine: "wiredTiger"
- }));
- csrsConfig.members.push({ _id: i, host: csrs[i].name, votes: 0, priority: 0 });
- }
- csrsConfig.version = 2;
- jsTest.log("Adding non-voting members to csrs set: " + tojson(csrsConfig));
- assert.commandWorked(csrs[0].adminCommand({replSetReconfig: csrsConfig}));
+ coordinator.restartFirstConfigAsReplSet();
+
+ assertOpsWork(coordinator.getMongos(0),
+ "using SCCC protocol when first config server is a 1-node replica set");
+
+ coordinator.startNewCSRSNodes();
jsTest.log("Splitting a chunk to confirm that the SCCC protocol works w/ 1 rs " +
"node with secondaries");
- assertOpsWork(st.s0, "using SCCC protocol when first config server is primary of " +
- csrs.length + "-node replica set");
-
- waitUntilAllNodesCaughtUp(csrs);
-
- jsTest.log("Shutting down second and third SCCC config server nodes");
- MongoRunner.stopMongod(st.c1);
- MongoRunner.stopMongod(st.c2);
-
- assertCannotSplit(st.s0, "with two SCCC nodes down");
-
- csrsConfig.members.forEach(function (member) { member.votes = 1; member.priority = 1});
- csrsConfig.version = 3;
- jsTest.log("Allowing all csrs members to vote: " + tojson(csrsConfig));
- assert.commandWorked(csrs[0].adminCommand({replSetReconfig: csrsConfig}));
-
- assertCannotSplit(st.s0, "with two SCCC nodes down, even though CSRS is almost ready");
-
- jsTest.log("Restarting " + csrs[0].name + " in csrs mode");
- delete csrs0Opts.configsvrMode;
- try {
- csrs[0].adminCommand({replSetStepDown: 60});
- } catch (e) {} // Expected
- MongoRunner.stopMongod(csrs[0]);
- csrs[0] = MongoRunner.runMongod(csrs0Opts);
- var csrsStatus;
- assert.soon(function () {
- csrsStatus = csrs[0].adminCommand({replSetGetStatus: 1});
- if (csrsStatus.members[0].stateStr == "STARTUP" ||
- csrsStatus.members[0].stateStr == "STARTUP2" ||
- csrsStatus.members[0].stateStr == "RECOVERING") {
- // Make sure first node is fully online or else mongoses still in SCCC mode might not
- // find any node online to talk to.
- return false;
- }
+ assertOpsWork(coordinator.getMongos(0),
+ "using SCCC protocol when first config server is primary of " +
+ coordinator.getCSRSNodes().length + "-node replica set");
- var i;
- for (i = 0; i < csrsStatus.members.length; ++i) {
- if (csrsStatus.members[i].name == csrs[0].name) {
- var supportsCommitted =
- csrs[0].getDB("admin").serverStatus().storageEngine.supportsCommittedReads;
- var stateIsRemoved = csrsStatus.members[i].stateStr == "REMOVED";
- // If the storage engine supports committed reads, it shouldn't go into REMOVED
- // state, but if it does not then it should.
- if (supportsCommitted) {
- assert(!stateIsRemoved);
- } else if (!stateIsRemoved) {
- return false;
- }
- }
- if (csrsStatus.members[i].stateStr == "PRIMARY") {
- return csrs[i].adminCommand({ismaster: 1}).ismaster;
- }
- }
- return false;
- }, function() {
- return "No primary or non-WT engine not removed in " + tojson(csrsStatus);
- });
-
- var sconfig = Object.extend({}, st.s0.fullOptions, /* deep */ true);
- delete sconfig.port;
- sconfig.configdb = csrsName + "/" + csrs[0].name;
+ coordinator.waitUntilConfigsCaughtUp();
+ coordinator.shutdownOneSCCCNode();
+
+ assertCannotSplit(coordinator.getMongos(0), "with one SCCC node down");
+
+ coordinator.allowAllCSRSNodesToVote();
+
+ assertCannotSplit(coordinator.getMongos(0),
+ "with one SCCC node down, even though CSRS is almost ready");
+
+ coordinator.switchToCSRSMode();
+
+ var sconfig = coordinator.getMongosConfig();
+ sconfig.configdb = coordinator.getCSRSName() + "/" + coordinator.getCSRSNodes()[0].name;
assertOpsWork(MongoRunner.runMongos(sconfig),
"when mongos started with --configdb=" + sconfig.configdb);
- sconfig.configdb = st.s0.fullOptions.configdb;
+ sconfig = coordinator.getMongosConfig();
assertOpsWork(MongoRunner.runMongos(sconfig),
"when mongos started with --configdb=" + sconfig.configdb);
- assertOpsWork(st.s0, "on mongos that drove the upgrade");
- assertOpsWork(st.s1, "on mongos that was previously unaware of the upgrade");
+ assertOpsWork(coordinator.getMongos(0), "on mongos that drove the upgrade");
+ assertOpsWork(coordinator.getMongos(1), "on mongos that was previously unaware of the upgrade");
}());
diff --git a/jstests/sharding/csrs_upgrade_during_migrate.js b/jstests/sharding/csrs_upgrade_during_migrate.js
index ef903285305..c1fac8eeff6 100644
--- a/jstests/sharding/csrs_upgrade_during_migrate.js
+++ b/jstests/sharding/csrs_upgrade_during_migrate.js
@@ -8,82 +8,35 @@
* @tags: [requires_persistence]
*/
load("jstests/replsets/rslib.js");
+load("jstests/libs/csrs_upgrade_util.js");
var st;
(function() {
"use strict";
- var testDBName = "csrs_upgrade_during_migrate";
- var dataCollectionName = testDBName + ".data";
- var csrsName = jsTestName() + "-csrs";
- var numCsrsMembers;
- if (TestData.storageEngine == "wiredTiger" || TestData.storageEngine == "") {
- // TODO(schwerin): SERVER-19739 Support testing CSRS with storage engines other than wired
- // tiger, when such other storage engines support majority read concern.
- numCsrsMembers = 3;
- } else {
- numCsrsMembers = 4;
- }
-
- var waitUntilMaster = function (dnode) {
- var isMasterReply;
- assert.soon(function () {
- isMasterReply = dnode.adminCommand({ismaster: 1});
- return isMasterReply.ismaster;
- }, function () {
- return "Expected " + dnode.name + " to respond ismaster:true, but got " +
- tojson(isMasterReply);
- });
- };
-
/*
- * If 'delayed' is true adds a 30 second slave delay to the secondary of the set referred to by
- * 'rst'. If 'delayed' is false, removes slave delay from the secondary.
+ * If 'delayed' is true adds a 30 second slave delay to the secondaries of the set referred to
+ * by 'rst'. If 'delayed' is false, removes slave delay from the secondaries.
*/
var setSlaveDelay = function(rst, delayed) {
var conf = rst.getPrimary().getDB('local').system.replset.findOne();
conf.version++;
- var secondaryIndex = 0;
- if (conf.members[secondaryIndex].host === rst.getPrimary().host) {
- secondaryIndex = 1;
+ for (var i = 0; i < conf.members.length; i++) {
+ if (conf.members[i].host === rst.getPrimary().host) {
+ continue;
+ }
+ conf.members[i].priority = 0;
+ conf.members[i].hidden = true;
+ conf.members[i].slaveDelay = delayed ? 30 : 0;
}
- conf.members[secondaryIndex].priority = 0;
- conf.members[secondaryIndex].hidden = true;
- conf.members[secondaryIndex].slaveDelay = delayed ? 30 : 0;
reconfig(rst, conf);
}
- jsTest.log("Setting up SCCC sharded cluster")
- st = new ShardingTest({
- name: "csrsUpgrade",
- mongos: 2,
- rs: { nodes: 2 },
- shards: 2,
- nopreallocj: true,
- other: {
- sync: true,
- enableBalancer: false,
- useHostname: true,
- }
- });
-
- var shardConfigs = st.s0.getCollection("config.shards").find().toArray();
- assert.eq(2, shardConfigs.length);
- var shard0Name = shardConfigs[0]._id;
-
- jsTest.log("Enabling sharding on " + testDBName + " and making " + shard0Name +
- " the primary shard");
- assert.commandWorked(st.s0.adminCommand({enablesharding: testDBName}));
- st.ensurePrimaryShard(testDBName, shard0Name);
-
- jsTest.log("Creating a sharded collection " + dataCollectionName);
- assert.commandWorked(st.s0.adminCommand({
- shardcollection: dataCollectionName,
- key: { _id: 1 }
- }));
+ var coordinator = new CSRSUpgradeCoordinator();
+ coordinator.setupSCCCCluster();
- jsTest.log("Inserting data into " + dataCollectionName);
- st.s1.getCollection(dataCollectionName).insert(
+ jsTest.log("Inserting data into " + coordinator.getDataCollectionName());
+ coordinator.getMongos(1).getCollection(coordinator.getDataCollectionName()).insert(
(function () {
var result = [];
var i;
@@ -94,40 +47,14 @@ var st;
}()), {writeConcern: {w: 'majority'}});
jsTest.log("Introducing slave delay on shards to ensure migration is slow");
- setSlaveDelay(st.rs0, true);
- setSlaveDelay(st.rs1, true);
-
- jsTest.log("Restarting " + st.c0.name + " as a standalone replica set");
- var csrsConfig = {
- _id: csrsName,
- version: 1,
- configsvr: true,
- members: [ { _id: 0, host: st.c0.name }],
- };
- assert.commandWorked(st.c0.adminCommand({replSetInitiate: csrsConfig}));
- var csrs = [];
- var csrs0Opts = Object.extend({}, st.c0.fullOptions, /* deep */ true);
- csrs0Opts.restart = true; // Don't clean the data files from the old c0.
- csrs0Opts.replSet = csrsName;
- csrs0Opts.configsvrMode = "sccc";
- MongoRunner.stopMongod(st.c0);
- csrs.push(MongoRunner.runMongod(csrs0Opts));
- waitUntilMaster(csrs[0]);
-
- jsTest.log("Starting new CSRS nodes");
- for (var i = 1; i < numCsrsMembers; ++i) {
- csrs.push(MongoRunner.runMongod({
- replSet: csrsName,
- configsvr: "",
- storageEngine: "wiredTiger"
- }));
- csrsConfig.members.push({ _id: i, host: csrs[i].name, votes: 0, priority: 0 });
- }
- csrsConfig.version = 2;
- jsTest.log("Adding non-voting members to csrs set: " + tojson(csrsConfig));
- assert.commandWorked(csrs[0].adminCommand({replSetReconfig: csrsConfig}));
+ var shardRS0 = coordinator.getShardingTestFixture().rs0;
+ var shardRS1 = coordinator.getShardingTestFixture().rs1;
+ setSlaveDelay(shardRS0, true);
+ setSlaveDelay(shardRS1, true);
- waitUntilAllNodesCaughtUp(csrs, 60000);
+ coordinator.restartFirstConfigAsReplSet();
+ coordinator.startNewCSRSNodes();
+ coordinator.waitUntilConfigsCaughtUp();
jsTest.log("Starting long-running chunk migration");
var joinParallelShell = startParallelShell(
@@ -137,85 +64,34 @@ var st;
to: 'csrsUpgrade-rs1'
});
assert.commandFailedWithCode(res, ErrorCodes.IncompatibleCatalogManager);
- }, st.s.port);
+ }, coordinator.getMongos(0).port);
// Wait for migration to start
assert.soon(function() {
- return st.s0.getDB('config').changelog.findOne({what: 'moveChunk.start'});
+ var configDB = coordinator.getMongos(0).getDB('config');
+ return configDB.changelog.findOne({what: 'moveChunk.start'});
});
- // Only shut down one of the SCCC config servers to avoid any period without any config servers
- // online.
- jsTest.log("Shutting down third SCCC config server node");
- MongoRunner.stopMongod(st.c2);
-
- csrsConfig.members.forEach(function (member) { member.votes = 1; member.priority = 1});
- csrsConfig.version = 3;
- jsTest.log("Allowing all csrs members to vote: " + tojson(csrsConfig));
- assert.commandWorked(csrs[0].adminCommand({replSetReconfig: csrsConfig}));
-
- jsTest.log("Restarting " + csrs[0].name + " in csrs mode");
- delete csrs0Opts.configsvrMode;
- try {
- csrs[0].adminCommand({replSetStepDown: 60});
- } catch (e) {} // Expected
- MongoRunner.stopMongod(csrs[0]);
- csrs[0] = MongoRunner.runMongod(csrs0Opts);
- var csrsStatus;
- assert.soon(function () {
- csrsStatus = csrs[0].adminCommand({replSetGetStatus: 1});
- if (csrsStatus.members[0].stateStr == "STARTUP" ||
- csrsStatus.members[0].stateStr == "STARTUP2" ||
- csrsStatus.members[0].stateStr == "RECOVERING") {
- // Make sure first node is fully online or else mongoses still in SCCC mode might not
- // find any node online to talk to.
- return false;
- }
-
- var i;
- for (i = 0; i < csrsStatus.members.length; ++i) {
- if (csrsStatus.members[i].name == csrs[0].name) {
- var supportsCommitted =
- csrs[0].getDB("admin").serverStatus().storageEngine.supportsCommittedReads;
- var stateIsRemoved = csrsStatus.members[i].stateStr == "REMOVED";
- // If the storage engine supports committed reads, it shouldn't go into REMOVED
- // state, but if it does not then it should.
- if (supportsCommitted) {
- assert(!stateIsRemoved);
- } else if (!stateIsRemoved) {
- return false;
- }
- }
- if (csrsStatus.members[i].stateStr == "PRIMARY") {
- return csrs[i].adminCommand({ismaster: 1}).ismaster;
- }
- }
- return false;
- }, function() {
- return "No primary or non-WT engine not removed in " + tojson(csrsStatus);
- });
+ coordinator.shutdownOneSCCCNode();
+ coordinator.allowAllCSRSNodesToVote();
+ coordinator.switchToCSRSMode();
joinParallelShell(); // This will verify that the migration failed with the expected code
- jsTest.log("Shutting down final SCCC config server now that upgrade is complete");
- MongoRunner.stopMongod(st.c1);
-
jsTest.log("Ensure that leftover distributed locks don't prevent future migrations");
// Remove slave delay so that the migration can finish in a reasonable amount of time.
- st.rs0.nodes.map(m=>m.getDB("admin").setLogLevel(2,"replication")); // BF-1637
- st.rs1.nodes.map(m=>m.getDB("admin").setLogLevel(2,"replication")); // BF-1637
- setSlaveDelay(st.rs0, false);
- setSlaveDelay(st.rs1, false);
- st.rs0.awaitReplication(60000);
- st.rs1.awaitReplication(60000);
- // Due to SERVER-20290 the recipient shard may not immediately realize that the migration that
+ setSlaveDelay(shardRS0, false);
+ setSlaveDelay(shardRS1, false);
+ shardRS0.awaitReplication(60000);
+ shardRS1.awaitReplication(60000);
+
+ // The recipient shard may not immediately realize that the migration that
// was going on during the upgrade has been aborted, so we need to wait until it notices this
// before starting a new migration.
- // TODO(spencer): Remove this after SERVER-20290 is fixed.
jsTest.log("Waiting for previous migration to be fully cleaned up");
assert.soon(function() {
- var res = st.rs1.getPrimary().adminCommand('_recvChunkStatus');
+ var res = shardRS1.getPrimary().adminCommand('_recvChunkStatus');
assert.commandWorked(res);
if (res.active) {
printjson(res);
@@ -224,9 +100,10 @@ var st;
});
jsTest.log("Starting new migration after upgrade, which should succeed");
- assert.commandWorked(st.s0.adminCommand({moveChunk: dataCollectionName,
- find: { _id: 0 },
- to: shardConfigs[1]._id
- }));
+ assert.commandWorked(coordinator.getMongos(0).adminCommand(
+ {moveChunk: coordinator.getDataCollectionName(),
+ find: { _id: 0 },
+ to: coordinator.getShardName(1)
+ }));
}());