-rw-r--r--  etc/backports_required_for_multiversion_tests.yml | 12
-rw-r--r--  jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js | 9
-rw-r--r--  jstests/replsets/config_tags_race_with_default_write_concern.js | 68
-rw-r--r--  jstests/replsets/default_write_concern_race_with_config_tags.js | 66
-rw-r--r--  jstests/replsets/reconfig_may_not_remove_custom_wc_in_use.js | 55
-rw-r--r--  src/mongo/db/commands/rwc_defaults_commands.cpp | 18
-rw-r--r--  src/mongo/db/repl/repl_set_config.cpp | 23
-rw-r--r--  src/mongo/db/repl/repl_set_config.h | 6
-rw-r--r--  src/mongo/db/repl/repl_set_config_test.cpp | 187
-rw-r--r--  src/mongo/db/repl/repl_set_tag.h | 31
-rw-r--r--  src/mongo/db/repl/replication_coordinator.h | 22
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 52
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.h | 72
-rw-r--r--  src/mongo/db/repl/replication_coordinator_mock.cpp | 6
-rw-r--r--  src/mongo/db/repl/replication_coordinator_mock.h | 15
-rw-r--r--  src/mongo/db/repl/replication_coordinator_noop.cpp | 6
-rw-r--r--  src/mongo/db/repl/replication_coordinator_noop.h | 15
-rw-r--r--  src/mongo/embedded/replication_coordinator_embedded.cpp | 5
-rw-r--r--  src/mongo/embedded/replication_coordinator_embedded.h | 15
19 files changed, 679 insertions(+), 4 deletions(-)
diff --git a/etc/backports_required_for_multiversion_tests.yml b/etc/backports_required_for_multiversion_tests.yml
index 35518804cb6..22866415c5f 100644
--- a/etc/backports_required_for_multiversion_tests.yml
+++ b/etc/backports_required_for_multiversion_tests.yml
@@ -154,6 +154,12 @@ last-continuous:
test_file: jstests/aggregation/lookup_let_optimization.js
- ticket: SERVER-63129
test_file: jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover_with_dropped_views.js
+ - ticket: SERVER-61864
+ test_file: jstests/replsets/reconfig_may_not_remove_custom_wc_in_use.js
+ - ticket: SERVER-61864
+ test_file: jstests/replsets/default_write_concern_race_with_config_tags.js
+ - ticket: SERVER-61864
+ test_file: jstests/replsets/config_tags_race_with_default_write_concern.js
- ticket: SERVER-62710
test_file: jstests/sharding/max_time_ms_does_not_leak_shard_cursor.js
- ticket: SERVER-62759
@@ -461,6 +467,12 @@ last-lts:
test_file: jstests/aggregation/lookup_let_optimization.js
- ticket: SERVER-63129
test_file: jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover_with_dropped_views.js
+ - ticket: SERVER-61864
+ test_file: jstests/replsets/reconfig_may_not_remove_custom_wc_in_use.js
+ - ticket: SERVER-61864
+ test_file: jstests/replsets/default_write_concern_race_with_config_tags.js
+ - ticket: SERVER-61864
+ test_file: jstests/replsets/config_tags_race_with_default_write_concern.js
- ticket: SERVER-62710
test_file: jstests/sharding/max_time_ms_does_not_leak_shard_cursor.js
- ticket: SERVER-62759
diff --git a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
index 19768c66c0d..14f1aad2480 100644
--- a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
+++ b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
@@ -60,8 +60,13 @@ if (storageEngine !== "wiredTiger") {
var primary = rst.getPrimary(); // Waits for PRIMARY state.
// The default WC is majority and we want the delayed secondary to fall behind in replication.
- assert.commandWorked(primary.adminCommand(
- {setDefaultRWConcern: 1, defaultWriteConcern: {w: 1}, writeConcern: {w: 1}}));
+ // Retry to make sure the primary is done executing (but not necessarily replicating) the
+ // reconfig.
+ assert.soonNoExcept(function() {
+ assert.commandWorked(primary.adminCommand(
+ {setDefaultRWConcern: 1, defaultWriteConcern: {w: 1}, writeConcern: {w: 1}}));
+ return true;
+ });
// Reconfigure primary with a small cache size so less data needs to be
// inserted to make the cache full while trying to trigger a stall.
diff --git a/jstests/replsets/config_tags_race_with_default_write_concern.js b/jstests/replsets/config_tags_race_with_default_write_concern.js
new file mode 100644
index 00000000000..e510d3495dc
--- /dev/null
+++ b/jstests/replsets/config_tags_race_with_default_write_concern.js
@@ -0,0 +1,68 @@
+/**
+ * Test that config write concern tag changes are always synchronized with default write concern
+ * changes. In this case the setDefaultRWConcern command would win the potential race.
+ */
+
+(function() {
+"use strict";
+load("jstests/libs/write_concern_util.js");
+load("jstests/replsets/rslib.js");
+
+const name = jsTestName();
+const rst = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {tags: {region: "us", category: "A"}}},
+ {rsConfig: {tags: {region: "us", category: "B"}}},
+ {rsConfig: {tags: {region: "eu", category: "A"}}}
+ ],
+ settings: {getLastErrorModes: {multiRegion: {region: 2}}}
+});
+
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+
+jsTestLog("Setting up setDefaultRWC thread.");
+let hangDuringSetRWCFP = configureFailPoint(primary, "hangWhileSettingDefaultRWC");
+
+let waitForSetDefaultRWC = startParallelShell(function() {
+ jsTestLog("Begin setDefaultRWC.");
+ assert.commandWorked(
+ db.adminCommand({setDefaultRWConcern: 1, defaultWriteConcern: {w: "multiRegion"}}));
+ jsTestLog("End setDefaultRWC.");
+}, primary.port);
+
+jsTestLog("Waiting for setDefaultRWC thread to hit failpoint.");
+hangDuringSetRWCFP.wait();
+
+jsTestLog("Attempting a non-WC tag changing reconfig which should not be affected.");
+let cfg = rst.getReplSetConfigFromNode();
+cfg.settings.catchUpTimeoutMillis = 99999; // unrelated setting
+reconfig(rst, cfg);
+
+jsTestLog("Attempting WC tag-changing reconfig (should fail).");
+cfg = rst.getReplSetConfigFromNode();
+cfg.settings.getLastErrorModes = {
+ multiRegion: {region: 2},
+ multiCategory: {category: 2}
+};
+assert.throwsWithCode(() => {
+ reconfig(rst, cfg);
+}, ErrorCodes.ConflictingOperationInProgress);
+
+jsTestLog("Allowing setDefaultRWC to finish.");
+hangDuringSetRWCFP.off();
+waitForSetDefaultRWC();
+
+jsTestLog("Attempting reconfig again (should succeed).");
+reconfig(rst, cfg);
+
+jsTestLog("Checking writeability.");
+const coll = primary.getDB("db").getCollection("coll");
+assert.commandWorked(coll.insert({a: 1}));
+rst.awaitReplication();
+
+rst.stopSet();
+})();
diff --git a/jstests/replsets/default_write_concern_race_with_config_tags.js b/jstests/replsets/default_write_concern_race_with_config_tags.js
new file mode 100644
index 00000000000..4bbebf31af9
--- /dev/null
+++ b/jstests/replsets/default_write_concern_race_with_config_tags.js
@@ -0,0 +1,66 @@
+/**
+ * Test that default write concern changes are always synchronized with config write concern tag
+ * changes. In this case the reconfig command would win the potential race.
+ */
+
+(function() {
+"use strict";
+load("jstests/libs/parallel_shell_helpers.js");
+load("jstests/libs/write_concern_util.js");
+
+const name = jsTestName();
+const rst = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {tags: {region: "us", category: "A"}}},
+ {rsConfig: {tags: {region: "us", category: "B"}}},
+ {rsConfig: {tags: {region: "eu", category: "A"}}}
+ ],
+ settings: {getLastErrorModes: {multiRegion: {region: 2}}}
+});
+
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+
+jsTestLog("Setting up reconfig thread.");
+let hangDuringReconfigFP = configureFailPoint(primary, "hangBeforeNewConfigValidationChecks");
+let cfg = rst.getReplSetConfigFromNode();
+cfg.version += 1;
+cfg.settings.getLastErrorModes = {
+ multiRegion: {region: 2},
+ multiCategory: {category: 2}
+};
+
+function runReconfigFn(cfg) {
+ jsTestLog("Running reconfig");
+ assert.commandWorked(
+ db.adminCommand({replSetReconfig: cfg, maxTimeMS: ReplSetTest.kDefaultTimeoutMS}));
+ jsTestLog("Finished running reconfig");
+}
+
+jsTestLog("Waiting for reconfig thread to hit failpoint.");
+let waitForReconfig = startParallelShell(funWithArgs(runReconfigFn, cfg), primary.port);
+hangDuringReconfigFP.wait();
+
+jsTestLog("Attempting setDefaultRWC (should fail).");
+assert.commandFailedWithCode(
+ primary.adminCommand({setDefaultRWConcern: 1, defaultWriteConcern: {w: "multiRegion"}}),
+ ErrorCodes.ConfigurationInProgress);
+
+jsTestLog("Allowing reconfig to finish.");
+hangDuringReconfigFP.off();
+waitForReconfig();
+
+jsTestLog("Attempting setDefaultRWC again (should succeed).");
+assert.commandWorked(
+ primary.adminCommand({setDefaultRWConcern: 1, defaultWriteConcern: {w: "multiRegion"}}));
+
+jsTestLog("Checking writeability.");
+const coll = primary.getDB("db").getCollection("coll");
+assert.commandWorked(coll.insert({a: 1}));
+rst.awaitReplication();
+
+rst.stopSet();
+})();
diff --git a/jstests/replsets/reconfig_may_not_remove_custom_wc_in_use.js b/jstests/replsets/reconfig_may_not_remove_custom_wc_in_use.js
new file mode 100644
index 00000000000..8ee1a7ced02
--- /dev/null
+++ b/jstests/replsets/reconfig_may_not_remove_custom_wc_in_use.js
@@ -0,0 +1,55 @@
+/**
+ * Tests that we may not use reconfig to remove a custom write concern definition that
+ * is currently set to the default. We are free to remove that definition once it is no longer in
+ * use.
+ */
+
+(function() {
+"use strict";
+load("jstests/libs/write_concern_util.js");
+load("jstests/replsets/rslib.js");
+
+const name = jsTestName();
+const rst = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {tags: {region: "us"}}},
+ {rsConfig: {tags: {region: "us"}}},
+ {rsConfig: {tags: {region: "eu"}}}
+ ],
+ settings: {getLastErrorModes: {multiRegion: {region: 2}}}
+});
+
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+
+jsTestLog("Setting the write concern to match the custom definition.");
+assert.commandWorked(
+ primary.adminCommand({setDefaultRWConcern: 1, defaultWriteConcern: {w: "multiRegion"}}));
+
+let cfg = rst.getReplSetConfigFromNode();
+cfg.settings.getLastErrorModes = {};
+
+jsTestLog("Attempting reconfig to remove the definition while it is still in use (should fail).");
+assert.throwsWithCode(() => {
+ reconfig(rst, cfg);
+}, ErrorCodes.NewReplicaSetConfigurationIncompatible);
+
+jsTestLog("Resetting the write concern to majority.");
+assert.commandWorked(primary.adminCommand({setDefaultRWConcern: 1, defaultWriteConcern: {w: 2}}));
+
+jsTestLog(
+ "Removing custom write concern definition now that it is no longer in use (should succceed).");
+cfg = rst.getReplSetConfigFromNode();
+cfg.settings.getLastErrorModes = {};
+reconfig(rst, cfg);
+
+jsTestLog("Checking writeability.");
+const coll = primary.getDB("db").getCollection("coll");
+assert.commandWorked(coll.insert({a: 1}));
+rst.awaitReplication();
+
+rst.stopSet();
+})();
diff --git a/src/mongo/db/commands/rwc_defaults_commands.cpp b/src/mongo/db/commands/rwc_defaults_commands.cpp
index f02cd506186..7fd405804f9 100644
--- a/src/mongo/db/commands/rwc_defaults_commands.cpp
+++ b/src/mongo/db/commands/rwc_defaults_commands.cpp
@@ -42,10 +42,14 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
+#include "mongo/util/fail_point.h"
namespace mongo {
namespace {
+// Hang during the execution of SetDefaultRWConcernCommand.
+MONGO_FAIL_POINT_DEFINE(hangWhileSettingDefaultRWC);
+
/**
* Replaces the persisted default read/write concern document with a new one representing the given
* defaults. Waits for the write concern on the given operation context to be satisfied before
@@ -113,6 +117,17 @@ public:
auto typedRun(OperationContext* opCtx) {
assertNotStandaloneOrShardServer(opCtx, SetDefaultRWConcern::kCommandName);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ auto wcChanges = replCoord->getWriteConcernTagChanges();
+
+ // Synchronize this change with potential changes to the write concern tags.
+ uassert(ErrorCodes::ConfigurationInProgress,
+ "Replica set reconfig in progress. Please retry the command later.",
+ wcChanges->reserveDefaultWriteConcernChange());
+ ON_BLOCK_EXIT([&]() { wcChanges->releaseDefaultWriteConcernChange(); });
+
+ hangWhileSettingDefaultRWC.pauseWhileSet();
+
auto& rwcDefaults = ReadWriteConcernDefaults::get(opCtx->getServiceContext());
auto newDefaults = rwcDefaults.generateNewCWRWCToBeSavedOnDisk(
opCtx, request().getDefaultReadConcern(), request().getDefaultWriteConcern());
@@ -120,8 +135,7 @@ public:
// because it only has to exist on the actual shards in order to be valid.
if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
if (auto optWC = newDefaults.getDefaultWriteConcern()) {
- uassertStatusOK(
- repl::ReplicationCoordinator::get(opCtx)->validateWriteConcern(*optWC));
+ uassertStatusOK(replCoord->validateWriteConcern(*optWC));
}
}
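
The hunk above reserves the default write concern change and relies on ON_BLOCK_EXIT to drop the reservation on every exit path, including when generating or persisting the new defaults throws. A minimal standalone sketch of that reserve-then-scope-guard pattern in plain C++ follows; the ScopeGuard type and the stand-in reserve()/release() functions are illustrative, not MongoDB's.

    // Illustrative sketch only; reserve()/release() stand in for
    // reserveDefaultWriteConcernChange()/releaseDefaultWriteConcernChange().
    #include <functional>
    #include <iostream>
    #include <stdexcept>
    #include <utility>

    class ScopeGuard {
    public:
        explicit ScopeGuard(std::function<void()> onExit) : _onExit(std::move(onExit)) {}
        ~ScopeGuard() { _onExit(); }  // runs on normal return and when an exception unwinds
    private:
        std::function<void()> _onExit;
    };

    // Stand-ins: the real reservation call fails if a tag-changing reconfig is in flight.
    bool reserve() { return true; }
    void release() { std::cout << "reservation released\n"; }

    void setDefaultWriteConcern() {
        if (!reserve())
            throw std::runtime_error("Replica set reconfig in progress. Please retry the command later.");
        ScopeGuard guard([] { release(); });
        // ... validate and persist the new defaults; release() still runs if this throws.
    }

    int main() {
        setDefaultWriteConcern();
    }
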
diff --git a/src/mongo/db/repl/repl_set_config.cpp b/src/mongo/db/repl/repl_set_config.cpp
index 86368756c7f..902435b53e0 100644
--- a/src/mongo/db/repl/repl_set_config.cpp
+++ b/src/mongo/db/repl/repl_set_config.cpp
@@ -766,6 +766,29 @@ ReplSetConfigPtr ReplSetConfig::getRecipientConfig() const {
return _recipientConfig;
}
+bool ReplSetConfig::areWriteConcernModesTheSame(ReplSetConfig* otherConfig) const {
+ auto modeNames = getWriteConcernNames();
+ auto otherModeNames = otherConfig->getWriteConcernNames();
+
+ if (modeNames.size() != otherModeNames.size()) {
+ return false;
+ }
+
+ for (auto it = modeNames.begin(); it != modeNames.end(); it++) {
+ auto swPatternA = findCustomWriteMode(*it);
+ auto swPatternB = otherConfig->findCustomWriteMode(*it);
+ if (!swPatternA.isOK() || !swPatternB.isOK()) {
+ return false;
+ }
+
+ if (swPatternA.getValue() != swPatternB.getValue()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
MemberConfig* MutableReplSetConfig::_findMemberByID(MemberId id) {
for (auto it = getMembers().begin(); it != getMembers().end(); ++it) {
if (it->getId() == id) {
diff --git a/src/mongo/db/repl/repl_set_config.h b/src/mongo/db/repl/repl_set_config.h
index 5e410b65963..847677cd292 100644
--- a/src/mongo/db/repl/repl_set_config.h
+++ b/src/mongo/db/repl/repl_set_config.h
@@ -552,6 +552,12 @@ public:
*/
ReplSetConfigPtr getRecipientConfig() const;
+ /**
+ * Compares the write concern modes with another config and returns 'true' if they are
+ * identical.
+ */
+ bool areWriteConcernModesTheSame(ReplSetConfig* otherConfig) const;
+
private:
/**
* Sets replica set ID to 'defaultReplicaSetId' if 'cfg' does not contain an ID.
diff --git a/src/mongo/db/repl/repl_set_config_test.cpp b/src/mongo/db/repl/repl_set_config_test.cpp
index be6753afba4..e43bd0385c8 100644
--- a/src/mongo/db/repl/repl_set_config_test.cpp
+++ b/src/mongo/db/repl/repl_set_config_test.cpp
@@ -1931,6 +1931,193 @@ TEST(ReplSetConfig, MakeCustomWriteMode) {
ASSERT_TRUE(swPattern.isOK());
}
+TEST(ReplSetConfig, SameWriteConcernModesNoCustom) {
+ auto config = ReplSetConfig::parse(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "term" << 1.0 << "protocolVersion"
+ << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))));
+
+ auto otherConfig = ReplSetConfig::parse(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "term" << 1.0
+ << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:6789"))));
+
+ ASSERT(config.areWriteConcernModesTheSame(&otherConfig));
+ ASSERT(otherConfig.areWriteConcernModesTheSame(&config));
+}
+
+TEST(ReplSetConfig, SameWriteConcernModesOneCustom) {
+ auto config = ReplSetConfig::parse(
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "term" << 1.0 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY")))
+ << "settings" << BSON("getLastErrorModes" << BSON("eastCoast" << BSON("NYC" << 1)))));
+
+ auto otherConfig = ReplSetConfig::parse(
+ BSON("_id"
+ << "rs0"
+ << "version" << 2 << "term" << 1.0 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:6789"
+ << "tags"
+ << BSON("NYC"
+ << "NY")))
+ << "settings" << BSON("getLastErrorModes" << BSON("eastCoast" << BSON("NYC" << 1)))));
+
+ ASSERT(config.areWriteConcernModesTheSame(&otherConfig));
+ ASSERT(otherConfig.areWriteConcernModesTheSame(&config));
+}
+
+TEST(ReplSetConfig, DifferentWriteConcernModesOneCustomDifferentName) {
+ auto config = ReplSetConfig::parse(
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "term" << 1.0 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY")))
+ << "settings" << BSON("getLastErrorModes" << BSON("somename" << BSON("NYC" << 1)))));
+
+ auto otherConfig = ReplSetConfig::parse(
+ BSON("_id"
+ << "rs0"
+ << "version" << 2 << "term" << 1.0 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:6789"
+ << "tags"
+ << BSON("NYC"
+ << "NY")))
+ << "settings" << BSON("getLastErrorModes" << BSON("othername" << BSON("NYC" << 1)))));
+
+ ASSERT_FALSE(config.areWriteConcernModesTheSame(&otherConfig));
+ ASSERT_FALSE(otherConfig.areWriteConcernModesTheSame(&config));
+}
+
+TEST(ReplSetConfig, DifferentWriteConcernModesOneCustomDifferentCounts) {
+ auto config = ReplSetConfig::parse(
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "term" << 1.0 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY"))
+ << BSON("_id" << 1 << "host"
+ << "otherhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY")))
+ << "settings" << BSON("getLastErrorModes" << BSON("eastCoast" << BSON("NYC" << 1)))));
+
+ auto otherConfig = ReplSetConfig::parse(
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "term" << 1.0 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY"))
+ << BSON("_id" << 1 << "host"
+ << "otherhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY")))
+ << "settings" << BSON("getLastErrorModes" << BSON("eastCoast" << BSON("NYC" << 2)))));
+
+ ASSERT_FALSE(config.areWriteConcernModesTheSame(&otherConfig));
+ ASSERT_FALSE(otherConfig.areWriteConcernModesTheSame(&config));
+}
+
+TEST(ReplSetConfig, DifferentWriteConcernModesExtraTag) {
+ auto config = ReplSetConfig::parse(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "term" << 1.0 << "protocolVersion"
+ << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY"))
+ << BSON("_id" << 1 << "host"
+ << "otherhost:12345"
+ << "tags"
+ << BSON("Boston"
+ << "MA")))
+ << "settings"
+ << BSON("getLastErrorModes"
+ << BSON("nyonly" << BSON("NYC" << 1) << "maonly"
+ << BSON("Boston" << 1)))));
+
+ auto otherConfig = ReplSetConfig::parse(
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "term" << 1.0 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY"))
+ << BSON("_id" << 1 << "host"
+ << "otherhost:12345"
+ << "tags"
+ << BSON("Boston"
+ << "MA")))
+ << "settings" << BSON("getLastErrorModes" << BSON("nyonly" << BSON("NYC" << 1)))));
+
+ ASSERT_FALSE(config.areWriteConcernModesTheSame(&otherConfig));
+ ASSERT_FALSE(otherConfig.areWriteConcernModesTheSame(&config));
+}
+
+TEST(ReplSetConfig, DifferentWriteConcernModesSameNameDifferentDefinition) {
+ auto config = ReplSetConfig::parse(
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "term" << 1.0 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY"))
+ << BSON("_id" << 1 << "host"
+ << "otherhost:12345"
+ << "tags"
+ << BSON("Boston"
+ << "MA")))
+ << "settings" << BSON("getLastErrorModes" << BSON("eastCoast" << BSON("NYC" << 1)))));
+
+ auto otherConfig = ReplSetConfig::parse(BSON(
+ "_id"
+ << "rs0"
+ << "version" << 1 << "term" << 1.0 << "protocolVersion" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY"))
+ << BSON("_id" << 1 << "host"
+ << "otherhost:12345"
+ << "tags"
+ << BSON("Boston"
+ << "MA")))
+ << "settings" << BSON("getLastErrorModes" << BSON("eastCoast" << BSON("Boston" << 1)))));
+
+ ASSERT_FALSE(config.areWriteConcernModesTheSame(&otherConfig));
+ ASSERT_FALSE(otherConfig.areWriteConcernModesTheSame(&config));
+}
+
} // namespace
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/repl_set_tag.h b/src/mongo/db/repl/repl_set_tag.h
index 87dd68bd2ac..420ace32052 100644
--- a/src/mongo/db/repl/repl_set_tag.h
+++ b/src/mongo/db/repl/repl_set_tag.h
@@ -154,6 +154,37 @@ public:
return _constraints.end();
}
+ /**
+ * Gets the number of constraints in this pattern.
+ */
+ size_t getNumConstraints() const {
+ return _constraints.size();
+ }
+
+ bool operator==(const ReplSetTagPattern& other) const {
+ if (getNumConstraints() != other.getNumConstraints()) {
+ return false;
+ }
+
+ for (auto itrA = constraintsBegin(); itrA != constraintsEnd(); itrA++) {
+ bool same = false;
+ for (auto itrB = other.constraintsBegin(); itrB != other.constraintsEnd(); itrB++) {
+ if (*itrA == *itrB) {
+ same = true;
+ break;
+ }
+ }
+ if (!same) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const ReplSetTagPattern& other) const {
+ return !operator==(other);
+ }
+
private:
std::vector<TagCountConstraint> _constraints;
};
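
ReplSetTagPattern::operator== above compares constraint lists as sets: equal sizes plus every constraint of one pattern being found in the other. That suffices as long as a pattern does not contain duplicate constraints, which is the expected shape of patterns built from getLastErrorModes definitions. A standalone sketch of the same order-insensitive comparison, with plain integers standing in for TagCountConstraint (illustrative only):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Order-insensitive equality: same size and every element of a is also found in b.
    bool sameConstraints(const std::vector<int>& a, const std::vector<int>& b) {
        if (a.size() != b.size())
            return false;
        return std::all_of(a.begin(), a.end(), [&](int c) {
            return std::find(b.begin(), b.end(), c) != b.end();
        });
    }

    int main() {
        assert(sameConstraints({1, 2, 3}, {3, 1, 2}));  // same constraints, different order
        assert(!sameConstraints({1, 2}, {1, 2, 3}));    // different sizes
        assert(!sameConstraints({1, 2}, {1, 3}));       // same size, different constraints
    }
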
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index 8cb3d976242..01966a7722a 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -1147,6 +1147,28 @@ public:
*/
virtual void recordIfCWWCIsSetOnConfigServerOnStartup(OperationContext* opCtx) = 0;
+ /**
+ * Interface used to synchronize changes to custom write concern tags in the config and
+ * custom default write concern settings.
+ *
+     * Use [reserve|release]DefaultWriteConcernChange when making changes to the current
+     * default read/write concern.
+     * Use [reserve|release]ConfigWriteConcernTagChange when executing a reconfig that
+     * could potentially change the write concern tags.
+ */
+ class WriteConcernTagChanges {
+ public:
+ WriteConcernTagChanges() = default;
+ virtual ~WriteConcernTagChanges() = default;
+ virtual bool reserveDefaultWriteConcernChange() = 0;
+ virtual void releaseDefaultWriteConcernChange() = 0;
+
+ virtual bool reserveConfigWriteConcernTagChange() = 0;
+ virtual void releaseConfigWriteConcernTagChange() = 0;
+ };
+
+ virtual WriteConcernTagChanges* getWriteConcernTagChanges() = 0;
+
protected:
ReplicationCoordinator();
};
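
The interface above encodes a simple mutual-exclusion rule: any number of concurrent reservations of one kind, but a default write concern change and a tag-changing reconfig may never overlap. The real implementation is WriteConcernTagChangesImpl in replication_coordinator_impl.h later in this diff; the standalone sketch below (placeholder names, not MongoDB code) only demonstrates how the two reservation kinds are meant to interact.

    #include <cassert>
    #include <mutex>

    struct TagChangeCoordinator {
        std::mutex m;
        int defaultWcChanges = 0;
        int configTagChanges = 0;

        bool reserveDefaultWriteConcernChange() {
            std::lock_guard<std::mutex> lk(m);
            if (configTagChanges > 0)
                return false;  // a tag-changing reconfig is in flight; caller must retry
            ++defaultWcChanges;
            return true;
        }
        void releaseDefaultWriteConcernChange() {
            std::lock_guard<std::mutex> lk(m);
            --defaultWcChanges;
        }
        bool reserveConfigWriteConcernTagChange() {
            std::lock_guard<std::mutex> lk(m);
            if (defaultWcChanges > 0)
                return false;  // a default write concern change is in flight; caller must retry
            ++configTagChanges;
            return true;
        }
        void releaseConfigWriteConcernTagChange() {
            std::lock_guard<std::mutex> lk(m);
            --configTagChanges;
        }
    };

    int main() {
        TagChangeCoordinator coord;
        bool wcReserved = coord.reserveDefaultWriteConcernChange();     // setDefaultRWConcern starts
        bool tagReserved = coord.reserveConfigWriteConcernTagChange();  // concurrent tag-changing reconfig
        assert(wcReserved && !tagReserved);  // the reconfig is rejected and must be retried

        coord.releaseDefaultWriteConcernChange();                       // setDefaultRWConcern finishes
        tagReserved = coord.reserveConfigWriteConcernTagChange();
        assert(tagReserved);                 // now the reconfig may proceed
        coord.releaseConfigWriteConcernTagChange();
    }
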
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 0805cb25fe1..23cae7e6c1a 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -149,6 +149,8 @@ MONGO_FAIL_POINT_DEFINE(hangAfterReconfig);
MONGO_FAIL_POINT_DEFINE(skipBeforeFetchingConfig);
// Hang after grabbing the RSTL but before we start rejecting writes.
MONGO_FAIL_POINT_DEFINE(stepdownHangAfterGrabbingRSTL);
+// Hang before making checks on the new config relative to the current one.
+MONGO_FAIL_POINT_DEFINE(hangBeforeNewConfigValidationChecks);
// Simulates returning a specified error in the hello response.
MONGO_FAIL_POINT_DEFINE(setCustomErrorInHelloResponseMongoD);
// Throws right before the call into recoverTenantMigrationAccessBlockers.
@@ -3690,6 +3692,27 @@ Status ReplicationCoordinatorImpl::_doReplSetReconfig(OperationContext* opCtx,
return status;
ReplSetConfig newConfig = newConfigStatus.getValue();
+ // Synchronize this change with potential changes to the default write concern.
+ auto wcChanges = getWriteConcernTagChanges();
+ auto mustReleaseWCChange = false;
+ if (!oldConfig.areWriteConcernModesTheSame(&newConfig)) {
+ if (!wcChanges->reserveConfigWriteConcernTagChange()) {
+ return Status(
+ ErrorCodes::ConflictingOperationInProgress,
+ "Default write concern change(s) in progress. Please retry the reconfig later.");
+ }
+ // Reservation OK.
+ mustReleaseWCChange = true;
+ }
+
+ ON_BLOCK_EXIT([&] {
+ if (mustReleaseWCChange) {
+ wcChanges->releaseConfigWriteConcernTagChange();
+ }
+ });
+
+ hangBeforeNewConfigValidationChecks.pauseWhileSet();
+
// Excluding reconfigs that bump the config term during step-up from checking against changing
// the implicit default write concern, as it is not needed.
if (!skipSafetyChecks /* skipping step-up reconfig */) {
@@ -3740,6 +3763,30 @@ Status ReplicationCoordinatorImpl::_doReplSetReconfig(OperationContext* opCtx,
}
}
}
+
+ // If we are currently using a custom write concern as the default, check that the
+ // corresponding definition still exists in the new config.
+ if (serverGlobalParams.clusterRole == ClusterRole::None) {
+ try {
+ const auto rwcDefaults =
+ ReadWriteConcernDefaults::get(opCtx->getServiceContext()).getDefault(opCtx);
+ const auto wcDefault = rwcDefaults.getDefaultWriteConcern();
+ if (wcDefault) {
+ auto validateWCStatus = newConfig.validateWriteConcern(wcDefault.get());
+ if (!validateWCStatus.isOK()) {
+ return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ str::stream() << "May not remove custom write concern "
+ "definition from config while it is in "
+ "use as the default write concern. "
+ "Change the default write concern to a "
+ "non-conflicting setting and try the "
+ "reconfig again.");
+ }
+ }
+ } catch (const DBException& e) {
+ return e.toStatus("Exception while loading write concern default during reconfig");
+ }
+ }
}
BSONObj oldConfigObj = oldConfig.toBSON();
@@ -6159,5 +6206,10 @@ void ReplicationCoordinatorImpl::_validateDefaultWriteConcernOnShardStartup(With
}
}
+ReplicationCoordinatorImpl::WriteConcernTagChanges*
+ReplicationCoordinatorImpl::getWriteConcernTagChanges() {
+ return &_writeConcernTagChanges;
+}
+
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 8affe603f30..4d24f253799 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -518,6 +518,74 @@ public:
*/
void cancelElection_forTest();
+ /**
+ * Implementation of an interface used to synchronize changes to custom write concern tags in
+ * the config and custom default write concern settings.
+     * See the base class for more information.
+ */
+ class WriteConcernTagChangesImpl : public WriteConcernTagChanges {
+ public:
+ WriteConcernTagChangesImpl() = default;
+ virtual ~WriteConcernTagChangesImpl() = default;
+
+ bool reserveDefaultWriteConcernChange() override {
+ stdx::lock_guard lock(_mutex);
+ if (_configWriteConcernTagChanges > 0) {
+ return false;
+ }
+ _defaultWriteConcernChanges++;
+ return true;
+ }
+
+ void releaseDefaultWriteConcernChange() override {
+ stdx::lock_guard lock(_mutex);
+ invariant(_defaultWriteConcernChanges > 0);
+ _defaultWriteConcernChanges--;
+ }
+
+ bool reserveConfigWriteConcernTagChange() override {
+ stdx::lock_guard lock(_mutex);
+ if (_defaultWriteConcernChanges > 0) {
+ return false;
+ }
+ _configWriteConcernTagChanges++;
+ return true;
+ }
+
+ void releaseConfigWriteConcernTagChange() override {
+ stdx::lock_guard lock(_mutex);
+ invariant(_configWriteConcernTagChanges > 0);
+ _configWriteConcernTagChanges--;
+ }
+
+ private:
+ //
+ // All member variables are labeled with one of the following codes indicating the
+ // synchronization rules for accessing them.
+ //
+ // (R) Read-only in concurrent operation; no synchronization required.
+ // (S) Self-synchronizing; access in any way from any context.
+ // (PS) Pointer is read-only in concurrent operation, item pointed to is self-synchronizing;
+ // Access in any context.
+ // (M) Reads and writes guarded by _mutex
+ // (I) Independently synchronized, see member variable comment.
+
+ // The number of config write concern tag changes currently underway.
+ size_t _configWriteConcernTagChanges{0}; // (M)
+
+ // The number of default write concern changes currently underway.
+ size_t _defaultWriteConcernChanges{0}; // (M)
+
+ // Used to synchronize access to the above variables.
+ Mutex _mutex = MONGO_MAKE_LATCH(
+ "ReplicationCoordinatorImpl::PendingWriteConcernTagChangesImpl::_mutex"); // (S)
+ };
+
+ /**
+ * Returns a pointer to the WriteConcernTagChanges used by this instance.
+ */
+ WriteConcernTagChanges* getWriteConcernTagChanges() override;
+
private:
using CallbackFn = executor::TaskExecutor::CallbackFn;
@@ -1766,6 +1834,10 @@ private:
boost::optional<bool> _wasCWWCSetOnConfigServerOnStartup;
InitialSyncerInterface::OnCompletionFn _onCompletion;
+
+    // Used to synchronize default write concern changes with config write concern tag
+    // changes.
+ WriteConcernTagChangesImpl _writeConcernTagChanges;
};
} // namespace repl
diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp
index 0dfcd6e66e1..d396c2c429d 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_mock.cpp
@@ -782,5 +782,11 @@ void ReplicationCoordinatorMock::recordIfCWWCIsSetOnConfigServerOnStartup(Operat
MONGO_UNREACHABLE;
}
+ReplicationCoordinatorMock::WriteConcernTagChanges*
+ReplicationCoordinatorMock::getWriteConcernTagChanges() {
+ MONGO_UNREACHABLE;
+ return nullptr;
+}
+
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_mock.h b/src/mongo/db/repl/replication_coordinator_mock.h
index a03227ce33e..a77d5ad4982 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_mock.h
@@ -404,6 +404,21 @@ public:
virtual void recordIfCWWCIsSetOnConfigServerOnStartup(OperationContext* opCtx) final;
+ class WriteConcernTagChangesMock : public WriteConcernTagChanges {
+ virtual ~WriteConcernTagChangesMock() = default;
+ virtual bool reserveDefaultWriteConcernChange() {
+ return false;
+ };
+ virtual void releaseDefaultWriteConcernChange() {}
+
+ virtual bool reserveConfigWriteConcernTagChange() {
+ return false;
+ };
+ virtual void releaseConfigWriteConcernTagChange() {}
+ };
+
+ virtual WriteConcernTagChanges* getWriteConcernTagChanges() override;
+
private:
ServiceContext* const _service;
ReplSettings _settings;
diff --git a/src/mongo/db/repl/replication_coordinator_noop.cpp b/src/mongo/db/repl/replication_coordinator_noop.cpp
index 9ca8ffe1d9d..66f46c4fd92 100644
--- a/src/mongo/db/repl/replication_coordinator_noop.cpp
+++ b/src/mongo/db/repl/replication_coordinator_noop.cpp
@@ -607,5 +607,11 @@ void ReplicationCoordinatorNoOp::recordIfCWWCIsSetOnConfigServerOnStartup(Operat
MONGO_UNREACHABLE;
}
+ReplicationCoordinatorNoOp::WriteConcernTagChanges*
+ReplicationCoordinatorNoOp::getWriteConcernTagChanges() {
+ MONGO_UNREACHABLE;
+ return nullptr;
+}
+
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_noop.h b/src/mongo/db/repl/replication_coordinator_noop.h
index 635ac26d371..baf4b586d2b 100644
--- a/src/mongo/db/repl/replication_coordinator_noop.h
+++ b/src/mongo/db/repl/replication_coordinator_noop.h
@@ -332,6 +332,21 @@ public:
virtual void recordIfCWWCIsSetOnConfigServerOnStartup(OperationContext* opCtx) final;
+ class WriteConcernTagChangesNoOp : public WriteConcernTagChanges {
+ virtual ~WriteConcernTagChangesNoOp() = default;
+ virtual bool reserveDefaultWriteConcernChange() {
+ return false;
+ };
+ virtual void releaseDefaultWriteConcernChange() {}
+
+ virtual bool reserveConfigWriteConcernTagChange() {
+ return false;
+ };
+ virtual void releaseConfigWriteConcernTagChange() {}
+ };
+
+ virtual WriteConcernTagChanges* getWriteConcernTagChanges() override;
+
private:
ServiceContext* const _service;
};
diff --git a/src/mongo/embedded/replication_coordinator_embedded.cpp b/src/mongo/embedded/replication_coordinator_embedded.cpp
index 53e95a0739c..d0182bfce59 100644
--- a/src/mongo/embedded/replication_coordinator_embedded.cpp
+++ b/src/mongo/embedded/replication_coordinator_embedded.cpp
@@ -636,5 +636,10 @@ void ReplicationCoordinatorEmbedded::recordIfCWWCIsSetOnConfigServerOnStartup(
MONGO_UNREACHABLE;
}
+ReplicationCoordinatorEmbedded::WriteConcernTagChanges*
+ReplicationCoordinatorEmbedded::getWriteConcernTagChanges() {
+ UASSERT_NOT_IMPLEMENTED;
+}
+
} // namespace embedded
} // namespace mongo
diff --git a/src/mongo/embedded/replication_coordinator_embedded.h b/src/mongo/embedded/replication_coordinator_embedded.h
index b6c5bfe1c62..f2d29ddea37 100644
--- a/src/mongo/embedded/replication_coordinator_embedded.h
+++ b/src/mongo/embedded/replication_coordinator_embedded.h
@@ -342,6 +342,21 @@ public:
virtual void recordIfCWWCIsSetOnConfigServerOnStartup(OperationContext* opCtx) final;
+ class WriteConcernTagChangesEmbedded : public WriteConcernTagChanges {
+ virtual ~WriteConcernTagChangesEmbedded() = default;
+ virtual bool reserveDefaultWriteConcernChange() {
+ return false;
+ };
+ virtual void releaseDefaultWriteConcernChange() {}
+
+ virtual bool reserveConfigWriteConcernTagChange() {
+ return false;
+ };
+ virtual void releaseConfigWriteConcernTagChange() {}
+ };
+
+ virtual WriteConcernTagChanges* getWriteConcernTagChanges() override;
+
private:
// Back pointer to the ServiceContext that has started the instance.
ServiceContext* const _service;