author     Moustafa Maher <m.maher@10gen.com>               2021-07-22 00:15:21 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com> 2021-07-22 19:28:32 +0000
commit     e817e6b1b9dd1a49a63a6d469eb3f890ff3ba512 (patch)
tree       90e414940254b66bd1c76be85bcb67bb399eae2d
parent     f3d6c279b5fb4eb15ae57145aa7a1316753d6016 (diff)
SERVER-56800 Fail addShard if CWWC disagrees with existing CWWC on cluster
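
This change makes the config server compare the cluster-wide write concern
(CWWC) reported by a prospective shard against the CWWC already set on the
cluster, and reject addShard on a mismatch. A minimal shell sketch of the new
failure mode (st and rs stand for a ShardingTest and a ReplSetTest, as in the
new test below; the replica set is assumed to already have a different CWWC
persisted):

    // Set CWWC {w: "majority"} on the cluster.
    assert.commandWorked(st.s.adminCommand({
        setDefaultRWConcern: 1,
        defaultWriteConcern: {w: "majority"},
        writeConcern: {w: "majority"}
    }));
    // The replica set rs is assumed to have CWWC {w: 1} set; addShard now fails.
    assert.commandFailed(st.s.adminCommand({addShard: rs.getURL()}));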
-rw-r--r--  jstests/sharding/cwwc_conflict_add_shard.js                         | 153
-rw-r--r--  src/mongo/db/read_write_concern_defaults.cpp                        |  15
-rw-r--r--  src/mongo/db/read_write_concern_defaults.h                          |   5
-rw-r--r--  src/mongo/db/repl/SConscript                                        |   1
-rw-r--r--  src/mongo/db/repl/hello.idl                                         |   5
-rw-r--r--  src/mongo/db/repl/replication_info.cpp                              |   6
-rw-r--r--  src/mongo/db/s/SConscript                                           |   1
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp |  37
8 files changed, 218 insertions(+), 5 deletions(-)
diff --git a/jstests/sharding/cwwc_conflict_add_shard.js b/jstests/sharding/cwwc_conflict_add_shard.js
new file mode 100644
index 00000000000..eb1e95f2b84
--- /dev/null
+++ b/jstests/sharding/cwwc_conflict_add_shard.js
@@ -0,0 +1,153 @@
+/**
+ * Tests that adding a shard to a sharded cluster fails if the CWWC on the shard disagrees with
+ * the existing CWWC on the cluster.
+ * @tags: [requires_fcv_50, requires_majority_read_concern, requires_persistence]
+ */
+
+(function() {
+"use strict";
+
+const st = new ShardingTest({shards: 1, mongos: 1});
+const admin = st.getDB('admin');
+let shardServer;
+
+function createNewShard() {
+ if (shardServer) {
+ shardServer.stopSet();
+ }
+
+ shardServer = new ReplSetTest({name: "shardServer", nodes: 3, useHostName: true});
+ shardServer.startSet();
+ shardServer.initiate();
+}
+
+function convertShardToRS() {
+ jsTestLog("Converting shard server to replicaSet.");
+ shardServer.nodes.forEach(function(node) {
+ delete node.fullOptions.shardsvr;
+ });
+
+ shardServer.restart(shardServer.nodes);
+ shardServer.awaitNodesAgreeOnPrimary();
+}
+
+function convertRSToShard() {
+ jsTestLog("Converting replicaSet server to shardServer.");
+ shardServer.restart(shardServer.nodes, {shardsvr: ""});
+ shardServer.awaitNodesAgreeOnPrimary();
+}
+
+function removeShardAndWait() {
+ jsTestLog("Removing the shard from the cluster should succeed.");
+ const removeShardCmd = {removeShard: shardServer.getURL()};
+ const res = st.s.adminCommand(removeShardCmd);
+
+ assert.commandWorked(res);
+ assert(res.state === "started");
+
+ assert.soon(function() {
+ let res = st.s.adminCommand(removeShardCmd);
+ if (res.state === "completed") {
+ return true;
+ } else {
+ jsTestLog("Still waiting for shard removal to complete:");
+ printjson(res);
+ return false;
+ }
+ });
+
+ jsTestLog("Shard removal completed.");
+}
+
+function testAddShard(cwwcOnShard, cwwcOnCluster, shouldSucceed, fixCWWCOnShard) {
+ jsTestLog("Running addShard test with CWWCOnShard: " + tojson(cwwcOnShard) +
+ " and CWWCOnCluster: " + tojson(cwwcOnCluster));
+
+ let cwwcOnShardCmd, cwwcOnClusterCmd;
+
+ if (cwwcOnShard) {
+ jsTestLog("Setting the CWWC on shard before adding it.");
+ cwwcOnShardCmd = {
+ setDefaultRWConcern: 1,
+ defaultWriteConcern: cwwcOnShard,
+ writeConcern: {w: "majority"}
+ };
+ assert.commandWorked(shardServer.getPrimary().adminCommand(cwwcOnShardCmd));
+ shardServer.awaitReplication();
+ }
+
+ if (cwwcOnCluster) {
+ jsTestLog("Setting the CWWC on cluster before adding shard.");
+ cwwcOnClusterCmd = {
+ setDefaultRWConcern: 1,
+ defaultWriteConcern: cwwcOnCluster,
+ writeConcern: {w: "majority"}
+ };
+ assert.commandWorked(st.s.adminCommand(cwwcOnClusterCmd));
+ }
+
+ jsTestLog("Attempting to add shard to the cluster");
+ convertRSToShard();
+
+ if (!shouldSucceed) {
+ jsTestLog("Adding shard to the cluster should fail.");
+ assert.commandFailed(admin.runCommand({addshard: shardServer.getURL()}));
+
+ if (fixCWWCOnShard) {
+ convertShardToRS();
+ jsTestLog("Setting the CWWC on shard to match CWWC on cluster.");
+ assert.commandWorked(shardServer.getPrimary().adminCommand(cwwcOnClusterCmd));
+ shardServer.awaitReplication();
+ convertRSToShard();
+ } else {
+ jsTestLog("Setting the CWWC on cluster to match CWWC on shard.");
+ assert.commandWorked(st.s.adminCommand(cwwcOnShardCmd));
+ }
+ }
+
+ jsTestLog("Adding shard to the cluster should succeed.");
+ assert.commandWorked(admin.runCommand({addshard: shardServer.getURL()}));
+
+ // Cleanup.
+ removeShardAndWait();
+ convertShardToRS();
+}
+
+const cwwc = [
+ {w: 1},
+ {w: "majority"},
+ {w: "majority", wtimeout: 0, j: false},
+ {w: "majority", wtimeout: 0, j: true}
+];
+
+createNewShard();
+// With no CWWC set on either the shard or the cluster, addShard should succeed.
+testAddShard(undefined /* cwwcOnShard */, undefined /* cwwcOnCluster */, true /* shouldSucceed */);
+
+// With CWWC set on the shard but not on the cluster, addShard should fail.
+testAddShard(cwwc[0] /* cwwcOnShard */,
+ undefined /* cwwcOnCluster */,
+ false /* shouldSucceed */,
+ false /* fixCWWCOnShard */);
+
+createNewShard();
+// With CWWC set on the cluster but not on the shard, addShard should succeed.
+testAddShard(undefined /* cwwcOnShard */, cwwc[0] /* cwwcOnCluster */, true /* shouldSucceed */);
+
+for (var i = 0; i < cwwc.length; i++) {
+ // Setting the same CWWC on shard and cluster should succeed.
+ testAddShard(cwwc[i] /* cwwcOnShard */, cwwc[i] /* cwwcOnCluster */, true /* shouldSucceed */);
+ for (var j = i + 1; j < cwwc.length; j++) {
+ for (const fixCWWCOnShard of [true, false]) {
+ // Setting different CWWC on shard and cluster should fail.
+ testAddShard(cwwc[i] /* cwwcOnShard */,
+ cwwc[j] /* cwwcOnCluster */,
+ false /* shouldSucceed */,
+ fixCWWCOnShard);
+ }
+ }
+}
+
+st.stop();
+shardServer.stopSet();
+})();
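
Note: the test moves the replica set in and out of the cluster by restarting
it with and without the shardsvr option. A quick way to inspect the CWWC
persisted on a prospective shard before adding it (a sketch, reusing the
shardServer variable from the test above):

    // getDefaultRWConcern reports the defaults persisted on the replica set.
    const res = shardServer.getPrimary().adminCommand({getDefaultRWConcern: 1});
    assert.commandWorked(res);
    jsTestLog("CWWC on the prospective shard: " + tojson(res.defaultWriteConcern));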
diff --git a/src/mongo/db/read_write_concern_defaults.cpp b/src/mongo/db/read_write_concern_defaults.cpp
index a5f90ae3da6..50a643e89e2 100644
--- a/src/mongo/db/read_write_concern_defaults.cpp
+++ b/src/mongo/db/read_write_concern_defaults.cpp
@@ -150,9 +150,7 @@ RWConcernDefault ReadWriteConcernDefaults::generateNewCWRWCToBeSavedOnDisk(
}
bool ReadWriteConcernDefaults::isCWWCSet(OperationContext* opCtx) {
- auto cached = _getDefaultCWRWCFromDisk(opCtx).value_or(RWConcernDefaultAndTime());
- return cached.getDefaultWriteConcern() &&
- !cached.getDefaultWriteConcern().get().usedDefaultConstructedWC;
+ return getCWWC(opCtx) ? true : false;
}
void ReadWriteConcernDefaults::observeDirectWriteToConfigSettings(OperationContext* opCtx,
@@ -301,6 +299,17 @@ ReadWriteConcernDefaults::getDefaultWriteConcern(OperationContext* opCtx) {
return current.getDefaultWriteConcern();
}
+boost::optional<ReadWriteConcernDefaults::WriteConcern> ReadWriteConcernDefaults::getCWWC(
+ OperationContext* opCtx) {
+ auto cached = _getDefaultCWRWCFromDisk(opCtx);
+ if (cached && cached.get().getDefaultWriteConcern() &&
+ !cached.get().getDefaultWriteConcern().get().usedDefaultConstructedWC) {
+ return cached.get().getDefaultWriteConcern().get();
+ }
+
+ return boost::none;
+}
+
ReadWriteConcernDefaults& ReadWriteConcernDefaults::get(ServiceContext* service) {
return *getReadWriteConcernDefaults(service);
}
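
Note: getCWWC() returns boost::none both when no defaults document exists and
when the persisted default write concern was only default-constructed, so
isCWWCSet() now reduces to a single lookup. From the shell, roughly the same
distinction is visible via defaultWriteConcernSource (a sketch; the field name
is assumed from the 5.0 getDefaultRWConcern reply):

    const defaults = db.adminCommand({getDefaultRWConcern: 1});
    // "global" is expected to mean an explicitly set CWWC; "implicit" means none.
    const cwwcIsSet = (defaults.defaultWriteConcernSource === "global");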
diff --git a/src/mongo/db/read_write_concern_defaults.h b/src/mongo/db/read_write_concern_defaults.h
index 5166b5098fa..7e31f914015 100644
--- a/src/mongo/db/read_write_concern_defaults.h
+++ b/src/mongo/db/read_write_concern_defaults.h
@@ -172,6 +172,11 @@ public:
*/
boost::optional<bool> getImplicitDefaultWriteConcernMajority_forTest();
+ /**
+ * Gets the cluster-wide write concern (CWWC) persisted on disk.
+ */
+ boost::optional<WriteConcern> getCWWC(OperationContext* opCtx);
+
private:
enum class Type { kReadWriteConcernEntry };
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 7e19388a3cb..84dd97d8902 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -1238,6 +1238,7 @@ env.Library(
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/commands/server_status',
+ '$BUILD_DIR/mongo/db/read_write_concern_defaults',
'$BUILD_DIR/mongo/db/stats/counters',
'$BUILD_DIR/mongo/transport/message_compressor',
'hello_auth',
diff --git a/src/mongo/db/repl/hello.idl b/src/mongo/db/repl/hello.idl
index 03e4457a9f1..a3e7392585b 100644
--- a/src/mongo/db/repl/hello.idl
+++ b/src/mongo/db/repl/hello.idl
@@ -32,6 +32,7 @@ global:
imports:
- "mongo/db/auth/auth_types.idl"
- "mongo/db/repl/replication_types.idl"
+ - "mongo/db/write_concern_options.idl"
- "mongo/idl/basic_types.idl"
- "mongo/rpc/metadata/client_metadata.idl"
- "mongo/rpc/topology_version.idl"
@@ -197,6 +198,10 @@ structs:
# Only populated on shard server.
type: bool
optional: true
+ cwwc:
+ # Only populated on shard server.
+ type: WriteConcern
+ optional: true
commands:
hello:
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 218a51bece5..e0df47b07c4 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -51,6 +51,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/ops/write_ops.h"
#include "mongo/db/query/internal_plans.h"
+#include "mongo/db/read_write_concern_defaults.h"
#include "mongo/db/repl/hello_auth.h"
#include "mongo/db/repl/hello_gen.h"
#include "mongo/db/repl/hello_response.h"
@@ -127,6 +128,11 @@ TopologyVersion appendReplicationInfo(OperationContext* opCtx,
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
result->append(HelloCommandReply::kIsImplicitDefaultMajorityWCFieldName,
replCoord->getConfig().isImplicitDefaultWriteConcernMajority());
+
+ auto cwwc = ReadWriteConcernDefaults::get(opCtx).getCWWC(opCtx);
+ if (cwwc) {
+ result->append(HelloCommandReply::kCwwcFieldName, cwwc.get().toBSON());
+ }
}
return helloResponse->getTopologyVersion().get();
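
Note: with this change, a shard server's hello reply carries the persisted
CWWC (if any) in the new cwwc field, which the config server reads during
addShard. A sketch of inspecting it directly (shardPrimary is an assumed
connection to a shard's primary):

    const hello = shardPrimary.adminCommand({hello: 1});
    if (hello.cwwc) {
        // Present only on shard servers with an explicitly set CWWC.
        jsTestLog("CWWC reported by the shard: " + tojson(hello.cwwc));
    }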
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 9487406f929..277ec956dff 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -272,6 +272,7 @@ env.Library(
'$BUILD_DIR/mongo/db/dbdirectclient',
'$BUILD_DIR/mongo/db/pipeline/process_interface/shardsvr_process_interface',
'$BUILD_DIR/mongo/db/pipeline/sharded_agg_helpers',
+ '$BUILD_DIR/mongo/db/repl/hello_command',
'$BUILD_DIR/mongo/db/repl/read_concern_args',
'$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
'$BUILD_DIR/mongo/db/repl/replica_set_aware_service',
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 3584d17577a..669ff694087 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -38,6 +38,7 @@
#include <set>
#include "mongo/base/status_with.h"
+#include "mongo/bson/bsonobj_comparator.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/connection_string.h"
#include "mongo/client/read_preference.h"
@@ -53,6 +54,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/read_write_concern_defaults.h"
+#include "mongo/db/repl/hello_gen.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/repl_set_config.h"
#include "mongo/db/s/add_shard_cmd_gen.h"
@@ -393,8 +395,8 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
<< " as a shard since it is a config server"};
}
- if (resIsMaster.hasField("isImplicitDefaultMajorityWC") &&
- !resIsMaster.getBoolField("isImplicitDefaultMajorityWC") &&
+ if (resIsMaster.hasField(HelloCommandReply::kIsImplicitDefaultMajorityWCFieldName) &&
+ !resIsMaster.getBoolField(HelloCommandReply::kIsImplicitDefaultMajorityWCFieldName) &&
!ReadWriteConcernDefaults::get(opCtx).isCWWCSet(opCtx)) {
return {
ErrorCodes::OperationFailed,
@@ -407,6 +409,37 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
"using the setDefaultRWConcern command and try again."};
}
+ if (resIsMaster.hasField(HelloCommandReply::kCwwcFieldName)) {
+ auto cwwcOnShard = WriteConcernOptions::parse(
+ resIsMaster.getObjectField(HelloCommandReply::kCwwcFieldName))
+ .getValue()
+ .toBSON();
+
+ auto cachedCWWC = ReadWriteConcernDefaults::get(opCtx).getCWWC(opCtx);
+ if (!cachedCWWC) {
+ return {ErrorCodes::OperationFailed,
+ str::stream() << "Cannot add " << connectionString.toString()
+ << " as a shard since the cluster-wide write concern is set on "
+ "the shard and not set on the cluster. Set the CWWC on the "
+ "cluster to the same CWWC as the shard and try again."
+ << " The CWWC on the shard is (" << cwwcOnShard << ")."};
+ }
+
+ auto cwwcOnConfig = cachedCWWC.get().toBSON();
+ BSONObjComparator comparator(
+ BSONObj(), BSONObjComparator::FieldNamesMode::kConsider, nullptr);
+ if (comparator.compare(cwwcOnShard, cwwcOnConfig) != 0) {
+ return {
+ ErrorCodes::OperationFailed,
+ str::stream()
+ << "Cannot add " << connectionString.toString()
+ << " as a shard since the cluster-wide write concern set on the shard doesn't "
+ "match the one set on the cluster. Make sure they match and try again."
+ << " The CWWC on the shard is (" << cwwcOnShard
+ << "), and the CWWC on the cluster is (" << cwwcOnConfig << ")."};
+ }
+ }
+
// If the shard is part of a replica set, make sure all the hosts mentioned in the connection
// string are part of the set. It is fine if not all members of the set are mentioned in the
// connection string, though.
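
Note: the two new error paths mirror the remediation their messages suggest:
either set the shard's CWWC on the cluster, or set the cluster's CWWC on the
shard, then retry. A sketch of the first path (cwwcOnShard is assumed to hold
the write concern reported in the error message):

    assert.commandWorked(st.s.adminCommand({
        setDefaultRWConcern: 1,
        defaultWriteConcern: cwwcOnShard,
        writeConcern: {w: "majority"}
    }));
    assert.commandWorked(st.s.adminCommand({addShard: shardServer.getURL()}));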