author    Spencer T Brody <spencer@mongodb.com>    2015-10-12 18:47:37 -0400
committer Spencer T Brody <spencer@mongodb.com>    2015-10-13 23:27:06 -0400
commit    110e24cb3571778f4abb53e8f121b14f529307f6 (patch)
tree      988034cf220254503fdaafadb5f8abbe2f7ffc74
parent    3c706c7fcabbb267a29c482b1c016e838ce82588 (diff)
download  mongo-110e24cb3571778f4abb53e8f121b14f529307f6.tar.gz
SERVER-20891 User-initiated writes to the config server must use w:majority write concern
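For reference, the enforced behavior mirrors jstests/sharding/conf_server_write_concern.js below (a sketch only; it assumes `st` is a running ShardingTest and the write goes through mongos):

// Writes to the config database accept only w:1 (upconverted to w:'majority') or w:'majority'.
var confDB = st.s.getDB('config');
assert.writeOK(confDB.settings.update({ _id: 'balancer' }, { $set: { stopped: true }},
                                      { writeConcern: { w: 'majority' }}));  // allowed
assert.writeOK(confDB.settings.update({ _id: 'balancer' }, { $set: { stopped: true }},
                                      { writeConcern: { w: 1 }}));           // upconverted to w:'majority'
assert.writeError(confDB.settings.update({ _id: 'balancer' }, { $set: { stopped: true }},
                                         { writeConcern: { w: 2 }}));        // rejected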
-rw-r--r--  jstests/sharding/conf_server_write_concern.js                       7
-rw-r--r--  jstests/sharding/sharding_with_keyfile_auth.js                     83
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp                189
-rw-r--r--  src/mongo/db/write_concern.cpp                                     21
-rw-r--r--  src/mongo/db/write_concern_options.cpp                              4
-rw-r--r--  src/mongo/db/write_concern_options.h                                6
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp       45
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp  82
-rw-r--r--  src/mongo/s/cluster_write.cpp                                      58
9 files changed, 298 insertions, 197 deletions
diff --git a/jstests/sharding/conf_server_write_concern.js b/jstests/sharding/conf_server_write_concern.js
index f3d20e71410..938b0b9a9dc 100644
--- a/jstests/sharding/conf_server_write_concern.js
+++ b/jstests/sharding/conf_server_write_concern.js
@@ -11,11 +11,16 @@ function writeToConfigTest(){
{ $set: { stopped: true }},
{ writeConcern: { w: 'majority' }}));
- // w:1 should still work
+ // w:1 should still work - it gets automatically upconverted to w:majority
assert.writeOK(confDB.settings.update({ _id: 'balancer' },
{ $set: { stopped: true }},
{ writeConcern: { w: 1 }}));
+ // Write concerns other than w:1 and w:majority should fail.
+ assert.writeError(confDB.settings.update({ _id: 'balancer' },
+ { $set: { stopped: true }},
+ { writeConcern: { w: 2 }}));
+
st.stop();
}
diff --git a/jstests/sharding/sharding_with_keyfile_auth.js b/jstests/sharding/sharding_with_keyfile_auth.js
deleted file mode 100644
index 5e0ebab64c0..00000000000
--- a/jstests/sharding/sharding_with_keyfile_auth.js
+++ /dev/null
@@ -1,83 +0,0 @@
-// Tests sharding with a key file
-
-myTestName = "sharding_with_keyfile"
-
-keyFile = "jstests/sharding/" + myTestName + ".key";
-
-run( "chmod" , "600" , keyFile );
-
-var st = new ShardingTest({ name : myTestName ,
- shards : 2,
- mongos : 1,
- keyFile : keyFile })
-
-// Make sure all our instances got the key
-var configs = st._configServers
-var mongoses = st._mongos
-
-var createUserWC = st.configRS == null ? null : { w: st.configRS.numNodes };
-
-mongoses[0].getDB("admin").createUser({ user: "root", pwd: "pass", roles: ["root"] }, createUserWC);
-
-for( var i = 0; i < configs.length; i++ ){
- var confAdmin = configs[i].getDB( "admin" );
- confAdmin.auth( "root", "pass" );
- printjson( confAdmin.runCommand({ getCmdLineOpts : 1 }) )
- assert.eq( confAdmin.runCommand({ getCmdLineOpts : 1 }).parsed.security.keyFile, keyFile )
-}
-
-for( var i = 0; i < mongoses.length; i++ ){
- var monsAdmin = mongoses[i].getDB( "admin" );
- monsAdmin.auth( "root", "pass" );
- printjson( monsAdmin.runCommand({ getCmdLineOpts : 1 }) )
- assert.eq( monsAdmin.runCommand({ getCmdLineOpts : 1 }).parsed.security.keyFile, keyFile )
-}
-
-var mongos = new Mongo( "localhost:" + st.s0.port )
-var coll = mongos.getDB( "test" ).foo;
-
-mongos.getDB( "admin" ).auth( "root", "pass" );
-mongos.getDB( "admin" ).runCommand({shardCollection : coll, key : {_id : 1}});
-
-// Create an index so we can find by num later
-coll.ensureIndex({ insert : 1 })
-
-// For more logging
-// mongos.getDB("admin").runCommand({ setParameter : 1, logLevel : 3 })
-
-print( "INSERT!" )
-
-// Insert a bunch of data
-var toInsert = 2000;
-var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < toInsert; i++ ){
- bulk.insert({ my : "test", data : "to", insert : i });
-}
-assert.writeOK(bulk.execute());
-
-print( "UPDATE!" )
-
-// Update a bunch of data
-var toUpdate = toInsert;
-bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < toUpdate; i++ ){
- var id = coll.findOne({ insert : i })._id;
- bulk.find({ insert : i, _id : id }).updateOne({ $inc : { counter : 1 } });
-}
-assert.writeOK(bulk.execute());
-
-print( "DELETE" )
-
-// Remove a bunch of data
-var toDelete = toInsert / 2;
-bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < toDelete; i++ ){
- bulk.find({ insert : i }).remove();
-}
-assert.writeOK(bulk.execute());
-
-// Make sure the right amount of data is there
-assert.eq( coll.find().count(), toInsert / 2 )
-
-// Finish
-st.stop()
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index d60c54b35f7..30387107ff4 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -532,14 +532,15 @@ Status removePrivilegeDocuments(OperationContext* txn,
*/
Status writeAuthSchemaVersionIfNeeded(OperationContext* txn,
AuthorizationManager* authzManager,
- int foundSchemaVersion) {
+ int foundSchemaVersion,
+ const BSONObj& writeConcern) {
Status status = updateOneAuthzDocument(
txn,
AuthorizationManager::versionCollectionNamespace,
AuthorizationManager::versionDocumentQuery,
BSON("$set" << BSON(AuthorizationManager::schemaVersionFieldName << foundSchemaVersion)),
- true, // upsert
- BSONObj()); // write concern
+ true, // upsert
+ writeConcern);
if (status == ErrorCodes::NoMatchingDocument) { // SERVER-11492
status = Status::OK();
}
@@ -552,7 +553,9 @@ Status writeAuthSchemaVersionIfNeeded(OperationContext* txn,
* for the MongoDB 2.6 and 3.0 MongoDB-CR/SCRAM mixed auth mode.
* Returns an error otherwise.
*/
-Status requireAuthSchemaVersion26Final(OperationContext* txn, AuthorizationManager* authzManager) {
+Status requireAuthSchemaVersion26Final(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ const BSONObj& writeConcern) {
int foundSchemaVersion;
Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
if (!status.isOK()) {
@@ -567,7 +570,7 @@ Status requireAuthSchemaVersion26Final(OperationContext* txn, AuthorizationManag
<< AuthorizationManager::schemaVersion26Final << " but found "
<< foundSchemaVersion);
}
- return writeAuthSchemaVersionIfNeeded(txn, authzManager, foundSchemaVersion);
+ return writeAuthSchemaVersionIfNeeded(txn, authzManager, foundSchemaVersion, writeConcern);
}
/**
@@ -712,7 +715,7 @@ public:
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, args.writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -822,7 +825,7 @@ public:
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, args.writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -886,26 +889,25 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ UserName userName;
+ BSONObj writeConcern;
+ Status status =
+ auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName, &writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
-
- UserName userName;
- BSONObj writeConcern;
- status = auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName, &writeConcern);
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- int nMatched;
-
audit::logDropUser(ClientBasic::getCurrent(), userName);
+ int nMatched;
status = removePrivilegeDocuments(txn,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< userName.getUser()
@@ -959,26 +961,24 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ BSONObj writeConcern;
+ Status status =
+ auth::parseAndValidateDropAllUsersFromDatabaseCommand(cmdObj, dbname, &writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
- BSONObj writeConcern;
- status =
- auth::parseAndValidateDropAllUsersFromDatabaseCommand(cmdObj, dbname, &writeConcern);
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- int numRemoved;
-
audit::logDropAllUsersFromDatabase(ClientBasic::getCurrent(), dbname);
+ int numRemoved;
status = removePrivilegeDocuments(txn,
BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname),
writeConcern,
@@ -1023,24 +1023,24 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
std::string userNameString;
std::vector<RoleName> roles;
BSONObj writeConcern;
- status = auth::parseRolePossessionManipulationCommands(
+ Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "grantRolesToUser", dbname, &userNameString, &roles, &writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
UserName userName(userNameString, dbname);
unordered_set<RoleName> userRoles;
status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
@@ -1098,24 +1098,24 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
std::string userNameString;
std::vector<RoleName> roles;
BSONObj writeConcern;
- status = auth::parseRolePossessionManipulationCommands(
+ Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "revokeRolesFromUser", dbname, &userNameString, &roles, &writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
UserName userName(userNameString, dbname);
unordered_set<RoleName> userRoles;
status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
@@ -1346,7 +1346,7 @@ public:
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, args.writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1430,7 +1430,7 @@ public:
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, args.writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1497,24 +1497,24 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
RoleName roleName;
PrivilegeVector privilegesToAdd;
BSONObj writeConcern;
- status = auth::parseAndValidateRolePrivilegeManipulationCommands(
+ Status status = auth::parseAndValidateRolePrivilegeManipulationCommands(
cmdObj, "grantPrivilegesToRole", dbname, &roleName, &privilegesToAdd, &writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
if (RoleGraph::isBuiltinRole(roleName)) {
return appendCommandStatus(
result,
@@ -1605,24 +1605,25 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ RoleName roleName;
+ PrivilegeVector privilegesToRemove;
+ BSONObj writeConcern;
+ Status status =
+ auth::parseAndValidateRolePrivilegeManipulationCommands(cmdObj,
+ "revokePrivilegesFromRole",
+ dbname,
+ &roleName,
+ &privilegesToRemove,
+ &writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- RoleName roleName;
- PrivilegeVector privilegesToRemove;
- BSONObj writeConcern;
- status = auth::parseAndValidateRolePrivilegeManipulationCommands(cmdObj,
- "revokePrivilegesFromRole",
- dbname,
- &roleName,
- &privilegesToRemove,
- &writeConcern);
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1742,7 +1743,7 @@ public:
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1815,24 +1816,24 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
std::string roleNameString;
std::vector<RoleName> rolesToRemove;
BSONObj writeConcern;
- status = auth::parseRolePossessionManipulationCommands(
+ Status status = auth::parseRolePossessionManipulationCommands(
cmdObj, "revokeRolesFromRole", dbname, &roleNameString, &rolesToRemove, &writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
RoleName roleName(roleNameString, dbname);
if (RoleGraph::isBuiltinRole(roleName)) {
return appendCommandStatus(
@@ -1907,18 +1908,18 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ RoleName roleName;
+ BSONObj writeConcern;
+ Status status = auth::parseDropRoleCommand(cmdObj, dbname, &roleName, &writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- RoleName roleName;
- BSONObj writeConcern;
- status = auth::parseDropRoleCommand(cmdObj, dbname, &roleName, &writeConcern);
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2072,7 +2073,7 @@ public:
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2697,7 +2698,7 @@ public:
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager, args.writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index f035b3f5bfd..64f2ad068db 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -130,13 +130,24 @@ Status validateWriteConcern(const WriteConcernOptions& writeConcern) {
const repl::ReplicationCoordinator::Mode replMode =
repl::getGlobalReplicationCoordinator()->getReplicationMode();
- if (isConfigServer && replMode != repl::ReplicationCoordinator::modeReplSet) {
- // SCCC config servers can have a master-slave oplog, but we still don't allow w > 1.
- if (writeConcern.wNumNodes > 1) {
- return Status(ErrorCodes::BadValue,
- "cannot use 'w' > 1 on a sync cluster connection config server host");
+ if (isConfigServer) {
+ if (!writeConcern.validForConfigServers()) {
+ return Status(
+ ErrorCodes::BadValue,
+ str::stream()
+ << "w:1 and w:'majority' are the only valid write concerns when writing to "
+ "config servers, got: " << writeConcern.toBSON().toString());
+ }
+ if (replMode == repl::ReplicationCoordinator::modeReplSet && writeConcern.wMode == "") {
+ invariant(writeConcern.wNumNodes == 1);
+ return Status(
+ ErrorCodes::BadValue,
+ str::stream()
+ << "w: 'majority' is the only valid write concern when writing to config "
+ "server replica sets, got: " << writeConcern.toBSON().toString());
}
}
+
if (replMode == repl::ReplicationCoordinator::modeNone) {
if (writeConcern.wNumNodes > 1) {
return Status(ErrorCodes::BadValue, "cannot use 'w' > 1 when a host is not replicated");
diff --git a/src/mongo/db/write_concern_options.cpp b/src/mongo/db/write_concern_options.cpp
index e24f652517d..b099d868a96 100644
--- a/src/mongo/db/write_concern_options.cpp
+++ b/src/mongo/db/write_concern_options.cpp
@@ -187,4 +187,8 @@ bool WriteConcernOptions::shouldWaitForOtherNodes() const {
return !wMode.empty() || wNumNodes > 1;
}
+bool WriteConcernOptions::validForConfigServers() const {
+ return wNumNodes == 1 || wMode == kMajority;
+}
+
} // namespace mongo
diff --git a/src/mongo/db/write_concern_options.h b/src/mongo/db/write_concern_options.h
index a0cb45f8da9..1bac963f16f 100644
--- a/src/mongo/db/write_concern_options.h
+++ b/src/mongo/db/write_concern_options.h
@@ -87,6 +87,12 @@ public:
*/
bool shouldWaitForOtherNodes() const;
+ /**
+ * Returns true if this is a valid write concern to use against a config server.
+ * TODO(spencer): Once we stop supporting SCCC config servers, forbid this from allowing w:1
+ */
+ bool validForConfigServers() const;
+
void reset() {
syncMode = NONE;
wNumNodes = 0;
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
index acb2162f823..bb0c14fe7a0 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
@@ -867,6 +867,49 @@ bool CatalogManagerReplicaSet::runUserManagementWriteCommand(OperationContext* t
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
+ BSONObj cmdToRun = cmdObj;
+ {
+ // Make sure that if the command has a write concern, it is w:1 or w:majority, and
+ // convert w:1 or no write concern to w:majority before sending.
+ WriteConcernOptions writeConcern;
+ const char* writeConcernFieldName = "writeConcern";
+ BSONElement writeConcernElement = cmdObj[writeConcernFieldName];
+ bool initialCmdHadWriteConcern = !writeConcernElement.eoo();
+ if (initialCmdHadWriteConcern) {
+ Status status = writeConcern.parse(writeConcernElement.Obj());
+ if (!status.isOK()) {
+ return Command::appendCommandStatus(*result, status);
+ }
+ if (!writeConcern.validForConfigServers()) {
+ return Command::appendCommandStatus(
+ *result,
+ Status(ErrorCodes::InvalidOptions,
+ str::stream()
+ << "Invalid replication write concern. Writes to config server "
+ "replica sets must use w:'majority', got: "
+ << writeConcern.toBSON()));
+ }
+ }
+ writeConcern.wMode = WriteConcernOptions::kMajority;
+ writeConcern.wNumNodes = 0;
+
+ BSONObjBuilder modifiedCmd;
+ if (!initialCmdHadWriteConcern) {
+ modifiedCmd.appendElements(cmdObj);
+ } else {
+ BSONObjIterator cmdObjIter(cmdObj);
+ while (cmdObjIter.more()) {
+ BSONElement e = cmdObjIter.next();
+ if (str::equals(e.fieldName(), writeConcernFieldName)) {
+ continue;
+ }
+ modifiedCmd.append(e);
+ }
+ }
+ modifiedCmd.append(writeConcernFieldName, writeConcern.toBSON());
+ cmdToRun = modifiedCmd.obj();
+ }
+
auto scopedDistLock =
getDistLockManager()->lock(txn, "authorizationData", commandName, Seconds{5});
if (!scopedDistLock.isOK()) {
@@ -874,7 +917,7 @@ bool CatalogManagerReplicaSet::runUserManagementWriteCommand(OperationContext* t
}
auto response =
- grid.shardRegistry()->runCommandOnConfigWithNotMasterRetries(txn, dbname, cmdObj);
+ grid.shardRegistry()->runCommandOnConfigWithNotMasterRetries(txn, dbname, cmdToRun);
if (!response.isOK()) {
return Command::appendCommandStatus(*result, response.getStatus());
}
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
index 47dd2befb6b..e46c2582ea3 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
@@ -700,9 +700,84 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandSuccess) {
onCommand([](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
+ // Since no write concern was sent we will add w:majority
ASSERT_EQUALS(BSON("dropUser"
<< "test"
- << "maxTimeMS" << 30000),
+ << "writeConcern" << BSON("w"
+ << "majority"
+ << "wtimeout" << 0) << "maxTimeMS" << 30000),
+ request.cmdObj);
+
+ ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+
+ BSONObjBuilder responseBuilder;
+ Command::appendCommandStatus(responseBuilder,
+ Status(ErrorCodes::UserNotFound, "User test@test not found"));
+ return responseBuilder.obj();
+ });
+
+ // Now wait for the runUserManagementWriteCommand call to return
+ future.timed_get(kFutureTimeout);
+}
+
+TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandInvalidWriteConcern) {
+ configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
+
+ BSONObjBuilder responseBuilder;
+ bool ok =
+ catalogManager()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern" << BSON("w" << 2)),
+ &responseBuilder);
+ ASSERT_FALSE(ok);
+
+ Status commandStatus = Command::getStatusFromCommandResult(responseBuilder.obj());
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, commandStatus);
+ ASSERT_STRING_CONTAINS(commandStatus.reason(), "Invalid replication write concern");
+}
+
+TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandRewriteWriteConcern) {
+ // Tests that if you send a w:1 write concern it gets replaced with w:majority
+ configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
+
+ distLock()->expectLock(
+ [](StringData name,
+ StringData whyMessage,
+ milliseconds waitFor,
+ milliseconds lockTryInterval) {
+ ASSERT_EQUALS("authorizationData", name);
+ ASSERT_EQUALS("dropUser", whyMessage);
+ },
+ Status::OK());
+
+ auto future =
+ launchAsync([this] {
+ BSONObjBuilder responseBuilder;
+ bool ok =
+ catalogManager()->runUserManagementWriteCommand(
+ operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern" << BSON("w" << 1 << "wtimeout" << 30)),
+ &responseBuilder);
+ ASSERT_FALSE(ok);
+
+ Status commandStatus = Command::getStatusFromCommandResult(responseBuilder.obj());
+ ASSERT_EQUALS(ErrorCodes::UserNotFound, commandStatus);
+ });
+
+ onCommand([](const RemoteCommandRequest& request) {
+ ASSERT_EQUALS("test", request.dbname);
+ ASSERT_EQUALS(BSON("dropUser"
+ << "test"
+ << "writeConcern" << BSON("w"
+ << "majority"
+ << "wtimeout" << 30) << "maxTimeMS" << 30000),
request.cmdObj);
ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
@@ -803,9 +878,12 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandNotMasterRetrySuc
onCommand([host2](const RemoteCommandRequest& request) {
ASSERT_EQUALS(host2, request.target);
ASSERT_EQUALS("test", request.dbname);
+ // Since no write concern was sent we will add w:majority
ASSERT_EQUALS(BSON("dropUser"
<< "test"
- << "maxTimeMS" << 30000),
+ << "writeConcern" << BSON("w"
+ << "majority"
+ << "wtimeout" << 0) << "maxTimeMS" << 30000),
request.cmdObj);
ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
diff --git a/src/mongo/s/cluster_write.cpp b/src/mongo/s/cluster_write.cpp
index 5bc28be8fb3..cd2ca6fa233 100644
--- a/src/mongo/s/cluster_write.cpp
+++ b/src/mongo/s/cluster_write.cpp
@@ -204,9 +204,9 @@ void ClusterWriter::write(OperationContext* txn,
BatchedCommandResponse* response) {
// Add _ids to insert request if req'd
unique_ptr<BatchedCommandRequest> idRequest(BatchedCommandRequest::cloneWithIds(origRequest));
- const BatchedCommandRequest& request = NULL != idRequest.get() ? *idRequest : origRequest;
+ const BatchedCommandRequest* request = NULL != idRequest.get() ? idRequest.get() : &origRequest;
- const NamespaceString& nss = request.getNS();
+ const NamespaceString& nss = request->getNS();
if (!nss.isValid()) {
toBatchError(Status(ErrorCodes::InvalidNamespace, nss.ns() + " is not a valid namespace"),
response);
@@ -220,13 +220,13 @@ void ClusterWriter::write(OperationContext* txn,
return;
}
- if (request.sizeWriteOps() == 0u) {
+ if (request->sizeWriteOps() == 0u) {
toBatchError(Status(ErrorCodes::InvalidLength, "no write ops were included in the batch"),
response);
return;
}
- if (request.sizeWriteOps() > BatchedCommandRequest::kMaxWriteBatchSize) {
+ if (request->sizeWriteOps() > BatchedCommandRequest::kMaxWriteBatchSize) {
toBatchError(Status(ErrorCodes::InvalidLength,
str::stream() << "exceeded maximum write batch size of "
<< BatchedCommandRequest::kMaxWriteBatchSize),
@@ -235,7 +235,7 @@ void ClusterWriter::write(OperationContext* txn,
}
string errMsg;
- if (request.isInsertIndexRequest() && !request.isValidIndexRequest(&errMsg)) {
+ if (request->isInsertIndexRequest() && !request->isValidIndexRequest(&errMsg)) {
toBatchError(Status(ErrorCodes::InvalidOptions, errMsg), response);
return;
}
@@ -243,26 +243,62 @@ void ClusterWriter::write(OperationContext* txn,
// Config writes and shard writes are done differently
const string dbName = nss.db().toString();
+ unique_ptr<BatchedCommandRequest> requestWithWriteConcern;
if (dbName == "config" || dbName == "admin") {
- grid.catalogManager(txn)->writeConfigServerDirect(txn, request, response);
+ // w:majority is the only valid write concern for writes to the config servers.
+ // We also allow w:1 to come in on a user-initiated write, though we convert it here to
+ // w:majority before sending it to the config servers.
+ bool rewriteCmdWithWriteConcern = false;
+ WriteConcernOptions writeConcern;
+ if (request->isWriteConcernSet()) {
+ Status status = writeConcern.parse(request->getWriteConcern());
+ if (!status.isOK()) {
+ toBatchError(status, response);
+ return;
+ }
+ if (!writeConcern.validForConfigServers()) {
+ toBatchError(Status(ErrorCodes::InvalidOptions,
+ "Invalid replication write concern. Writes to config servers "
+ "must use w:'majority'"),
+ response);
+ return;
+ }
+ if (writeConcern.wMode == "") {
+ invariant(writeConcern.wNumNodes == 1);
+ rewriteCmdWithWriteConcern = true;
+ }
+ } else {
+ rewriteCmdWithWriteConcern = true;
+ }
+
+ if (rewriteCmdWithWriteConcern) {
+ requestWithWriteConcern.reset(new BatchedCommandRequest(request->getBatchType()));
+ request->cloneTo(requestWithWriteConcern.get());
+ writeConcern.wMode = WriteConcernOptions::kMajority;
+ writeConcern.wNumNodes = 0;
+ requestWithWriteConcern->setWriteConcern(writeConcern.toBSON());
+ request = requestWithWriteConcern.get();
+ }
+
+ grid.catalogManager(txn)->writeConfigServerDirect(txn, *request, response);
} else {
- ChunkManagerTargeter targeter(request.getTargetingNSS());
+ ChunkManagerTargeter targeter(request->getTargetingNSS());
Status targetInitStatus = targeter.init(txn);
if (!targetInitStatus.isOK()) {
// Errors will be reported in response if we are unable to target
warning() << "could not initialize targeter for"
- << (request.isInsertIndexRequest() ? " index" : "")
- << " write op in collection " << request.getTargetingNS();
+ << (request->isInsertIndexRequest() ? " index" : "")
+ << " write op in collection " << request->getTargetingNS();
}
DBClientShardResolver resolver;
DBClientMultiCommand dispatcher;
BatchWriteExec exec(&targeter, &resolver, &dispatcher);
- exec.executeBatch(txn, request, response);
+ exec.executeBatch(txn, *request, response);
if (_autoSplit) {
- splitIfNeeded(txn, request.getNS(), *targeter.getStats());
+ splitIfNeeded(txn, request->getNS(), *targeter.getStats());
}
_stats->setShardStats(exec.releaseStats());