author    Esha Maharishi <esha.maharishi@mongodb.com>  2017-10-12 21:52:12 -0400
committer Esha Maharishi <esha.maharishi@mongodb.com>  2017-10-17 15:32:55 -0400
commit    9abf192c564e60cef196921dc1c20efddbdf6477 (patch)
tree      ce28484b6542ecd135f7251abf0368e3f6b7fe5c /src/mongo/db/s
parent    aeabbf96ff3c2990f553ba0a5e6e1d18ebddab2f (diff)
download  mongo-9abf192c564e60cef196921dc1c20efddbdf6477.tar.gz
SERVER-31524 make all metadata commands wait for majority writeConcern when returning success early
Diffstat (limited to 'src/mongo/db/s')
-rw-r--r--  src/mongo/db/s/config/configsvr_add_shard_command.cpp         5
-rw-r--r--  src/mongo/db/s/config/configsvr_create_database_command.cpp   7
-rw-r--r--  src/mongo/db/s/config/configsvr_enable_sharding_command.cpp   7
-rw-r--r--  src/mongo/db/s/config/configsvr_move_primary_command.cpp     25
-rw-r--r--  src/mongo/db/s/config/configsvr_remove_shard_command.cpp      5
-rw-r--r--  src/mongo/db/s/config/configsvr_shard_collection_command.cpp 19
6 files changed, 46 insertions(+), 22 deletions(-)
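
The commit applies the same two-step pattern to each of the six config server commands above: reject the request up front unless it was sent with majority writeConcern, and on any early-success return that only performed a local read, advance the client's lastOp to the system's last opTime so the ordinary post-command writeConcern wait covers the earlier metadata write. A minimal sketch of the precondition check follows; the uassert body is taken from the hunks below, while the free-function wrapper, its name, and the include paths are illustrative only (each command in the diff inlines the check instead):

    #include "mongo/base/string_data.h"
    #include "mongo/bson/bsonobj.h"
    #include "mongo/db/operation_context.h"
    #include "mongo/db/write_concern_options.h"
    #include "mongo/util/assert_util.h"
    #include "mongo/util/mongoutils/str.h"

    namespace mongo {

    // Hypothetical helper showing the check this commit adds to every metadata command.
    void uassertCalledWithMajorityWriteConcern(OperationContext* opCtx,
                                               StringData commandName,
                                               const BSONObj& cmdObj) {
        uassert(ErrorCodes::InvalidOptions,
                str::stream() << commandName
                              << " must be called with majority writeConcern, got "
                              << cmdObj,
                opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
    }

    }  // namespace mongo
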
diff --git a/src/mongo/db/s/config/configsvr_add_shard_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
index 5f927830da1..214e109ebb3 100644
--- a/src/mongo/db/s/config/configsvr_add_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
@@ -116,6 +116,11 @@ public:
return appendCommandStatus(result, validationStatus);
}
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << "addShard must be called with majority writeConcern, got "
+ << cmdObj,
+ opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
+
audit::logAddShard(Client::getCurrent(),
parsedRequest.hasName() ? parsedRequest.getName() : "",
parsedRequest.getConnString().toString(),
diff --git a/src/mongo/db/s/config/configsvr_create_database_command.cpp b/src/mongo/db/s/config/configsvr_create_database_command.cpp
index 567216846d9..86690de4e2e 100644
--- a/src/mongo/db/s/config/configsvr_create_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_create_database_command.cpp
@@ -112,6 +112,11 @@ public:
str::stream() << "invalid db name specified: " << dbname,
NamespaceString::validDBName(dbname, NamespaceString::DollarInDbNameBehavior::Allow));
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << "createDatabase must be called with majority writeConcern, got "
+ << cmdObj,
+ opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
+
// Make sure to force update of any stale metadata
ON_BLOCK_EXIT([opCtx, dbname] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbname); });
@@ -125,7 +130,7 @@ public:
auto dbDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
opCtx, dbname, "createDatabase", DistLockManager::kDefaultLockTimeout));
- uassertStatusOK(ShardingCatalogManager::get(opCtx)->createDatabase(opCtx, dbname));
+ ShardingCatalogManager::get(opCtx)->createDatabase(opCtx, dbname);
return true;
}
diff --git a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
index ae10f744052..62b1ff60606 100644
--- a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
+++ b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
@@ -118,6 +118,11 @@ public:
str::stream() << "can't shard " + dbname + " database"});
}
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << "enableSharding must be called with majority writeConcern, got "
+ << cmdObj,
+ opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
+
// Make sure to force update of any stale metadata
ON_BLOCK_EXIT([opCtx, dbname] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbname); });
@@ -131,7 +136,7 @@ public:
auto dbDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
opCtx, dbname, "enableSharding", DistLockManager::kDefaultLockTimeout));
- uassertStatusOK(ShardingCatalogManager::get(opCtx)->enableSharding(opCtx, dbname));
+ ShardingCatalogManager::get(opCtx)->enableSharding(opCtx, dbname);
audit::logEnableSharding(Client::getCurrent(), dbname);
return true;
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index ecacfac74eb..8b6b1491528 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -132,6 +132,11 @@ public:
str::stream() << "Can't move primary for " << dbname << " database"});
}
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << "movePrimary must be called with majority writeConcern, got "
+ << cmdObj,
+ opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
+
auto const catalogClient = Grid::get(opCtx)->catalogClient();
auto const catalogCache = Grid::get(opCtx)->catalogCache();
auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
@@ -175,22 +180,12 @@ public:
}();
if (fromShard->getId() == toShard->getId()) {
- // Since we did a local read of the database entry above, make sure we wait for majority
- // commit before returning success. (A previous movePrimary attempt may have failed with
- // a write concern error).
- // If the Client for this movePrimary attempt is the *same* as the one that made the
- // earlier movePrimary attempt, the opTime on the ReplClientInfo for this Client will
- // already be at least as recent as the earlier movePrimary's write. However, if this
- // is a *different* Client, the opTime may not be as recent, so to be safe we wait for
- // the system's last opTime to be majority-committed.
+ // We did a local read of the database entry above and found that this movePrimary
+ // request was already satisfied. However, the data may not be majority committed (a
+ // previous movePrimary attempt may have failed with a write concern error).
+ // Since the current Client doesn't know the opTime of the last write to the database
+ // entry, make it wait for the last opTime in the system when we wait for writeConcern.
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
- WriteConcernResult unusedWCResult;
- uassertStatusOK(
- waitForWriteConcern(opCtx,
- repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(),
- kMajorityWriteConcern,
- &unusedWCResult));
-
result << "primary" << toShard->toString();
return true;
}
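
For the early-return path, the commit drops the explicit waitForWriteConcern call and only bumps the client's lastOp; the write concern attached to the command, now guaranteed to be majority by the new uassert, is waited on once by the generic command completion path. A sketch of the resulting flow, reusing the names from the code removed above; that the final wait happens outside the command body rather than inline is the assumption being relied on here:

    // Make the client's lastOp at least as recent as the config write that a previous,
    // possibly failed, movePrimary attempt may have performed.
    repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);

    // Equivalent of the wait that now happens after the command returns, against the
    // caller-supplied (majority) write concern instead of a hard-coded kMajorityWriteConcern.
    WriteConcernResult unusedWCResult;
    uassertStatusOK(
        waitForWriteConcern(opCtx,
                            repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(),
                            opCtx->getWriteConcern(),
                            &unusedWCResult));
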
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
index b7c10d58540..ad4188d7647 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
@@ -98,6 +98,11 @@ public:
cmdObj.firstElement().type() == BSONType::String);
const std::string target = cmdObj.firstElement().str();
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << "removeShard must be called with majority writeConcern, got "
+ << cmdObj,
+ opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
+
const auto shardStatus =
Grid::get(opCtx)->shardRegistry()->getShard(opCtx, ShardId(target));
if (!shardStatus.isOK()) {
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index 17d0bd1b2cd..53d0f5a2042 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/collation/collator_factory_interface.h"
+#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/repl_set_config.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/sessions_collection.h"
@@ -255,8 +256,6 @@ boost::optional<CollectionType> checkIfAlreadyShardedWithSameOptions(
OperationContext* opCtx,
const NamespaceString& nss,
const ConfigsvrShardCollectionRequest& request) {
- // TODO (SERVER-31027): Replace this direct read with using the routing table cache once UUIDs
- // are stored in the routing table cache.
auto existingColls =
uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
opCtx,
@@ -268,8 +267,6 @@ boost::optional<CollectionType> checkIfAlreadyShardedWithSameOptions(
1))
.docs;
- // If the collection is already sharded, fail if the deduced options in this request do not
- // match the options the collection was originally sharded with.
if (!existingColls.empty()) {
auto existingOptions = uassertStatusOK(CollectionType::fromBSON(existingColls.front()));
@@ -279,13 +276,20 @@ boost::optional<CollectionType> checkIfAlreadyShardedWithSameOptions(
requestedOptions.setDefaultCollation(*request.getCollation());
requestedOptions.setUnique(request.getUnique());
+ // If the collection is already sharded, fail if the deduced options in this request do not
+ // match the options the collection was originally sharded with.
uassert(ErrorCodes::AlreadyInitialized,
str::stream() << "sharding already enabled for collection " << nss.ns()
<< " with options "
<< existingOptions.toString(),
requestedOptions.hasSameOptions(existingOptions));
- // If the options do match, return the existing collection's full spec.
+ // We did a local read of the collection entry above and found that this shardCollection
+ // request was already satisfied. However, the data may not be majority committed (a
+ // previous shardCollection attempt may have failed with a writeConcern error).
+ // Since the current Client doesn't know the opTime of the last write to the collection
+ // entry, make it wait for the last opTime in the system when we wait for writeConcern.
+ repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
return existingOptions;
}
@@ -734,6 +738,11 @@ public:
"_configsvrShardCollection can only be run on config servers",
serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << "shardCollection must be called with majority writeConcern, got "
+ << cmdObj,
+ opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
+
// Do not allow sharding collections while a featureCompatibilityVersion upgrade or
// downgrade is in progress (see SERVER-31231 for details).
Lock::ExclusiveLock lk(opCtx->lockState(), FeatureCompatibilityVersion::fcvLock);
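
From the caller's side (for example, mongos forwarding these commands to the config server), the practical effect is that the forwarded command document must carry a majority writeConcern or it is rejected with InvalidOptions. A hedged illustration using _configsvrShardCollection; the namespace and shard-key field shown are placeholders, not taken from this diff:

    // Illustrative only: the shape of a command body a caller would now have to send.
    BSONObj cmdObj = BSON("_configsvrShardCollection"
                          << "test.coll"
                          << "key" << BSON("_id" << 1)
                          << "writeConcern" << BSON("w" << "majority"));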