author     Dianna Hohensee <dianna.hohensee@10gen.com>  2017-06-02 16:26:32 -0400
committer  Dianna Hohensee <dianna.hohensee@10gen.com>  2017-06-15 09:18:13 -0400
commit     9c3099ca1e4a33af44771543626601ab3eab8750 (patch)
tree       6888bab241972834774a0678faa78674195e268b
parent     59ea14f97808e343a4a10a1b5352b7096b695e38 (diff)
download   mongo-9c3099ca1e4a33af44771543626601ab3eab8750.tar.gz
SERVER-27713 create an OpObserver for shard chunk metadata updates
-rw-r--r--  src/mongo/bson/util/bson_extract.cpp                                      4
-rw-r--r--  src/mongo/db/namespace_string.cpp                                         7
-rw-r--r--  src/mongo/db/namespace_string.h                                           6
-rw-r--r--  src/mongo/db/op_observer_impl.cpp                                         2
-rw-r--r--  src/mongo/db/s/collection_range_deleter.cpp                               6
-rw-r--r--  src/mongo/db/s/collection_sharding_state.cpp                            106
-rw-r--r--  src/mongo/db/s/collection_sharding_state.h                               48
-rw-r--r--  src/mongo/db/s/collection_sharding_state_test.cpp                         8
-rw-r--r--  src/mongo/db/s/sharding_state.cpp                                        16
-rw-r--r--  src/mongo/db/s/sharding_state_recovery.cpp                               11
-rw-r--r--  src/mongo/db/s/sharding_state_test.cpp                                    8
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp                   4
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp    2
-rw-r--r--  src/mongo/s/catalog/type_shard_collection.cpp                             3
14 files changed, 174 insertions, 57 deletions
diff --git a/src/mongo/bson/util/bson_extract.cpp b/src/mongo/bson/util/bson_extract.cpp
index 8a453c02472..b6ea2f8853e 100644
--- a/src/mongo/bson/util/bson_extract.cpp
+++ b/src/mongo/bson/util/bson_extract.cpp
@@ -38,7 +38,9 @@ Status bsonExtractField(const BSONObj& object, StringData fieldName, BSONElement
return Status(ErrorCodes::NoSuchKey,
mongoutils::str::stream() << "Missing expected field \""
<< fieldName.toString()
- << "\"");
+ << "\" in object '"
+ << object
+ << "'.");
*outElement = element;
return Status::OK();
}
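
For illustration, a failed extraction now reports the offending object alongside the missing field name. A minimal sketch (document and field name are made up):

    BSONObj doc = BSON("x" << 1);
    BSONElement elem;
    Status status = bsonExtractField(doc, "y", &elem);
    // status.reason() now reads roughly:
    //   Missing expected field "y" in object '{ x: 1 }'.
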
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index 0e195ee7870..c41e4b91bb6 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -72,7 +72,7 @@ const string escapeTable[256] = {
".240", ".241", ".242", ".243", ".244", ".245", ".246", ".247", ".248", ".249", ".250", ".251",
".252", ".253", ".254", ".255"};
-const char kConfigCollection[] = "admin.system.version";
+const char kServerConfiguration[] = "admin.system.version";
const char kLogicalTimeKeysCollection[] = "admin.system.keys";
constexpr auto listCollectionsCursorCol = "$cmd.listCollections"_sd;
@@ -91,7 +91,7 @@ bool legalClientSystemNS(StringData ns) {
if (ns == "admin.system.roles")
return true;
- if (ns == kConfigCollection)
+ if (ns == kServerConfiguration)
return true;
if (ns == kLogicalTimeKeysCollection)
return true;
@@ -113,8 +113,9 @@ constexpr StringData NamespaceString::kAdminDb;
constexpr StringData NamespaceString::kLocalDb;
constexpr StringData NamespaceString::kConfigDb;
constexpr StringData NamespaceString::kSystemDotViewsCollectionName;
+constexpr StringData NamespaceString::kShardConfigCollectionsCollectionName;
-const NamespaceString NamespaceString::kConfigCollectionNamespace(kConfigCollection);
+const NamespaceString NamespaceString::kServerConfigurationNamespace(kServerConfiguration);
bool NamespaceString::isListCollectionsCursorNS() const {
return coll() == listCollectionsCursorCol;
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index f49054c57ef..dc1714c2e10 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -72,12 +72,16 @@ public:
// Name for the system views collection
static constexpr StringData kSystemDotViewsCollectionName = "system.views"_sd;
+ // Name for a shard's collections metadata collection, each document of which indicates the
+ // state of a specific collection.
+ static constexpr StringData kShardConfigCollectionsCollectionName = "config.collections"_sd;
+
// Namespace for storing configuration data, which needs to be replicated if the server is
// running as a replica set. Documents in this collection should represent some configuration
// state of the server, which needs to be recovered/consulted at startup. Each document in this
// namespace should have its _id set to some string, which meaningfully describes what it
// represents.
- static const NamespaceString kConfigCollectionNamespace;
+ static const NamespaceString kServerConfigurationNamespace;
/**
* Constructs an empty NamespaceString.
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index 3e306a4c3eb..d7b47edeb2f 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -117,7 +117,7 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
auto css = CollectionShardingState::get(opCtx, args.nss);
if (!args.fromMigrate) {
- css->onUpdateOp(opCtx, args.updatedDoc);
+ css->onUpdateOp(opCtx, args.criteria, args.update, args.updatedDoc);
}
if (strstr(args.nss.ns().c_str(), ".system.js")) {
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index 2392305fe5d..fb2197b8d0c 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -126,11 +126,11 @@ auto CollectionRangeDeleter::cleanUpNextRange(OperationContext* opCtx,
// documents in the range -- excepting any queries with a read-concern option
// 'ignoreChunkMigration'
try {
- auto& adminSystemVersion = NamespaceString::kConfigCollectionNamespace;
+ auto& serverConfigurationNss = NamespaceString::kServerConfigurationNamespace;
auto epoch = scopedCollectionMetadata->getCollVersion().epoch();
- AutoGetCollection autoAdmin(opCtx, adminSystemVersion, MODE_IX);
+ AutoGetCollection autoAdmin(opCtx, serverConfigurationNss, MODE_IX);
- Helpers::upsert(opCtx, adminSystemVersion.ns(),
+ Helpers::upsert(opCtx, serverConfigurationNss.ns(),
BSON("_id" << "startRangeDeletion" << "ns" << nss.ns() << "epoch" << epoch
<< "min" << range->getMin() << "max" << range->getMax()));
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index 551224a5198..8a12519b94b 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/s/collection_sharding_state.h"
+#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/lock_state.h"
#include "mongo/db/db_raii.h"
@@ -50,6 +51,8 @@
#include "mongo/s/catalog/sharding_catalog_manager.h"
#include "mongo/s/catalog/type_config_version.h"
#include "mongo/s/catalog/type_shard.h"
+#include "mongo/s/catalog/type_shard_collection.h"
+#include "mongo/s/catalog_cache.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/cluster_identity_loader.h"
#include "mongo/s/grid.h"
@@ -82,6 +85,18 @@ private:
const ShardIdentityType _shardIdentity;
};
+/**
+ * Checks via the ReplicationCoordinator whether this server is a primary/standalone that can do
+ * writes. This function may return false if the server is primary but in drain mode.
+ *
+ * Note: expects the global lock to be held so that a meaningful answer is returned -- replica set
+ * state cannot change under a lock.
+ */
+bool isPrimary(OperationContext* opCtx, const NamespaceString& nss) {
+ // If the server can execute writes, then it is either a primary or standalone.
+ return repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss);
+}
+
} // unnamed namespace
CollectionShardingState::CollectionShardingState(ServiceContext* sc, NamespaceString nss)
@@ -251,7 +266,7 @@ void CollectionShardingState::onInsertOp(OperationContext* opCtx, const BSONObj&
dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
- _nss == NamespaceString::kConfigCollectionNamespace) {
+ _nss == NamespaceString::kServerConfigurationNamespace) {
if (auto idElem = insertedDoc["_id"]) {
if (idElem.str() == ShardIdentityType::IdName) {
auto shardIdentityDoc = uassertStatusOK(ShardIdentityType::fromBSON(insertedDoc));
@@ -269,11 +284,20 @@ void CollectionShardingState::onInsertOp(OperationContext* opCtx, const BSONObj&
}
}
-void CollectionShardingState::onUpdateOp(OperationContext* opCtx, const BSONObj& updatedDoc) {
+void CollectionShardingState::onUpdateOp(OperationContext* opCtx,
+ const BSONObj& query,
+ const BSONObj& update,
+ const BSONObj& updatedDoc) {
dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
checkShardVersionOrThrow(opCtx);
+ if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
+ _nss == NamespaceString::kShardConfigCollectionsCollectionName) {
+ _onConfigRefreshCompleteInvalidateCachedMetadata(opCtx, query, update);
+ }
+
if (_sourceMgr) {
_sourceMgr->getCloner()->onUpdateOp(opCtx, updatedDoc);
}
@@ -283,19 +307,23 @@ void CollectionShardingState::onDeleteOp(OperationContext* opCtx,
const CollectionShardingState::DeleteState& deleteState) {
dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
- if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
- _nss == NamespaceString::kConfigCollectionNamespace) {
-
- if (auto idElem = deleteState.idDoc["_id"]) {
- auto idStr = idElem.str();
- if (idStr == ShardIdentityType::IdName) {
- if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
- uasserted(40070,
- "cannot delete shardIdentity document while in --shardsvr mode");
- } else {
- warning() << "Shard identity document rolled back. Will shut down after "
- "finishing rollback.";
- ShardIdentityRollbackNotifier::get(opCtx)->recordThatRollbackHappened();
+ if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+ if (_nss == NamespaceString::kShardConfigCollectionsCollectionName) {
+ _onConfigDeleteInvalidateCachedMetadata(opCtx, deleteState.idDoc);
+ }
+
+ if (_nss == NamespaceString::kServerConfigurationNamespace) {
+ if (auto idElem = deleteState.idDoc["_id"]) {
+ auto idStr = idElem.str();
+ if (idStr == ShardIdentityType::IdName) {
+ if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
+ uasserted(40070,
+ "cannot delete shardIdentity document while in --shardsvr mode");
+ } else {
+ warning() << "Shard identity document rolled back. Will shut down after "
+ "finishing rollback.";
+ ShardIdentityRollbackNotifier::get(opCtx)->recordThatRollbackHappened();
+ }
}
}
}
@@ -327,7 +355,7 @@ void CollectionShardingState::onDropCollection(OperationContext* opCtx,
dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
- _nss == NamespaceString::kConfigCollectionNamespace) {
+ _nss == NamespaceString::kServerConfigurationNamespace) {
// Dropping system collections is not allowed for end users.
invariant(!opCtx->writesAreReplicated());
invariant(repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback());
@@ -354,6 +382,52 @@ void CollectionShardingState::onDropCollection(OperationContext* opCtx,
}
}
+void CollectionShardingState::_onConfigRefreshCompleteInvalidateCachedMetadata(
+ OperationContext* opCtx, const BSONObj& query, const BSONObj& update) {
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
+ invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+
+ // If not primary, check whether a chunk metadata refresh just finished and, if so,
+ // invalidate the catalog cache's in-memory chunk metadata for this collection.
+ if (!isPrimary(opCtx, _nss)) {
+ // Extract which collection entry is being updated
+ std::string refreshCollection;
+ fassertStatusOK(
+ 40477,
+ bsonExtractStringField(query, ShardCollectionType::uuid.name(), &refreshCollection));
+
+ // Parse the '$set' update, which will contain the 'refreshSequenceNumber' if it is present.
+ BSONElement updateElement;
+ fassertStatusOK(40478,
+ bsonExtractTypedField(update, StringData("$set"), Object, &updateElement));
+ BSONObj setField = updateElement.Obj();
+
+ // The refreshSequenceNumber is only updated when a chunk metadata refresh completes.
+ if (setField.hasField(ShardCollectionType::refreshSequenceNumber.name())) {
+ Grid::get(opCtx)->catalogCache()->invalidateShardedCollection(refreshCollection);
+ }
+ }
+}
+
+void CollectionShardingState::_onConfigDeleteInvalidateCachedMetadata(OperationContext* opCtx,
+ const BSONObj& query) {
+ dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
+ invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+
+ // If not primary, invalidate the catalog cache's in-memory chunk metadata cache for the
+ // collection specified in 'query'. The collection metadata has been dropped, so the cached
+ // metadata must be invalidated.
+ if (!isPrimary(opCtx, _nss)) {
+ // Extract which collection entry is being deleted from the _id field.
+ std::string deletedCollection;
+ fassertStatusOK(
+ 40479,
+ bsonExtractStringField(query, ShardCollectionType::uuid.name(), &deletedCollection));
+
+ Grid::get(opCtx)->catalogCache()->invalidateShardedCollection(deletedCollection);
+ }
+}
+
bool CollectionShardingState::_checkShardVersionOk(OperationContext* opCtx,
string* errmsg,
ChunkVersion* expectedShardVersion,
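
To make the new hooks concrete, here are hedged examples of the replicated writes on a shard's config.collections that would cause a secondary to invalidate its cached routing table for a collection (namespace and sequence number are made up):

    // onUpdateOp path -- the $set touches refreshSequenceNumber, so invalidate:
    //   query:  { _id: "test.user" }
    //   update: { $set: { refreshSequenceNumber: 3 } }
    //
    // onDeleteOp path -- the collection's entry is being dropped, so invalidate:
    //   idDoc:  { _id: "test.user" }
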
diff --git a/src/mongo/db/s/collection_sharding_state.h b/src/mongo/db/s/collection_sharding_state.h
index 28ad2ad12d1..a393f9a8363 100644
--- a/src/mongo/db/s/collection_sharding_state.h
+++ b/src/mongo/db/s/collection_sharding_state.h
@@ -205,21 +205,53 @@ public:
*/
boost::optional<KeyRange> getNextOrphanRange(BSONObj const& startingFrom);
- // Replication subsystem hooks. If this collection is serving as a source for migration, these
- // methods inform it of any changes to its contents.
-
+ /**
+ * Replication oplog OpObserver hooks. Informs the sharding system of changes that may be
+ * relevant to ongoing operations.
+ *
+ * The global exclusive lock is expected to be held by the caller of any of these functions.
+ */
bool isDocumentInMigratingChunk(OperationContext* opCtx, const BSONObj& doc);
-
void onInsertOp(OperationContext* opCtx, const BSONObj& insertedDoc);
-
- void onUpdateOp(OperationContext* opCtx, const BSONObj& updatedDoc);
-
+ void onUpdateOp(OperationContext* opCtx,
+ const BSONObj& query,
+ const BSONObj& update,
+ const BSONObj& updatedDoc);
void onDeleteOp(OperationContext* opCtx, const DeleteState& deleteState);
-
void onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName);
private:
/**
+ * On shard secondaries, invalidates the catalog cache's in-memory routing table for the
+ * collection specified in the _id field of 'query' if 'update' bumps the refreshSequenceNumber
+ * -- meaning a chunk metadata refresh finished being applied to the collection's locally
+ * persisted metadata store. Invalidating the catalog cache's routing table will provoke a
+ * routing table refresh next time the routing table is requested and the new metadata updates
+ * will be found.
+ *
+ * query - BSON with an _id that identifies which config.collections entry is being updated.
+ * update - the $set update being applied to that entry.
+ *
+ * The global exclusive lock is expected to be held by the caller.
+ */
+ void _onConfigRefreshCompleteInvalidateCachedMetadata(OperationContext* opCtx,
+ const BSONObj& query,
+ const BSONObj& update);
+
+ /**
+ * On secondaries, invalidates the catalog cache's in-memory routing table for the collection
+ * specified in the _id field of 'query' because the collection entry has been deleted as part
+ * of dropping the data collection. Invalidating the catalog cache's routing table will provoke
+ * a routing table refresh next time the routing table is requested and the metadata update will
+ * be discovered.
+ *
+ * query - BSON with an _id field that identifies which config.collections entry is being deleted.
+ *
+ * The global exclusive lock is expected to be held by the caller.
+ */
+ void _onConfigDeleteInvalidateCachedMetadata(OperationContext* opCtx, const BSONObj& query);
+
+ /**
* Checks whether the shard version of the operation matches that of the collection.
*
* opCtx - Operation context from which to retrieve the operation's expected version.
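
Pulling the hooks together, a simplified sketch of the secondary-side path (not the literal call chain; the OplogUpdateEntryArgs fields are the ones used in op_observer_impl.cpp above):

    void onReplicatedCatalogWrite(OperationContext* opCtx,
                                  const OplogUpdateEntryArgs& args) {
        auto css = CollectionShardingState::get(opCtx, args.nss);
        // For a write on the shard's config.collections, onUpdateOp() inspects
        // the query and update; if the $set bumps refreshSequenceNumber, the
        // catalog cache entry for the collection named in query["_id"] is
        // invalidated, so the next routing request refreshes the metadata.
        css->onUpdateOp(opCtx, args.criteria, args.update, args.updatedDoc);
    }
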
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index c1b15ce8c0c..adf569a3141 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -73,7 +73,7 @@ TEST_F(CollShardingStateTest, GlobalInitGetsCalledAfterWriteCommits) {
Lock::GlobalWrite lock(operationContext());
CollectionShardingState collShardingState(getServiceContext(),
- NamespaceString::kConfigCollectionNamespace);
+ NamespaceString::kServerConfigurationNamespace);
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
@@ -97,7 +97,7 @@ TEST_F(CollShardingStateTest, GlobalInitDoesntGetCalledIfWriteAborts) {
Lock::GlobalWrite lock(operationContext());
CollectionShardingState collShardingState(getServiceContext(),
- NamespaceString::kConfigCollectionNamespace);
+ NamespaceString::kServerConfigurationNamespace);
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
@@ -143,7 +143,7 @@ TEST_F(CollShardingStateTest, OnInsertOpThrowWithIncompleteShardIdentityDocument
Lock::GlobalWrite lock(operationContext());
CollectionShardingState collShardingState(getServiceContext(),
- NamespaceString::kConfigCollectionNamespace);
+ NamespaceString::kServerConfigurationNamespace);
ShardIdentityType shardIdentity;
shardIdentity.setShardName("a");
@@ -157,7 +157,7 @@ TEST_F(CollShardingStateTest, GlobalInitDoesntGetsCalledIfShardIdentityDocWasNot
Lock::GlobalWrite lock(operationContext());
CollectionShardingState collShardingState(getServiceContext(),
- NamespaceString::kConfigCollectionNamespace);
+ NamespaceString::kServerConfigurationNamespace);
WriteUnitOfWork wuow(operationContext());
collShardingState.onInsertOp(operationContext(), BSON("_id" << 1));
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index bf150a59957..04f0049c2e5 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -412,7 +412,7 @@ StatusWith<bool> ShardingState::initializeShardingAwarenessIfNeeded(OperationCon
"queryableBackupMode. If not in queryableBackupMode, you can edit "
"the shardIdentity document by starting the server *without* "
"--shardsvr, manually updating the shardIdentity document in the "
- << NamespaceString::kConfigCollectionNamespace.toString()
+ << NamespaceString::kServerConfigurationNamespace.toString()
<< " collection, and restarting the server with --shardsvr."};
}
@@ -420,7 +420,8 @@ StatusWith<bool> ShardingState::initializeShardingAwarenessIfNeeded(OperationCon
BSONObj shardIdentityBSON;
bool foundShardIdentity = false;
try {
- AutoGetCollection autoColl(opCtx, NamespaceString::kConfigCollectionNamespace, MODE_IS);
+ AutoGetCollection autoColl(
+ opCtx, NamespaceString::kServerConfigurationNamespace, MODE_IS);
foundShardIdentity = Helpers::findOne(opCtx,
autoColl.getCollection(),
BSON("_id" << ShardIdentityType::IdName),
@@ -433,7 +434,7 @@ StatusWith<bool> ShardingState::initializeShardingAwarenessIfNeeded(OperationCon
if (!foundShardIdentity) {
warning() << "Started with --shardsvr, but no shardIdentity document was found on "
"disk in "
- << NamespaceString::kConfigCollectionNamespace
+ << NamespaceString::kServerConfigurationNamespace
<< ". This most likely means this server has not yet been added to a "
"sharded cluster.";
return false;
@@ -459,7 +460,7 @@ StatusWith<bool> ShardingState::initializeShardingAwarenessIfNeeded(OperationCon
if (!shardIdentityBSON.isEmpty()) {
warning() << "Not started with --shardsvr, but a shardIdentity document was found "
"on disk in "
- << NamespaceString::kConfigCollectionNamespace << ": "
+ << NamespaceString::kServerConfigurationNamespace << ": "
<< shardIdentityBSON;
}
return false;
@@ -583,14 +584,15 @@ Status ShardingState::updateShardIdentityConfigString(OperationContext* opCtx,
const std::string& newConnectionString) {
BSONObj updateObj(ShardIdentityType::createConfigServerUpdateObject(newConnectionString));
- UpdateRequest updateReq(NamespaceString::kConfigCollectionNamespace);
+ UpdateRequest updateReq(NamespaceString::kServerConfigurationNamespace);
updateReq.setQuery(BSON("_id" << ShardIdentityType::IdName));
updateReq.setUpdates(updateObj);
- UpdateLifecycleImpl updateLifecycle(NamespaceString::kConfigCollectionNamespace);
+ UpdateLifecycleImpl updateLifecycle(NamespaceString::kServerConfigurationNamespace);
updateReq.setLifecycle(&updateLifecycle);
try {
- AutoGetOrCreateDb autoDb(opCtx, NamespaceString::kConfigCollectionNamespace.db(), MODE_X);
+ AutoGetOrCreateDb autoDb(
+ opCtx, NamespaceString::kServerConfigurationNamespace.db(), MODE_X);
auto result = update(opCtx, autoDb.getDb(), updateReq);
if (result.numMatched == 0) {
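
For illustration, and assuming createConfigServerUpdateObject builds a $set on the connection-string field (the field name appears in the tests in this patch), the resulting upsert is roughly:

    // query:  { _id: "shardIdentity" }
    // update: { $set: { configsvrConnectionString: "<new connection string>" } }
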
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index 72ee2b87028..5f322d945ee 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -188,7 +188,8 @@ Status modifyRecoveryDocument(OperationContext* opCtx,
try {
// Use boost::optional so we can release the locks early
boost::optional<AutoGetOrCreateDb> autoGetOrCreateDb;
- autoGetOrCreateDb.emplace(opCtx, NamespaceString::kConfigCollectionNamespace.db(), MODE_X);
+ autoGetOrCreateDb.emplace(
+ opCtx, NamespaceString::kServerConfigurationNamespace.db(), MODE_X);
BSONObj updateObj = RecoveryDocument::createChangeObj(
grid.shardRegistry()->getConfigServerConnectionString(),
@@ -198,11 +199,11 @@ Status modifyRecoveryDocument(OperationContext* opCtx,
LOG(1) << "Changing sharding recovery document " << redact(updateObj);
- UpdateRequest updateReq(NamespaceString::kConfigCollectionNamespace);
+ UpdateRequest updateReq(NamespaceString::kServerConfigurationNamespace);
updateReq.setQuery(RecoveryDocument::getQuery());
updateReq.setUpdates(updateObj);
updateReq.setUpsert();
- UpdateLifecycleImpl updateLifecycle(NamespaceString::kConfigCollectionNamespace);
+ UpdateLifecycleImpl updateLifecycle(NamespaceString::kServerConfigurationNamespace);
updateReq.setLifecycle(&updateLifecycle);
UpdateResult result = update(opCtx, autoGetOrCreateDb->getDb(), updateReq);
@@ -254,7 +255,7 @@ Status ShardingStateRecovery::recover(OperationContext* opCtx) {
BSONObj recoveryDocBSON;
try {
- AutoGetCollection autoColl(opCtx, NamespaceString::kConfigCollectionNamespace, MODE_IS);
+ AutoGetCollection autoColl(opCtx, NamespaceString::kServerConfigurationNamespace, MODE_IS);
if (!Helpers::findOne(
opCtx, autoColl.getCollection(), RecoveryDocument::getQuery(), recoveryDocBSON)) {
return Status::OK();
@@ -289,7 +290,7 @@ Status ShardingStateRecovery::recover(OperationContext* opCtx) {
Status status =
grid.catalogClient(opCtx)->logChange(opCtx,
"Sharding minOpTime recovery",
- NamespaceString::kConfigCollectionNamespace.ns(),
+ NamespaceString::kServerConfigurationNamespace.ns(),
recoveryDocBSON,
ShardingCatalogClient::kMajorityWriteConcern);
if (!status.isOK())
diff --git a/src/mongo/db/s/sharding_state_test.cpp b/src/mongo/db/s/sharding_state_test.cpp
index c47acfa143e..754baec8754 100644
--- a/src/mongo/db/s/sharding_state_test.cpp
+++ b/src/mongo/db/s/sharding_state_test.cpp
@@ -419,7 +419,7 @@ TEST_F(ShardingStateTest,
<< "shardIdentity"
<< "configsvrConnectionString"
<< "invalid");
- _dbDirectClient->insert(NamespaceString::kConfigCollectionNamespace.toString(),
+ _dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
invalidShardIdentity);
storageGlobalParams.readOnly = false;
@@ -450,7 +450,7 @@ TEST_F(ShardingStateTest,
ASSERT_OK(shardIdentity.validate());
BSONObj validShardIdentity = shardIdentity.toBSON();
- _dbDirectClient->insert(NamespaceString::kConfigCollectionNamespace.toString(),
+ _dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
validShardIdentity);
storageGlobalParams.readOnly = false;
@@ -478,7 +478,7 @@ TEST_F(ShardingStateTest,
TEST_F(ShardingStateTest,
InitializeShardingAwarenessIfNeededNotReadOnlyAndNotShardServerAndInvalidShardIdentity) {
- _dbDirectClient->insert(NamespaceString::kConfigCollectionNamespace.toString(),
+ _dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
BSON("_id"
<< "shardIdentity"
<< "configsvrConnectionString"
@@ -510,7 +510,7 @@ TEST_F(ShardingStateTest,
ASSERT_OK(shardIdentity.validate());
BSONObj validShardIdentity = shardIdentity.toBSON();
- _dbDirectClient->insert(NamespaceString::kConfigCollectionNamespace.toString(),
+ _dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
validShardIdentity);
// The shardIdentity doc on disk is ignored if ClusterRole is None.
diff --git a/src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp b/src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp
index 9302406e38c..1bbc85e887d 100644
--- a/src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp
@@ -161,7 +161,7 @@ protected:
invariant(request.parseBSON("admin", upsertCmdObj, &errMsg) || !request.isValid(&errMsg));
expectUpdatesReturnSuccess(expectedHost,
- NamespaceString(NamespaceString::kConfigCollectionNamespace),
+ NamespaceString(NamespaceString::kServerConfigurationNamespace),
request.getUpdateRequest());
}
@@ -178,7 +178,7 @@ protected:
invariant(request.parseBSON("admin", upsertCmdObj, &errMsg) || !request.isValid(&errMsg));
expectUpdatesReturnFailure(expectedHost,
- NamespaceString(NamespaceString::kConfigCollectionNamespace),
+ NamespaceString(NamespaceString::kServerConfigurationNamespace),
request.getUpdateRequest(),
statusToReturn);
}
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
index 674cba90727..9d766d11f46 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
@@ -727,7 +727,7 @@ BSONObj ShardingCatalogManagerImpl::createShardIdentityUpsertForAddShard(
updateRequest->addToUpdates(updateDoc.release());
BatchedCommandRequest commandRequest(updateRequest.release());
- commandRequest.setNS(NamespaceString::kConfigCollectionNamespace);
+ commandRequest.setNS(NamespaceString::kServerConfigurationNamespace);
commandRequest.setWriteConcern(ShardingCatalogClient::kMajorityWriteConcern.toBSON());
return commandRequest.toBSON();
diff --git a/src/mongo/s/catalog/type_shard_collection.cpp b/src/mongo/s/catalog/type_shard_collection.cpp
index 935a37248de..1737f453564 100644
--- a/src/mongo/s/catalog/type_shard_collection.cpp
+++ b/src/mongo/s/catalog/type_shard_collection.cpp
@@ -39,7 +39,8 @@
namespace mongo {
-const std::string ShardCollectionType::ConfigNS = "config.collections";
+const std::string ShardCollectionType::ConfigNS =
+ NamespaceString::kShardConfigCollectionsCollectionName.toString();
const BSONField<std::string> ShardCollectionType::uuid("_id");
const BSONField<std::string> ShardCollectionType::ns("ns");
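
Putting the pieces together, a document in a shard's config.collections might look roughly like this (values are made up; fields not named in this patch are elided):

    // { _id: "test.user",           // namespace string (ShardCollectionType::uuid)
    //   refreshSequenceNumber: 3,   // bumped when a chunk metadata refresh completes
    //   ... }
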