author    Kevin Pulo <kevin.pulo@mongodb.com>  2018-02-01 07:39:32 +0000
committer Kevin Pulo <kevin.pulo@mongodb.com>  2018-02-01 07:39:32 +0000
commit    6476d7205b2bf4a8d02bf877bb84f1290878bb04
tree      643d0826ea220b521e605d3f537b7b978eec3a83 /src/mongo/db
parent    ad7235f3b739c3aeb0c21ddaba58fd1cb494cc61
SERVER-18137 Use NamespaceString for fully-qualified ns in sharding
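
The change pattern is uniform across the diff below: namespace constants such as KeysCollectionDocument::ConfigNS and MigrationType::ConfigNS become NamespaceString instead of std::string, and call sites stop re-wrapping them in NamespaceString(...). A minimal sketch of the idea, using a simplified stand-in for the real mongo::NamespaceString (declared in "mongo/db/namespace_string.h", which this commit adds as an include where needed) — illustration only, not the actual class:

    // Sketch only: a simplified stand-in for mongo::NamespaceString, showing
    // why the sharding code now passes a dedicated type instead of a raw string.
    #include <string>
    #include <utility>

    class NamespaceString {
    public:
        NamespaceString() = default;
        explicit NamespaceString(std::string ns) : _ns(std::move(ns)) {}

        // Construct from database and collection names, as the updated tests do:
        //     const NamespaceString collName(dbName, "bar");
        NamespaceString(const std::string& db, const std::string& coll)
            : _ns(db + "." + coll) {}

        // Full "<db>.<collection>" string, for the few sites that still need it
        // (e.g. building log messages or BSON query documents).
        const std::string& ns() const {
            return _ns;
        }

        bool operator==(const NamespaceString& other) const {
            return _ns == other._ns;
        }

    private:
        std::string _ns;
    };

    // Before this commit, constants were strings and every caller wrapped them:
    //     static const std::string ConfigNS;  // "admin.system.keys"
    //     insertToConfigCollection(
    //         opCtx, NamespaceString(KeysCollectionDocument::ConfigNS), doc);
    //
    // After, the constant is already a NamespaceString and is passed directly:
    //     static const NamespaceString ConfigNS;
    //     insertToConfigCollection(opCtx, KeysCollectionDocument::ConfigNS, doc);
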
Diffstat (limited to 'src/mongo/db')
 src/mongo/db/catalog/coll_mod.cpp                                |  2
 src/mongo/db/key_generator_update_test.cpp                       | 14
 src/mongo/db/keys_collection_cache_test.cpp                      | 20
 src/mongo/db/keys_collection_client_direct.cpp                   |  5
 src/mongo/db/keys_collection_client_direct.h                     |  2
 src/mongo/db/keys_collection_document.cpp                        |  2
 src/mongo/db/keys_collection_document.h                          |  3
 src/mongo/db/keys_collection_manager_sharding_test.cpp           | 20
 src/mongo/db/s/balancer/balancer.cpp                             |  4
 src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp | 13
 src/mongo/db/s/balancer/balancer_policy.cpp                      |  8
 src/mongo/db/s/balancer/balancer_policy.h                        |  3
 src/mongo/db/s/balancer/balancer_policy_test.cpp                 |  2
 src/mongo/db/s/balancer/migration_manager.cpp                    |  6
 src/mongo/db/s/balancer/migration_manager_test.cpp               | 60
 src/mongo/db/s/balancer/scoped_migration_request.cpp             |  7
 src/mongo/db/s/balancer/scoped_migration_request_test.cpp        |  4
 src/mongo/db/s/balancer/type_migration.cpp                       |  8
 src/mongo/db/s/balancer/type_migration.h                         |  3
 src/mongo/db/s/chunk_splitter.cpp                                |  4
 src/mongo/db/s/collection_sharding_state.cpp                     |  4
 src/mongo/db/s/config/configsvr_drop_collection_command.cpp      |  2
 src/mongo/db/s/config/configsvr_shard_collection_command.cpp     |  6
 src/mongo/db/s/shard_metadata_util.cpp                           |  4
 src/mongo/db/s/shard_metadata_util_test.cpp                      |  2
 src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp        |  6
 26 files changed, 107 insertions(+), 107 deletions(-)
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 0f9ea8a543a..ee1de8b7627 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -595,7 +595,7 @@ void updateUUIDSchemaVersion(OperationContext* opCtx, bool upgrade) {
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(CollectionType::ConfigNS),
+ CollectionType::ConfigNS,
BSON("dropped" << false), // query
BSONObj(), // sort
boost::none // limit
diff --git a/src/mongo/db/key_generator_update_test.cpp b/src/mongo/db/key_generator_update_test.cpp
index 79b26326f18..ee33e8204bf 100644
--- a/src/mongo/db/key_generator_update_test.cpp
+++ b/src/mongo/db/key_generator_update_test.cpp
@@ -120,7 +120,7 @@ TEST_F(KeyGeneratorUpdateTest, ShouldCreateAnotherKeyIfOnlyOneKeyExists) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
{
auto allKeys = getKeys(operationContext());
@@ -167,12 +167,12 @@ TEST_F(KeyGeneratorUpdateTest, ShouldCreateAnotherKeyIfNoValidKeyAfterCurrent) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
{
auto allKeys = getKeys(operationContext());
@@ -250,12 +250,12 @@ TEST_F(KeyGeneratorUpdateTest, ShouldCreate2KeysIfAllKeysAreExpired) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
{
auto allKeys = getKeys(operationContext());
@@ -347,12 +347,12 @@ TEST_F(KeyGeneratorUpdateTest, ShouldNotCreateNewKeyIfThereAre2UnexpiredKeys) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
{
auto allKeys = getKeys(operationContext());
diff --git a/src/mongo/db/keys_collection_cache_test.cpp b/src/mongo/db/keys_collection_cache_test.cpp
index 6f50180ac8d..5ecbd828915 100644
--- a/src/mongo/db/keys_collection_cache_test.cpp
+++ b/src/mongo/db/keys_collection_cache_test.cpp
@@ -78,7 +78,7 @@ TEST_F(CacheTest, GetKeyShouldReturnCorrectKeyAfterRefresh) {
KeysCollectionDocument origKey1(
1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
auto refreshStatus = cache.refresh(operationContext());
ASSERT_OK(refreshStatus.getStatus());
@@ -109,7 +109,7 @@ TEST_F(CacheTest, GetKeyShouldReturnErrorIfNoKeyIsValidForGivenTime) {
KeysCollectionDocument origKey1(
1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
auto refreshStatus = cache.refresh(operationContext());
ASSERT_OK(refreshStatus.getStatus());
@@ -132,17 +132,17 @@ TEST_F(CacheTest, GetKeyShouldReturnOldestKeyPossible) {
KeysCollectionDocument origKey0(
0, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey0.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey0.toBSON()));
KeysCollectionDocument origKey1(
1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
auto refreshStatus = cache.refresh(operationContext());
ASSERT_OK(refreshStatus.getStatus());
@@ -173,7 +173,7 @@ TEST_F(CacheTest, RefreshShouldNotGetKeysForOtherPurpose) {
KeysCollectionDocument origKey0(
0, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey0.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey0.toBSON()));
{
auto refreshStatus = cache.refresh(operationContext());
@@ -186,7 +186,7 @@ TEST_F(CacheTest, RefreshShouldNotGetKeysForOtherPurpose) {
KeysCollectionDocument origKey1(
1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
{
auto refreshStatus = cache.refresh(operationContext());
@@ -217,7 +217,7 @@ TEST_F(CacheTest, RefreshCanIncrementallyGetNewKeys) {
KeysCollectionDocument origKey0(
0, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey0.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey0.toBSON()));
{
auto refreshStatus = cache.refresh(operationContext());
@@ -237,12 +237,12 @@ TEST_F(CacheTest, RefreshCanIncrementallyGetNewKeys) {
KeysCollectionDocument origKey1(
1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
{
auto refreshStatus = cache.refresh(operationContext());
diff --git a/src/mongo/db/keys_collection_client_direct.cpp b/src/mongo/db/keys_collection_client_direct.cpp
index bd97a92a20f..4148b589bab 100644
--- a/src/mongo/db/keys_collection_client_direct.cpp
+++ b/src/mongo/db/keys_collection_client_direct.cpp
@@ -83,7 +83,7 @@ StatusWith<std::vector<KeysCollectionDocument>> KeysCollectionClientDirect::getN
auto findStatus = _query(opCtx,
ReadPreferenceSetting(ReadPreference::Nearest, TagSet{}),
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(KeysCollectionDocument::ConfigNS),
+ KeysCollectionDocument::ConfigNS,
queryBuilder.obj(),
BSON("expiresAt" << 1),
boost::none);
@@ -130,10 +130,9 @@ StatusWith<Shard::QueryResponse> KeysCollectionClientDirect::_query(
}
Status KeysCollectionClientDirect::_insert(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) {
- const NamespaceString nss(ns);
BatchedCommandRequest request([&] {
write_ops::Insert insertOp(nss);
insertOp.setDocuments({doc});
diff --git a/src/mongo/db/keys_collection_client_direct.h b/src/mongo/db/keys_collection_client_direct.h
index f588fcc9d6e..ff5784770cd 100644
--- a/src/mongo/db/keys_collection_client_direct.h
+++ b/src/mongo/db/keys_collection_client_direct.h
@@ -73,7 +73,7 @@ private:
boost::optional<long long> limit);
Status _insert(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& doc,
const WriteConcernOptions& writeConcern);
diff --git a/src/mongo/db/keys_collection_document.cpp b/src/mongo/db/keys_collection_document.cpp
index 155eb730a31..f9444250d57 100644
--- a/src/mongo/db/keys_collection_document.cpp
+++ b/src/mongo/db/keys_collection_document.cpp
@@ -47,7 +47,7 @@ const char kExpiresAtFieldName[] = "expiresAt";
} // namespace
-const std::string KeysCollectionDocument::ConfigNS = "admin.system.keys";
+const NamespaceString KeysCollectionDocument::ConfigNS("admin.system.keys");
StatusWith<KeysCollectionDocument> KeysCollectionDocument::fromBSON(const BSONObj& source) {
long long keyId;
diff --git a/src/mongo/db/keys_collection_document.h b/src/mongo/db/keys_collection_document.h
index 5571982fdac..c7fbae7ed79 100644
--- a/src/mongo/db/keys_collection_document.h
+++ b/src/mongo/db/keys_collection_document.h
@@ -30,6 +30,7 @@
#include "mongo/base/status.h"
#include "mongo/db/logical_time.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/time_proof_service.h"
namespace mongo {
@@ -47,7 +48,7 @@ namespace mongo {
*/
class KeysCollectionDocument {
public:
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
KeysCollectionDocument(long long keyId,
std::string purpose,
diff --git a/src/mongo/db/keys_collection_manager_sharding_test.cpp b/src/mongo/db/keys_collection_manager_sharding_test.cpp
index ccdd0b653b8..4653653c836 100644
--- a/src/mongo/db/keys_collection_manager_sharding_test.cpp
+++ b/src/mongo/db/keys_collection_manager_sharding_test.cpp
@@ -109,7 +109,7 @@ TEST_F(KeysManagerShardedTest, GetKeyWithSingleKey) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
auto keyStatus =
keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)));
@@ -127,12 +127,12 @@ TEST_F(KeysManagerShardedTest, GetKeyWithMultipleKeys) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(205, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
auto keyStatus =
keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)));
@@ -159,7 +159,7 @@ TEST_F(KeysManagerShardedTest, GetKeyShouldErrorIfKeyIdMismatchKey) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
auto keyStatus =
keyManager()->getKeyForValidation(operationContext(), 2, LogicalTime(Timestamp(100, 0)));
@@ -172,11 +172,11 @@ TEST_F(KeysManagerShardedTest, GetKeyWithoutRefreshShouldReturnRightKey) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
{
auto keyStatus = keyManager()->getKeyForValidation(
@@ -207,7 +207,7 @@ TEST_F(KeysManagerShardedTest, GetKeyForSigningShouldReturnRightKey) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
keyManager()->refreshNow(operationContext());
@@ -226,11 +226,11 @@ TEST_F(KeysManagerShardedTest, GetKeyForSigningShouldReturnRightOldKey) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
keyManager()->refreshNow(operationContext());
@@ -297,7 +297,7 @@ TEST_F(KeysManagerShardedTest, ShouldStillBeAbleToUpdateCacheEvenIfItCantCreateK
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
// Set the time to be very ahead so the updater will be forced to create new keys.
const LogicalTime fakeTime(Timestamp(20000, 0));
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 99fed7b8737..d537260c666 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -605,7 +605,7 @@ int Balancer::_moveChunks(OperationContext* opCtx,
log() << "Performing a split because migration " << redact(requestIt->toString())
<< " failed for size reasons" << causedBy(redact(status));
- _splitOrMarkJumbo(opCtx, NamespaceString(requestIt->ns), requestIt->minKey);
+ _splitOrMarkJumbo(opCtx, requestIt->nss, requestIt->minKey);
continue;
}
@@ -650,7 +650,7 @@ void Balancer::_splitOrMarkJumbo(OperationContext* opCtx,
chunk->markAsJumbo();
- const std::string chunkName = ChunkType::genID(nss.ns(), chunk->getMin());
+ const std::string chunkName = ChunkType::genID(nss, chunk->getMin());
auto status = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
opCtx,
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index c00455f05cd..5e89baca4e1 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -86,12 +86,11 @@ StatusWith<DistributionStatus> createCollectionDistributionStatus(
Grid::get(opCtx)->catalogClient()->getTagsForCollection(opCtx, chunkMgr->getns());
if (!swCollectionTags.isOK()) {
return swCollectionTags.getStatus().withContext(
- str::stream() << "Unable to load tags for collection " << chunkMgr->getns());
+ str::stream() << "Unable to load tags for collection " << chunkMgr->getns().ns());
}
const auto& collectionTags = swCollectionTags.getValue();
- DistributionStatus distribution(NamespaceString(chunkMgr->getns()),
- std::move(shardToChunksMap));
+ DistributionStatus distribution(chunkMgr->getns(), std::move(shardToChunksMap));
// Cache the collection tags
const auto& keyPattern = chunkMgr->getShardKeyPattern().getKeyPattern();
@@ -298,8 +297,8 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* op
const auto& shardStats = shardStatsStatus.getValue();
auto routingInfoStatus =
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
- opCtx, NamespaceString(chunk.getNS()));
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
+ chunk.getNS());
if (!routingInfoStatus.isOK()) {
return routingInfoStatus.getStatus();
}
@@ -327,8 +326,8 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* opCt
auto shardStats = std::move(shardStatsStatus.getValue());
auto routingInfoStatus =
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
- opCtx, NamespaceString(chunk.getNS()));
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
+ chunk.getNS());
if (!routingInfoStatus.isOK()) {
return routingInfoStatus.getStatus();
}
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index 46a20de8676..7239f9b9e61 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -537,7 +537,7 @@ MigrateInfo::MigrateInfo(const ShardId& a_to, const ChunkType& a_chunk) {
to = a_to;
- ns = a_chunk.getNS();
+ nss = a_chunk.getNS();
from = a_chunk.getShard();
minKey = a_chunk.getMin();
maxKey = a_chunk.getMax();
@@ -545,12 +545,12 @@ MigrateInfo::MigrateInfo(const ShardId& a_to, const ChunkType& a_chunk) {
}
std::string MigrateInfo::getName() const {
- return ChunkType::genID(ns, minKey);
+ return ChunkType::genID(nss, minKey);
}
string MigrateInfo::toString() const {
- return str::stream() << ns << ": [" << minKey << ", " << maxKey << "), from " << from << ", to "
- << to;
+ return str::stream() << nss.ns() << ": [" << minKey << ", " << maxKey << "), from " << from
+ << ", to " << to;
}
} // namespace mongo
diff --git a/src/mongo/db/s/balancer/balancer_policy.h b/src/mongo/db/s/balancer/balancer_policy.h
index afba1db4915..ed70c40d3c9 100644
--- a/src/mongo/db/s/balancer/balancer_policy.h
+++ b/src/mongo/db/s/balancer/balancer_policy.h
@@ -34,6 +34,7 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/simple_bsonobj_comparator.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/s/balancer/cluster_statistics.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/shard_id.h"
@@ -57,7 +58,7 @@ struct MigrateInfo {
std::string toString() const;
- std::string ns;
+ NamespaceString nss;
ShardId to;
ShardId from;
BSONObj minKey;
diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp
index f6abd7c8acf..14e34cf298a 100644
--- a/src/mongo/db/s/balancer/balancer_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp
@@ -91,7 +91,7 @@ std::pair<ShardStatisticsVector, ShardToChunksMap> generateCluster(
for (size_t i = 0; i < numChunks; i++, currentChunk++) {
ChunkType chunk;
- chunk.setNS(kNamespace.ns());
+ chunk.setNS(kNamespace);
chunk.setMin(currentChunk == 0 ? shardKeyPattern.globalMin()
: BSON("x" << currentChunk));
chunk.setMax(currentChunk == totalNumChunks - 1 ? shardKeyPattern.globalMax()
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 0beffad2508..ce7ed13d743 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -183,7 +183,7 @@ Status MigrationManager::executeManualMigration(
auto routingInfoStatus =
Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
- opCtx, NamespaceString(migrateInfo.ns));
+ opCtx, migrateInfo.nss);
if (!routingInfoStatus.isOK()) {
return routingInfoStatus.getStatus();
}
@@ -228,7 +228,7 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx)
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(MigrationType::ConfigNS),
+ MigrationType::ConfigNS,
BSONObj(),
BSONObj(),
boost::none);
@@ -417,7 +417,7 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
bool waitForDelete) {
- const NamespaceString nss(migrateInfo.ns);
+ const NamespaceString& nss = migrateInfo.nss;
// Ensure we are not stopped in order to avoid doing the extra work
{
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index 70718a25504..515ebceb7b2 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -99,13 +99,13 @@ protected:
* Inserts a document into the config.collections collection to indicate that "collName" is
* sharded with version "version". The shard key pattern defaults to "_id".
*/
- void setUpCollection(const std::string collName, ChunkVersion version);
+ void setUpCollection(const NamespaceString& collName, ChunkVersion version);
/**
* Inserts a document into the config.chunks collection so that the chunk defined by the
* parameters exists. Returns a ChunkType defined by the parameters.
*/
- ChunkType setUpChunk(const std::string& collName,
+ ChunkType setUpChunk(const NamespaceString& collName,
const BSONObj& chunkMin,
const BSONObj& chunkMax,
const ShardId& shardId,
@@ -188,9 +188,9 @@ void MigrationManagerTest::setUpDatabase(const std::string& dbName, const ShardI
operationContext(), DatabaseType::ConfigNS, db.toBSON(), kMajorityWriteConcern));
}
-void MigrationManagerTest::setUpCollection(const std::string collName, ChunkVersion version) {
+void MigrationManagerTest::setUpCollection(const NamespaceString& collName, ChunkVersion version) {
CollectionType coll;
- coll.setNs(NamespaceString(collName));
+ coll.setNs(collName);
coll.setEpoch(version.epoch());
coll.setUpdatedAt(Date_t::fromMillisSinceEpoch(version.toLong()));
coll.setKeyPattern(kKeyPattern);
@@ -199,7 +199,7 @@ void MigrationManagerTest::setUpCollection(const std::string collName, ChunkVers
operationContext(), CollectionType::ConfigNS, coll.toBSON(), kMajorityWriteConcern));
}
-ChunkType MigrationManagerTest::setUpChunk(const std::string& collName,
+ChunkType MigrationManagerTest::setUpChunk(const NamespaceString& collName,
const BSONObj& chunkMin,
const BSONObj& chunkMax,
const ShardId& shardId,
@@ -217,7 +217,7 @@ ChunkType MigrationManagerTest::setUpChunk(const std::string& collName,
void MigrationManagerTest::setUpMigration(const ChunkType& chunk, const ShardId& toShard) {
BSONObjBuilder builder;
- builder.append(MigrationType::ns(), chunk.getNS());
+ builder.append(MigrationType::ns(), chunk.getNS().ns());
builder.append(MigrationType::min(), chunk.getMin());
builder.append(MigrationType::max(), chunk.getMax());
builder.append(MigrationType::toShard(), toShard.toString());
@@ -237,7 +237,7 @@ void MigrationManagerTest::checkMigrationsCollectionIsEmptyAndLocksAreUnlocked()
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(MigrationType::ConfigNS),
+ MigrationType::ConfigNS,
BSONObj(),
BSONObj(),
boost::none);
@@ -249,7 +249,7 @@ void MigrationManagerTest::checkMigrationsCollectionIsEmptyAndLocksAreUnlocked()
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(LocksType::ConfigNS),
+ LocksType::ConfigNS,
BSON(LocksType::state(LocksType::LOCKED) << LocksType::name("{ '$ne' : 'balancer'}")),
BSONObj(),
boost::none);
@@ -262,13 +262,13 @@ void MigrationManagerTest::expectMoveChunkCommand(const ChunkType& chunk,
const BSONObj& response) {
onCommand([&chunk, &toShardId, &response](const RemoteCommandRequest& request) {
NamespaceString nss(request.cmdObj.firstElement().valueStringData());
- ASSERT_EQ(chunk.getNS(), nss.ns());
+ ASSERT_EQ(chunk.getNS(), nss);
const StatusWith<MoveChunkRequest> moveChunkRequestWithStatus =
MoveChunkRequest::createFromCommand(nss, request.cmdObj);
ASSERT_OK(moveChunkRequestWithStatus.getStatus());
- ASSERT_EQ(chunk.getNS(), moveChunkRequestWithStatus.getValue().getNss().ns());
+ ASSERT_EQ(chunk.getNS(), moveChunkRequestWithStatus.getValue().getNss());
ASSERT_BSONOBJ_EQ(chunk.getMin(), moveChunkRequestWithStatus.getValue().getMinKey());
ASSERT_BSONOBJ_EQ(chunk.getMax(), moveChunkRequestWithStatus.getValue().getMaxKey());
ASSERT_EQ(chunk.getShard(), moveChunkRequestWithStatus.getValue().getFromShardId());
@@ -295,8 +295,8 @@ TEST_F(MigrationManagerTest, OneCollectionTwoMigrations) {
operationContext(), ShardType::ConfigNS, kShard2, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(2, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -347,8 +347,8 @@ TEST_F(MigrationManagerTest, TwoCollectionsTwoMigrationsEach) {
// Set up a database and two collections as sharded in the metadata.
std::string dbName = "foo";
- std::string collName1 = "foo.bar";
- std::string collName2 = "foo.baz";
+ const NamespaceString collName1(dbName, "bar");
+ const NamespaceString collName2(dbName, "baz");
ChunkVersion version1(2, 0, OID::gen());
ChunkVersion version2(2, 0, OID::gen());
@@ -413,8 +413,8 @@ TEST_F(MigrationManagerTest, SourceShardNotFound) {
operationContext(), ShardType::ConfigNS, kShard2, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(2, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -463,8 +463,8 @@ TEST_F(MigrationManagerTest, JumboChunkResponseBackwardsCompatibility) {
operationContext(), ShardType::ConfigNS, kShard0, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(2, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -505,8 +505,8 @@ TEST_F(MigrationManagerTest, InterruptMigration) {
operationContext(), ShardType::ConfigNS, kShard0, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(2, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -564,7 +564,7 @@ TEST_F(MigrationManagerTest, InterruptMigration) {
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(MigrationType::ConfigNS),
+ MigrationType::ConfigNS,
BSON(MigrationType::name(chunk.getName())),
BSONObj(),
boost::none);
@@ -588,8 +588,8 @@ TEST_F(MigrationManagerTest, RestartMigrationManager) {
operationContext(), ShardType::ConfigNS, kShard0, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(2, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -633,8 +633,8 @@ TEST_F(MigrationManagerTest, MigrationRecovery) {
operationContext(), ShardType::ConfigNS, kShard2, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(1, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -689,8 +689,8 @@ TEST_F(MigrationManagerTest, FailMigrationRecovery) {
operationContext(), ShardType::ConfigNS, kShard2, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(1, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -725,7 +725,7 @@ TEST_F(MigrationManagerTest, FailMigrationRecovery) {
// session ID used here doesn't matter.
ASSERT_OK(catalogClient()->getDistLockManager()->lockWithSessionID(
operationContext(),
- collName,
+ collName.ns(),
"MigrationManagerTest",
OID::gen(),
DistLockManager::kSingleLockAttemptTimeout));
@@ -748,8 +748,8 @@ TEST_F(MigrationManagerTest, RemoteCallErrorConversionToOperationFailed) {
operationContext(), ShardType::ConfigNS, kShard2, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(1, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
diff --git a/src/mongo/db/s/balancer/scoped_migration_request.cpp b/src/mongo/db/s/balancer/scoped_migration_request.cpp
index d55855b14b8..6cf19c1f4bf 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request.cpp
@@ -110,7 +110,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(MigrationType::ConfigNS),
+ MigrationType::ConfigNS,
BSON(MigrationType::name(migrateInfo.getName())),
BSONObj(),
boost::none);
@@ -156,8 +156,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
// As long as there isn't a DuplicateKey error, the document may have been written, and it's
// safe (won't delete another migration's document) and necessary to try to clean up the
// document via the destructor.
- ScopedMigrationRequest scopedMigrationRequest(
- opCtx, NamespaceString(migrateInfo.ns), migrateInfo.minKey);
+ ScopedMigrationRequest scopedMigrationRequest(opCtx, migrateInfo.nss, migrateInfo.minKey);
// If there was a write error, let the object go out of scope and clean up in the
// destructor.
@@ -173,7 +172,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
<< "number of retries. Chunk '"
<< ChunkRange(migrateInfo.minKey, migrateInfo.maxKey).toString()
<< "' in collection '"
- << migrateInfo.ns
+ << migrateInfo.nss.ns()
<< "' was being moved (somewhere) by another operation.");
}
diff --git a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
index 1aab0157210..1d9b7ba6c7d 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
@@ -71,7 +71,7 @@ void ScopedMigrationRequestTest::checkMigrationsCollectionForDocument(
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(MigrationType::ConfigNS),
+ MigrationType::ConfigNS,
BSON(MigrationType::name(chunkName)),
BSONObj(),
boost::none);
@@ -161,7 +161,7 @@ TEST_F(ScopedMigrationRequestTest, CreateScopedMigrationRequestOnRecovery) {
// still removes the document corresponding to the MigrationRequest.
{
ScopedMigrationRequest scopedMigrationRequest = ScopedMigrationRequest::createForRecovery(
- operationContext(), NamespaceString(migrateInfo.ns), migrateInfo.minKey);
+ operationContext(), migrateInfo.nss, migrateInfo.minKey);
checkMigrationsCollectionForDocument(migrateInfo.getName(), 1);
}
diff --git a/src/mongo/db/s/balancer/type_migration.cpp b/src/mongo/db/s/balancer/type_migration.cpp
index efaf453048d..e6341b77685 100644
--- a/src/mongo/db/s/balancer/type_migration.cpp
+++ b/src/mongo/db/s/balancer/type_migration.cpp
@@ -40,7 +40,7 @@ const StringData kChunkVersion = "chunkVersion"_sd;
} // namespace
-const std::string MigrationType::ConfigNS = "config.migrations";
+const NamespaceString MigrationType::ConfigNS("config.migrations");
const BSONField<std::string> MigrationType::name("_id");
const BSONField<std::string> MigrationType::ns("ns");
@@ -53,7 +53,7 @@ const BSONField<bool> MigrationType::waitForDelete("waitForDelete");
MigrationType::MigrationType() = default;
MigrationType::MigrationType(MigrateInfo info, bool waitForDelete)
- : _nss(NamespaceString(info.ns)),
+ : _nss(info.nss),
_min(info.minKey),
_max(info.maxKey),
_fromShard(info.from),
@@ -138,7 +138,7 @@ BSONObj MigrationType::toBSON() const {
MigrateInfo MigrationType::toMigrateInfo() const {
ChunkType chunk;
- chunk.setNS(_nss.ns());
+ chunk.setNS(_nss);
chunk.setShard(_fromShard);
chunk.setMin(_min);
chunk.setMax(_max);
@@ -148,7 +148,7 @@ MigrateInfo MigrationType::toMigrateInfo() const {
}
std::string MigrationType::getName() const {
- return ChunkType::genID(_nss.ns(), _min);
+ return ChunkType::genID(_nss, _min);
}
} // namespace mongo
diff --git a/src/mongo/db/s/balancer/type_migration.h b/src/mongo/db/s/balancer/type_migration.h
index dd35bd9b844..000145c0941 100644
--- a/src/mongo/db/s/balancer/type_migration.h
+++ b/src/mongo/db/s/balancer/type_migration.h
@@ -29,6 +29,7 @@
#pragma once
#include "mongo/bson/bsonobj.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/s/balancer/balancer_policy.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard.h"
@@ -43,7 +44,7 @@ namespace mongo {
class MigrationType {
public:
// Name of the migrations collection in the config server.
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
// Field names and types in the migrations collection type.
static const BSONField<std::string> name;
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index caea3e11fed..f36ed19a0ea 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -117,7 +117,7 @@ void moveChunk(OperationContext* opCtx, const NamespaceString& nss, const BSONOb
const auto suggestedChunk = routingInfo.cm()->findIntersectingChunkWithSimpleCollation(minKey);
ChunkType chunkToMove;
- chunkToMove.setNS(nss.ns());
+ chunkToMove.setNS(nss);
chunkToMove.setShard(suggestedChunk->getShardId());
chunkToMove.setMin(suggestedChunk->getMin());
chunkToMove.setMax(suggestedChunk->getMax());
@@ -201,7 +201,7 @@ bool isAutoBalanceEnabled(OperationContext* opCtx,
if (!balancerConfig->shouldBalanceForAutoSplit())
return false;
- auto collStatus = Grid::get(opCtx)->catalogClient()->getCollection(opCtx, nss.ns());
+ auto collStatus = Grid::get(opCtx)->catalogClient()->getCollection(opCtx, nss);
if (!collStatus.isOK()) {
log() << "Auto-split for " << nss << " failed to load collection metadata"
<< causedBy(redact(collStatus.getStatus()));
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index e94547ccfe9..78e9c8b4d55 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -430,7 +430,7 @@ void CollectionShardingState::onDeleteOp(OperationContext* opCtx,
}
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- if (_nss.ns() == VersionType::ConfigNS) {
+ if (_nss == VersionType::ConfigNS) {
if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
uasserted(40302, "cannot delete config.version document while in --configsvr mode");
} else {
@@ -467,7 +467,7 @@ void CollectionShardingState::onDropCollection(OperationContext* opCtx,
}
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- if (_nss.ns() == VersionType::ConfigNS) {
+ if (_nss == VersionType::ConfigNS) {
if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
uasserted(40303, "cannot drop config.version document while in --configsvr mode");
} else {
diff --git a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
index fd2c5ca9b93..c5fd2ecfc0f 100644
--- a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
@@ -130,7 +130,7 @@ public:
[opCtx, nss] { Grid::get(opCtx)->catalogCache()->invalidateShardedCollection(nss); });
auto collStatus = catalogClient->getCollection(
- opCtx, nss.toString(), repl::ReadConcernLevel::kLocalReadConcern);
+ opCtx, nss, repl::ReadConcernLevel::kLocalReadConcern);
if (collStatus == ErrorCodes::NamespaceNotFound) {
// We checked the sharding catalog and found that this collection doesn't exist.
// This may be because it never existed, or because a drop command was sent
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index 63b6f00a48d..d221a719bb8 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -261,7 +261,7 @@ boost::optional<CollectionType> checkIfAlreadyShardedWithSameOptions(
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(CollectionType::ConfigNS),
+ CollectionType::ConfigNS,
BSON("_id" << nss.ns() << "dropped" << false),
BSONObj(),
1))
@@ -569,7 +569,7 @@ void migrateAndFurtherSplitInitialChunks(OperationContext* opCtx,
}
ChunkType chunkType;
- chunkType.setNS(nss.ns());
+ chunkType.setNS(nss);
chunkType.setMin(chunk->getMin());
chunkType.setMax(chunk->getMax());
chunkType.setShard(chunk->getShardId());
@@ -881,7 +881,7 @@ public:
// Step 6. Actually shard the collection.
catalogManager->shardCollection(opCtx,
- nss.ns(),
+ nss,
uuid,
shardKeyPattern,
*request.getCollation(),
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index 88a3d761cee..de8140a76e1 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -148,11 +148,11 @@ StatusWith<ShardCollectionType> readShardCollectionsEntry(OperationContext* opCt
try {
DBDirectClient client(opCtx);
std::unique_ptr<DBClientCursor> cursor =
- client.query(ShardCollectionType::ConfigNS.c_str(), fullQuery, 1);
+ client.query(ShardCollectionType::ConfigNS.ns(), fullQuery, 1);
if (!cursor) {
return Status(ErrorCodes::OperationFailed,
str::stream() << "Failed to establish a cursor for reading "
- << ShardCollectionType::ConfigNS
+ << ShardCollectionType::ConfigNS.ns()
<< " from local storage");
}
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index fa86f74a4ec..0fc7d24d5b6 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -322,7 +322,7 @@ TEST_F(ShardMetadataUtilTest, DropChunksAndDeleteCollectionsEntry) {
ASSERT_OK(dropChunksAndDeleteCollectionsEntry(operationContext(), kNss));
checkCollectionIsEmpty(kChunkMetadataNss);
// Collections collection should be empty because it only had one entry.
- checkCollectionIsEmpty(NamespaceString(ShardCollectionType::ConfigNS));
+ checkCollectionIsEmpty(ShardCollectionType::ConfigNS);
}
} // namespace
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
index 8db0430fdfe..8eb3818c90e 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
@@ -125,7 +125,7 @@ vector<ChunkType> ShardServerCatalogCacheLoaderTest::makeFiveChunks(
collVersion.incMajor();
ChunkType chunk;
- chunk.setNS(kNss.ns());
+ chunk.setNS(kNss);
chunk.setMin(mins[i]);
chunk.setMax(maxs[i]);
chunk.setShard(kShardId);
@@ -148,7 +148,7 @@ vector<ChunkType> ShardServerCatalogCacheLoaderTest::makeThreeUpdatedChunksDiff(
// dependent on a race between persistence and retrieving data because it combines enqueued and
// persisted results without applying modifications.
ChunkType oldChunk;
- oldChunk.setNS(kNss.ns());
+ oldChunk.setNS(kNss);
oldChunk.setMin(BSON("a" << 200));
oldChunk.setMax(BSON("a" << MAXKEY));
oldChunk.setShard(kShardId);
@@ -164,7 +164,7 @@ vector<ChunkType> ShardServerCatalogCacheLoaderTest::makeThreeUpdatedChunksDiff(
collVersion.incMinor();
ChunkType chunk;
- chunk.setNS(kNss.ns());
+ chunk.setNS(kNss);
chunk.setMin(mins[i]);
chunk.setMax(maxs[i]);
chunk.setShard(kShardId);
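
The updated tests exercise three parts of the NamespaceString API directly: construction from a database name plus a collection name, typed equality comparison, and .ns() to recover the fully-qualified string. A usage sketch under the same assumptions as the simplified stand-in class shown after the commit message (hypothetical, for illustration only):

    // Usage pattern seen throughout the updated tests (sketch; reuses the
    // simplified NamespaceString stand-in defined after the commit message).
    #include <cassert>
    #include <string>

    int main() {
        const std::string dbName = "foo";
        const NamespaceString collName(dbName, "bar");   // as in migration_manager_test.cpp

        assert(collName.ns() == "foo.bar");              // .ns() for log/BSON contexts
        assert(collName == NamespaceString("foo.bar"));  // typed comparison, no .ns() needed
        return 0;
    }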