author    Kevin Pulo <kevin.pulo@mongodb.com>    2018-02-01 07:39:32 +0000
committer Kevin Pulo <kevin.pulo@mongodb.com>    2018-02-01 07:39:32 +0000
commit    6476d7205b2bf4a8d02bf877bb84f1290878bb04 (patch)
tree      643d0826ea220b521e605d3f537b7b978eec3a83 /src
parent    ad7235f3b739c3aeb0c21ddaba58fd1cb494cc61 (diff)
SERVER-18137 Use NamespaceString for fully-qualified ns in sharding
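
The change is a mechanical type-tightening: fully-qualified namespace strings ("db.collection") that sharding code previously carried as std::string, converting to NamespaceString at each use site, are now typed as NamespaceString end to end. The sketch below illustrates the before/after pattern with a simplified stand-in class; it is not the real mongo::NamespaceString, and types such as Status and BSONObj are elided.

    #include <string>
    #include <utility>

    // Simplified stand-in for mongo::NamespaceString (illustration only):
    // wraps a validated, fully-qualified "db.collection" name.
    class NamespaceString {
    public:
        explicit NamespaceString(std::string ns) : _ns(std::move(ns)) {}
        const std::string& ns() const { return _ns; }  // full "db.collection"
        bool operator==(const NamespaceString& other) const { return _ns == other._ns; }
    private:
        std::string _ns;
    };

    // Before: collection constants were strings, so every consumer re-wrapped
    // them, e.g. NamespaceString(KeysCollectionDocument::ConfigNS).
    //
    //   static const std::string ConfigNS;  // declaration
    //   const std::string KeysCollectionDocument::ConfigNS = "admin.system.keys";
    //
    // After: the constant carries the type, and the per-call conversions seen
    // in the diffs below simply disappear.
    struct KeysCollectionDocument {
        static const NamespaceString ConfigNS;
    };
    const NamespaceString KeysCollectionDocument::ConfigNS("admin.system.keys");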
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp | 2
-rw-r--r--  src/mongo/db/key_generator_update_test.cpp | 14
-rw-r--r--  src/mongo/db/keys_collection_cache_test.cpp | 20
-rw-r--r--  src/mongo/db/keys_collection_client_direct.cpp | 5
-rw-r--r--  src/mongo/db/keys_collection_client_direct.h | 2
-rw-r--r--  src/mongo/db/keys_collection_document.cpp | 2
-rw-r--r--  src/mongo/db/keys_collection_document.h | 3
-rw-r--r--  src/mongo/db/keys_collection_manager_sharding_test.cpp | 20
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp | 4
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp | 13
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.cpp | 8
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.h | 3
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy_test.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp | 6
-rw-r--r--  src/mongo/db/s/balancer/migration_manager_test.cpp | 60
-rw-r--r--  src/mongo/db/s/balancer/scoped_migration_request.cpp | 7
-rw-r--r--  src/mongo/db/s/balancer/scoped_migration_request_test.cpp | 4
-rw-r--r--  src/mongo/db/s/balancer/type_migration.cpp | 8
-rw-r--r--  src/mongo/db/s/balancer/type_migration.h | 3
-rw-r--r--  src/mongo/db/s/chunk_splitter.cpp | 4
-rw-r--r--  src/mongo/db/s/collection_sharding_state.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_collection_command.cpp | 2
-rw-r--r--  src/mongo/db/s/config/configsvr_shard_collection_command.cpp | 6
-rw-r--r--  src/mongo/db/s/shard_metadata_util.cpp | 4
-rw-r--r--  src/mongo/db/s/shard_metadata_util_test.cpp | 2
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp | 6
-rw-r--r--  src/mongo/s/balancer_configuration.cpp | 2
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp | 2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp | 104
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client.h | 16
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp | 83
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.h | 20
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.cpp | 16
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.h | 14
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp | 48
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp | 77
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager.cpp | 33
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager.h | 14
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp | 58
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp | 66
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp | 4
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp | 28
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_zone_operations.cpp | 48
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp | 30
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp | 12
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp | 4
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp | 68
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp | 22
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_test.cpp | 93
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp | 38
-rw-r--r--  src/mongo/s/catalog/type_chunk.cpp | 26
-rw-r--r--  src/mongo/s/catalog/type_chunk.h | 12
-rw-r--r--  src/mongo/s/catalog/type_chunk_test.cpp | 4
-rw-r--r--  src/mongo/s/catalog/type_collection.cpp | 2
-rw-r--r--  src/mongo/s/catalog/type_collection.h | 2
-rw-r--r--  src/mongo/s/catalog/type_config_version.cpp | 2
-rw-r--r--  src/mongo/s/catalog/type_config_version.h | 3
-rw-r--r--  src/mongo/s/catalog/type_database.cpp | 2
-rw-r--r--  src/mongo/s/catalog/type_database.h | 3
-rw-r--r--  src/mongo/s/catalog/type_lockpings.cpp | 2
-rw-r--r--  src/mongo/s/catalog/type_lockpings.h | 3
-rw-r--r--  src/mongo/s/catalog/type_locks.cpp | 2
-rw-r--r--  src/mongo/s/catalog/type_locks.h | 3
-rw-r--r--  src/mongo/s/catalog/type_mongos.cpp | 2
-rw-r--r--  src/mongo/s/catalog/type_mongos.h | 3
-rw-r--r--  src/mongo/s/catalog/type_shard.cpp | 2
-rw-r--r--  src/mongo/s/catalog/type_shard.h | 3
-rw-r--r--  src/mongo/s/catalog/type_shard_collection.cpp | 4
-rw-r--r--  src/mongo/s/catalog/type_shard_collection.h | 2
-rw-r--r--  src/mongo/s/catalog/type_tags.cpp | 12
-rw-r--r--  src/mongo/s/catalog/type_tags.h | 9
-rw-r--r--  src/mongo/s/catalog/type_tags_test.cpp | 2
-rw-r--r--  src/mongo/s/catalog_cache.cpp | 4
-rw-r--r--  src/mongo/s/chunk_manager.h | 4
-rw-r--r--  src/mongo/s/client/parallel.cpp | 4
-rw-r--r--  src/mongo/s/client/shard_connection.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp | 3
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_move_chunk_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp | 12
-rw-r--r--  src/mongo/s/commands/cluster_write.cpp | 4
-rw-r--r--  src/mongo/s/commands/commands_public.cpp | 2
-rw-r--r--  src/mongo/s/config_server_catalog_cache_loader.cpp | 2
-rw-r--r--  src/mongo/s/config_server_test_fixture.cpp | 23
-rw-r--r--  src/mongo/s/request_types/balance_chunk_request_test.cpp | 4
-rw-r--r--  src/mongo/s/sharding_test_fixture.cpp | 10
87 files changed, 623 insertions(+), 680 deletions(-)
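
At call sites the payoff, visible throughout the diffs below, is twofold: temporaries like NamespaceString(MigrationType::ConfigNS) vanish, and functions that took const std::string& ns plus an internal conversion now take const NamespaceString& directly. A self-contained usage sketch of that pattern, again using a simplified stand-in rather than the real MongoDB classes:

    #include <cassert>
    #include <iostream>
    #include <string>
    #include <utility>

    class NamespaceString {  // same illustrative stand-in as above
    public:
        explicit NamespaceString(std::string ns) : _ns(std::move(ns)) {}
        const std::string& ns() const { return _ns; }
        bool operator==(const NamespaceString& other) const { return _ns == other._ns; }
    private:
        std::string _ns;
    };

    struct MigrationType {
        static const NamespaceString ConfigNS;  // was: static const std::string
    };
    const NamespaceString MigrationType::ConfigNS("config.migrations");

    // Before: Status _insert(OperationContext*, const std::string& ns, ...)
    //         { const NamespaceString nss(ns); ... }
    // After:  the caller supplies the typed namespace; no conversion inside.
    void queryCollection(const NamespaceString& nss) {
        std::cout << "querying " << nss.ns() << '\n';
    }

    int main() {
        queryCollection(MigrationType::ConfigNS);  // was: NamespaceString(MigrationType::ConfigNS)
        assert(MigrationType::ConfigNS == NamespaceString("config.migrations"));
        return 0;
    }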
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 0f9ea8a543a..ee1de8b7627 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -595,7 +595,7 @@ void updateUUIDSchemaVersion(OperationContext* opCtx, bool upgrade) {
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(CollectionType::ConfigNS),
+ CollectionType::ConfigNS,
BSON("dropped" << false), // query
BSONObj(), // sort
boost::none // limit
diff --git a/src/mongo/db/key_generator_update_test.cpp b/src/mongo/db/key_generator_update_test.cpp
index 79b26326f18..ee33e8204bf 100644
--- a/src/mongo/db/key_generator_update_test.cpp
+++ b/src/mongo/db/key_generator_update_test.cpp
@@ -120,7 +120,7 @@ TEST_F(KeyGeneratorUpdateTest, ShouldCreateAnotherKeyIfOnlyOneKeyExists) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
{
auto allKeys = getKeys(operationContext());
@@ -167,12 +167,12 @@ TEST_F(KeyGeneratorUpdateTest, ShouldCreateAnotherKeyIfNoValidKeyAfterCurrent) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
{
auto allKeys = getKeys(operationContext());
@@ -250,12 +250,12 @@ TEST_F(KeyGeneratorUpdateTest, ShouldCreate2KeysIfAllKeysAreExpired) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
{
auto allKeys = getKeys(operationContext());
@@ -347,12 +347,12 @@ TEST_F(KeyGeneratorUpdateTest, ShouldNotCreateNewKeyIfThereAre2UnexpiredKeys) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
{
auto allKeys = getKeys(operationContext());
diff --git a/src/mongo/db/keys_collection_cache_test.cpp b/src/mongo/db/keys_collection_cache_test.cpp
index 6f50180ac8d..5ecbd828915 100644
--- a/src/mongo/db/keys_collection_cache_test.cpp
+++ b/src/mongo/db/keys_collection_cache_test.cpp
@@ -78,7 +78,7 @@ TEST_F(CacheTest, GetKeyShouldReturnCorrectKeyAfterRefresh) {
KeysCollectionDocument origKey1(
1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
auto refreshStatus = cache.refresh(operationContext());
ASSERT_OK(refreshStatus.getStatus());
@@ -109,7 +109,7 @@ TEST_F(CacheTest, GetKeyShouldReturnErrorIfNoKeyIsValidForGivenTime) {
KeysCollectionDocument origKey1(
1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
auto refreshStatus = cache.refresh(operationContext());
ASSERT_OK(refreshStatus.getStatus());
@@ -132,17 +132,17 @@ TEST_F(CacheTest, GetKeyShouldReturnOldestKeyPossible) {
KeysCollectionDocument origKey0(
0, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey0.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey0.toBSON()));
KeysCollectionDocument origKey1(
1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
auto refreshStatus = cache.refresh(operationContext());
ASSERT_OK(refreshStatus.getStatus());
@@ -173,7 +173,7 @@ TEST_F(CacheTest, RefreshShouldNotGetKeysForOtherPurpose) {
KeysCollectionDocument origKey0(
0, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey0.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey0.toBSON()));
{
auto refreshStatus = cache.refresh(operationContext());
@@ -186,7 +186,7 @@ TEST_F(CacheTest, RefreshShouldNotGetKeysForOtherPurpose) {
KeysCollectionDocument origKey1(
1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
{
auto refreshStatus = cache.refresh(operationContext());
@@ -217,7 +217,7 @@ TEST_F(CacheTest, RefreshCanIncrementallyGetNewKeys) {
KeysCollectionDocument origKey0(
0, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey0.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey0.toBSON()));
{
auto refreshStatus = cache.refresh(operationContext());
@@ -237,12 +237,12 @@ TEST_F(CacheTest, RefreshCanIncrementallyGetNewKeys) {
KeysCollectionDocument origKey1(
1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
{
auto refreshStatus = cache.refresh(operationContext());
diff --git a/src/mongo/db/keys_collection_client_direct.cpp b/src/mongo/db/keys_collection_client_direct.cpp
index bd97a92a20f..4148b589bab 100644
--- a/src/mongo/db/keys_collection_client_direct.cpp
+++ b/src/mongo/db/keys_collection_client_direct.cpp
@@ -83,7 +83,7 @@ StatusWith<std::vector<KeysCollectionDocument>> KeysCollectionClientDirect::getN
auto findStatus = _query(opCtx,
ReadPreferenceSetting(ReadPreference::Nearest, TagSet{}),
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(KeysCollectionDocument::ConfigNS),
+ KeysCollectionDocument::ConfigNS,
queryBuilder.obj(),
BSON("expiresAt" << 1),
boost::none);
@@ -130,10 +130,9 @@ StatusWith<Shard::QueryResponse> KeysCollectionClientDirect::_query(
}
Status KeysCollectionClientDirect::_insert(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) {
- const NamespaceString nss(ns);
BatchedCommandRequest request([&] {
write_ops::Insert insertOp(nss);
insertOp.setDocuments({doc});
diff --git a/src/mongo/db/keys_collection_client_direct.h b/src/mongo/db/keys_collection_client_direct.h
index f588fcc9d6e..ff5784770cd 100644
--- a/src/mongo/db/keys_collection_client_direct.h
+++ b/src/mongo/db/keys_collection_client_direct.h
@@ -73,7 +73,7 @@ private:
boost::optional<long long> limit);
Status _insert(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& doc,
const WriteConcernOptions& writeConcern);
diff --git a/src/mongo/db/keys_collection_document.cpp b/src/mongo/db/keys_collection_document.cpp
index 155eb730a31..f9444250d57 100644
--- a/src/mongo/db/keys_collection_document.cpp
+++ b/src/mongo/db/keys_collection_document.cpp
@@ -47,7 +47,7 @@ const char kExpiresAtFieldName[] = "expiresAt";
} // namespace
-const std::string KeysCollectionDocument::ConfigNS = "admin.system.keys";
+const NamespaceString KeysCollectionDocument::ConfigNS("admin.system.keys");
StatusWith<KeysCollectionDocument> KeysCollectionDocument::fromBSON(const BSONObj& source) {
long long keyId;
diff --git a/src/mongo/db/keys_collection_document.h b/src/mongo/db/keys_collection_document.h
index 5571982fdac..c7fbae7ed79 100644
--- a/src/mongo/db/keys_collection_document.h
+++ b/src/mongo/db/keys_collection_document.h
@@ -30,6 +30,7 @@
#include "mongo/base/status.h"
#include "mongo/db/logical_time.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/time_proof_service.h"
namespace mongo {
@@ -47,7 +48,7 @@ namespace mongo {
*/
class KeysCollectionDocument {
public:
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
KeysCollectionDocument(long long keyId,
std::string purpose,
diff --git a/src/mongo/db/keys_collection_manager_sharding_test.cpp b/src/mongo/db/keys_collection_manager_sharding_test.cpp
index ccdd0b653b8..4653653c836 100644
--- a/src/mongo/db/keys_collection_manager_sharding_test.cpp
+++ b/src/mongo/db/keys_collection_manager_sharding_test.cpp
@@ -109,7 +109,7 @@ TEST_F(KeysManagerShardedTest, GetKeyWithSingleKey) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
auto keyStatus =
keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)));
@@ -127,12 +127,12 @@ TEST_F(KeysManagerShardedTest, GetKeyWithMultipleKeys) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(205, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
auto keyStatus =
keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)));
@@ -159,7 +159,7 @@ TEST_F(KeysManagerShardedTest, GetKeyShouldErrorIfKeyIdMismatchKey) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
auto keyStatus =
keyManager()->getKeyForValidation(operationContext(), 2, LogicalTime(Timestamp(100, 0)));
@@ -172,11 +172,11 @@ TEST_F(KeysManagerShardedTest, GetKeyWithoutRefreshShouldReturnRightKey) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
{
auto keyStatus = keyManager()->getKeyForValidation(
@@ -207,7 +207,7 @@ TEST_F(KeysManagerShardedTest, GetKeyForSigningShouldReturnRightKey) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
keyManager()->refreshNow(operationContext());
@@ -226,11 +226,11 @@ TEST_F(KeysManagerShardedTest, GetKeyForSigningShouldReturnRightOldKey) {
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
KeysCollectionDocument origKey2(
2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));
keyManager()->refreshNow(operationContext());
@@ -297,7 +297,7 @@ TEST_F(KeysManagerShardedTest, ShouldStillBeAbleToUpdateCacheEvenIfItCantCreateK
KeysCollectionDocument origKey1(
1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
+ operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
// Set the time to be very ahead so the updater will be forced to create new keys.
const LogicalTime fakeTime(Timestamp(20000, 0));
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 99fed7b8737..d537260c666 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -605,7 +605,7 @@ int Balancer::_moveChunks(OperationContext* opCtx,
log() << "Performing a split because migration " << redact(requestIt->toString())
<< " failed for size reasons" << causedBy(redact(status));
- _splitOrMarkJumbo(opCtx, NamespaceString(requestIt->ns), requestIt->minKey);
+ _splitOrMarkJumbo(opCtx, requestIt->nss, requestIt->minKey);
continue;
}
@@ -650,7 +650,7 @@ void Balancer::_splitOrMarkJumbo(OperationContext* opCtx,
chunk->markAsJumbo();
- const std::string chunkName = ChunkType::genID(nss.ns(), chunk->getMin());
+ const std::string chunkName = ChunkType::genID(nss, chunk->getMin());
auto status = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
opCtx,
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index c00455f05cd..5e89baca4e1 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -86,12 +86,11 @@ StatusWith<DistributionStatus> createCollectionDistributionStatus(
Grid::get(opCtx)->catalogClient()->getTagsForCollection(opCtx, chunkMgr->getns());
if (!swCollectionTags.isOK()) {
return swCollectionTags.getStatus().withContext(
- str::stream() << "Unable to load tags for collection " << chunkMgr->getns());
+ str::stream() << "Unable to load tags for collection " << chunkMgr->getns().ns());
}
const auto& collectionTags = swCollectionTags.getValue();
- DistributionStatus distribution(NamespaceString(chunkMgr->getns()),
- std::move(shardToChunksMap));
+ DistributionStatus distribution(chunkMgr->getns(), std::move(shardToChunksMap));
// Cache the collection tags
const auto& keyPattern = chunkMgr->getShardKeyPattern().getKeyPattern();
@@ -298,8 +297,8 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* op
const auto& shardStats = shardStatsStatus.getValue();
auto routingInfoStatus =
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
- opCtx, NamespaceString(chunk.getNS()));
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
+ chunk.getNS());
if (!routingInfoStatus.isOK()) {
return routingInfoStatus.getStatus();
}
@@ -327,8 +326,8 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* opCt
auto shardStats = std::move(shardStatsStatus.getValue());
auto routingInfoStatus =
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
- opCtx, NamespaceString(chunk.getNS()));
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
+ chunk.getNS());
if (!routingInfoStatus.isOK()) {
return routingInfoStatus.getStatus();
}
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index 46a20de8676..7239f9b9e61 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -537,7 +537,7 @@ MigrateInfo::MigrateInfo(const ShardId& a_to, const ChunkType& a_chunk) {
to = a_to;
- ns = a_chunk.getNS();
+ nss = a_chunk.getNS();
from = a_chunk.getShard();
minKey = a_chunk.getMin();
maxKey = a_chunk.getMax();
@@ -545,12 +545,12 @@ MigrateInfo::MigrateInfo(const ShardId& a_to, const ChunkType& a_chunk) {
}
std::string MigrateInfo::getName() const {
- return ChunkType::genID(ns, minKey);
+ return ChunkType::genID(nss, minKey);
}
string MigrateInfo::toString() const {
- return str::stream() << ns << ": [" << minKey << ", " << maxKey << "), from " << from << ", to "
- << to;
+ return str::stream() << nss.ns() << ": [" << minKey << ", " << maxKey << "), from " << from
+ << ", to " << to;
}
} // namespace mongo
diff --git a/src/mongo/db/s/balancer/balancer_policy.h b/src/mongo/db/s/balancer/balancer_policy.h
index afba1db4915..ed70c40d3c9 100644
--- a/src/mongo/db/s/balancer/balancer_policy.h
+++ b/src/mongo/db/s/balancer/balancer_policy.h
@@ -34,6 +34,7 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/simple_bsonobj_comparator.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/s/balancer/cluster_statistics.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/shard_id.h"
@@ -57,7 +58,7 @@ struct MigrateInfo {
std::string toString() const;
- std::string ns;
+ NamespaceString nss;
ShardId to;
ShardId from;
BSONObj minKey;
diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp
index f6abd7c8acf..14e34cf298a 100644
--- a/src/mongo/db/s/balancer/balancer_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp
@@ -91,7 +91,7 @@ std::pair<ShardStatisticsVector, ShardToChunksMap> generateCluster(
for (size_t i = 0; i < numChunks; i++, currentChunk++) {
ChunkType chunk;
- chunk.setNS(kNamespace.ns());
+ chunk.setNS(kNamespace);
chunk.setMin(currentChunk == 0 ? shardKeyPattern.globalMin()
: BSON("x" << currentChunk));
chunk.setMax(currentChunk == totalNumChunks - 1 ? shardKeyPattern.globalMax()
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 0beffad2508..ce7ed13d743 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -183,7 +183,7 @@ Status MigrationManager::executeManualMigration(
auto routingInfoStatus =
Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
- opCtx, NamespaceString(migrateInfo.ns));
+ opCtx, migrateInfo.nss);
if (!routingInfoStatus.isOK()) {
return routingInfoStatus.getStatus();
}
@@ -228,7 +228,7 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx)
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(MigrationType::ConfigNS),
+ MigrationType::ConfigNS,
BSONObj(),
BSONObj(),
boost::none);
@@ -417,7 +417,7 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
bool waitForDelete) {
- const NamespaceString nss(migrateInfo.ns);
+ const NamespaceString& nss = migrateInfo.nss;
// Ensure we are not stopped in order to avoid doing the extra work
{
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index 70718a25504..515ebceb7b2 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -99,13 +99,13 @@ protected:
* Inserts a document into the config.collections collection to indicate that "collName" is
* sharded with version "version". The shard key pattern defaults to "_id".
*/
- void setUpCollection(const std::string collName, ChunkVersion version);
+ void setUpCollection(const NamespaceString& collName, ChunkVersion version);
/**
* Inserts a document into the config.chunks collection so that the chunk defined by the
* parameters exists. Returns a ChunkType defined by the parameters.
*/
- ChunkType setUpChunk(const std::string& collName,
+ ChunkType setUpChunk(const NamespaceString& collName,
const BSONObj& chunkMin,
const BSONObj& chunkMax,
const ShardId& shardId,
@@ -188,9 +188,9 @@ void MigrationManagerTest::setUpDatabase(const std::string& dbName, const ShardI
operationContext(), DatabaseType::ConfigNS, db.toBSON(), kMajorityWriteConcern));
}
-void MigrationManagerTest::setUpCollection(const std::string collName, ChunkVersion version) {
+void MigrationManagerTest::setUpCollection(const NamespaceString& collName, ChunkVersion version) {
CollectionType coll;
- coll.setNs(NamespaceString(collName));
+ coll.setNs(collName);
coll.setEpoch(version.epoch());
coll.setUpdatedAt(Date_t::fromMillisSinceEpoch(version.toLong()));
coll.setKeyPattern(kKeyPattern);
@@ -199,7 +199,7 @@ void MigrationManagerTest::setUpCollection(const std::string collName, ChunkVers
operationContext(), CollectionType::ConfigNS, coll.toBSON(), kMajorityWriteConcern));
}
-ChunkType MigrationManagerTest::setUpChunk(const std::string& collName,
+ChunkType MigrationManagerTest::setUpChunk(const NamespaceString& collName,
const BSONObj& chunkMin,
const BSONObj& chunkMax,
const ShardId& shardId,
@@ -217,7 +217,7 @@ ChunkType MigrationManagerTest::setUpChunk(const std::string& collName,
void MigrationManagerTest::setUpMigration(const ChunkType& chunk, const ShardId& toShard) {
BSONObjBuilder builder;
- builder.append(MigrationType::ns(), chunk.getNS());
+ builder.append(MigrationType::ns(), chunk.getNS().ns());
builder.append(MigrationType::min(), chunk.getMin());
builder.append(MigrationType::max(), chunk.getMax());
builder.append(MigrationType::toShard(), toShard.toString());
@@ -237,7 +237,7 @@ void MigrationManagerTest::checkMigrationsCollectionIsEmptyAndLocksAreUnlocked()
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(MigrationType::ConfigNS),
+ MigrationType::ConfigNS,
BSONObj(),
BSONObj(),
boost::none);
@@ -249,7 +249,7 @@ void MigrationManagerTest::checkMigrationsCollectionIsEmptyAndLocksAreUnlocked()
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(LocksType::ConfigNS),
+ LocksType::ConfigNS,
BSON(LocksType::state(LocksType::LOCKED) << LocksType::name("{ '$ne' : 'balancer'}")),
BSONObj(),
boost::none);
@@ -262,13 +262,13 @@ void MigrationManagerTest::expectMoveChunkCommand(const ChunkType& chunk,
const BSONObj& response) {
onCommand([&chunk, &toShardId, &response](const RemoteCommandRequest& request) {
NamespaceString nss(request.cmdObj.firstElement().valueStringData());
- ASSERT_EQ(chunk.getNS(), nss.ns());
+ ASSERT_EQ(chunk.getNS(), nss);
const StatusWith<MoveChunkRequest> moveChunkRequestWithStatus =
MoveChunkRequest::createFromCommand(nss, request.cmdObj);
ASSERT_OK(moveChunkRequestWithStatus.getStatus());
- ASSERT_EQ(chunk.getNS(), moveChunkRequestWithStatus.getValue().getNss().ns());
+ ASSERT_EQ(chunk.getNS(), moveChunkRequestWithStatus.getValue().getNss());
ASSERT_BSONOBJ_EQ(chunk.getMin(), moveChunkRequestWithStatus.getValue().getMinKey());
ASSERT_BSONOBJ_EQ(chunk.getMax(), moveChunkRequestWithStatus.getValue().getMaxKey());
ASSERT_EQ(chunk.getShard(), moveChunkRequestWithStatus.getValue().getFromShardId());
@@ -295,8 +295,8 @@ TEST_F(MigrationManagerTest, OneCollectionTwoMigrations) {
operationContext(), ShardType::ConfigNS, kShard2, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(2, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -347,8 +347,8 @@ TEST_F(MigrationManagerTest, TwoCollectionsTwoMigrationsEach) {
// Set up a database and two collections as sharded in the metadata.
std::string dbName = "foo";
- std::string collName1 = "foo.bar";
- std::string collName2 = "foo.baz";
+ const NamespaceString collName1(dbName, "bar");
+ const NamespaceString collName2(dbName, "baz");
ChunkVersion version1(2, 0, OID::gen());
ChunkVersion version2(2, 0, OID::gen());
@@ -413,8 +413,8 @@ TEST_F(MigrationManagerTest, SourceShardNotFound) {
operationContext(), ShardType::ConfigNS, kShard2, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(2, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -463,8 +463,8 @@ TEST_F(MigrationManagerTest, JumboChunkResponseBackwardsCompatibility) {
operationContext(), ShardType::ConfigNS, kShard0, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(2, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -505,8 +505,8 @@ TEST_F(MigrationManagerTest, InterruptMigration) {
operationContext(), ShardType::ConfigNS, kShard0, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(2, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -564,7 +564,7 @@ TEST_F(MigrationManagerTest, InterruptMigration) {
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(MigrationType::ConfigNS),
+ MigrationType::ConfigNS,
BSON(MigrationType::name(chunk.getName())),
BSONObj(),
boost::none);
@@ -588,8 +588,8 @@ TEST_F(MigrationManagerTest, RestartMigrationManager) {
operationContext(), ShardType::ConfigNS, kShard0, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(2, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -633,8 +633,8 @@ TEST_F(MigrationManagerTest, MigrationRecovery) {
operationContext(), ShardType::ConfigNS, kShard2, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(1, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -689,8 +689,8 @@ TEST_F(MigrationManagerTest, FailMigrationRecovery) {
operationContext(), ShardType::ConfigNS, kShard2, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(1, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
@@ -725,7 +725,7 @@ TEST_F(MigrationManagerTest, FailMigrationRecovery) {
// session ID used here doesn't matter.
ASSERT_OK(catalogClient()->getDistLockManager()->lockWithSessionID(
operationContext(),
- collName,
+ collName.ns(),
"MigrationManagerTest",
OID::gen(),
DistLockManager::kSingleLockAttemptTimeout));
@@ -748,8 +748,8 @@ TEST_F(MigrationManagerTest, RemoteCallErrorConversionToOperationFailed) {
operationContext(), ShardType::ConfigNS, kShard2, kMajorityWriteConcern));
// Set up the database and collection as sharded in the metadata.
- std::string dbName = "foo";
- std::string collName = "foo.bar";
+ const std::string dbName = "foo";
+ const NamespaceString collName(dbName, "bar");
ChunkVersion version(1, 0, OID::gen());
setUpDatabase(dbName, kShardId0);
diff --git a/src/mongo/db/s/balancer/scoped_migration_request.cpp b/src/mongo/db/s/balancer/scoped_migration_request.cpp
index d55855b14b8..6cf19c1f4bf 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request.cpp
@@ -110,7 +110,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(MigrationType::ConfigNS),
+ MigrationType::ConfigNS,
BSON(MigrationType::name(migrateInfo.getName())),
BSONObj(),
boost::none);
@@ -156,8 +156,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
// As long as there isn't a DuplicateKey error, the document may have been written, and it's
// safe (won't delete another migration's document) and necessary to try to clean up the
// document via the destructor.
- ScopedMigrationRequest scopedMigrationRequest(
- opCtx, NamespaceString(migrateInfo.ns), migrateInfo.minKey);
+ ScopedMigrationRequest scopedMigrationRequest(opCtx, migrateInfo.nss, migrateInfo.minKey);
// If there was a write error, let the object go out of scope and clean up in the
// destructor.
@@ -173,7 +172,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
<< "number of retries. Chunk '"
<< ChunkRange(migrateInfo.minKey, migrateInfo.maxKey).toString()
<< "' in collection '"
- << migrateInfo.ns
+ << migrateInfo.nss.ns()
<< "' was being moved (somewhere) by another operation.");
}
diff --git a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
index 1aab0157210..1d9b7ba6c7d 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
@@ -71,7 +71,7 @@ void ScopedMigrationRequestTest::checkMigrationsCollectionForDocument(
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(MigrationType::ConfigNS),
+ MigrationType::ConfigNS,
BSON(MigrationType::name(chunkName)),
BSONObj(),
boost::none);
@@ -161,7 +161,7 @@ TEST_F(ScopedMigrationRequestTest, CreateScopedMigrationRequestOnRecovery) {
// still removes the document corresponding to the MigrationRequest.
{
ScopedMigrationRequest scopedMigrationRequest = ScopedMigrationRequest::createForRecovery(
- operationContext(), NamespaceString(migrateInfo.ns), migrateInfo.minKey);
+ operationContext(), migrateInfo.nss, migrateInfo.minKey);
checkMigrationsCollectionForDocument(migrateInfo.getName(), 1);
}
diff --git a/src/mongo/db/s/balancer/type_migration.cpp b/src/mongo/db/s/balancer/type_migration.cpp
index efaf453048d..e6341b77685 100644
--- a/src/mongo/db/s/balancer/type_migration.cpp
+++ b/src/mongo/db/s/balancer/type_migration.cpp
@@ -40,7 +40,7 @@ const StringData kChunkVersion = "chunkVersion"_sd;
} // namespace
-const std::string MigrationType::ConfigNS = "config.migrations";
+const NamespaceString MigrationType::ConfigNS("config.migrations");
const BSONField<std::string> MigrationType::name("_id");
const BSONField<std::string> MigrationType::ns("ns");
@@ -53,7 +53,7 @@ const BSONField<bool> MigrationType::waitForDelete("waitForDelete");
MigrationType::MigrationType() = default;
MigrationType::MigrationType(MigrateInfo info, bool waitForDelete)
- : _nss(NamespaceString(info.ns)),
+ : _nss(info.nss),
_min(info.minKey),
_max(info.maxKey),
_fromShard(info.from),
@@ -138,7 +138,7 @@ BSONObj MigrationType::toBSON() const {
MigrateInfo MigrationType::toMigrateInfo() const {
ChunkType chunk;
- chunk.setNS(_nss.ns());
+ chunk.setNS(_nss);
chunk.setShard(_fromShard);
chunk.setMin(_min);
chunk.setMax(_max);
@@ -148,7 +148,7 @@ MigrateInfo MigrationType::toMigrateInfo() const {
}
std::string MigrationType::getName() const {
- return ChunkType::genID(_nss.ns(), _min);
+ return ChunkType::genID(_nss, _min);
}
} // namespace mongo
diff --git a/src/mongo/db/s/balancer/type_migration.h b/src/mongo/db/s/balancer/type_migration.h
index dd35bd9b844..000145c0941 100644
--- a/src/mongo/db/s/balancer/type_migration.h
+++ b/src/mongo/db/s/balancer/type_migration.h
@@ -29,6 +29,7 @@
#pragma once
#include "mongo/bson/bsonobj.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/s/balancer/balancer_policy.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard.h"
@@ -43,7 +44,7 @@ namespace mongo {
class MigrationType {
public:
// Name of the migrations collection in the config server.
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
// Field names and types in the migrations collection type.
static const BSONField<std::string> name;
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index caea3e11fed..f36ed19a0ea 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -117,7 +117,7 @@ void moveChunk(OperationContext* opCtx, const NamespaceString& nss, const BSONOb
const auto suggestedChunk = routingInfo.cm()->findIntersectingChunkWithSimpleCollation(minKey);
ChunkType chunkToMove;
- chunkToMove.setNS(nss.ns());
+ chunkToMove.setNS(nss);
chunkToMove.setShard(suggestedChunk->getShardId());
chunkToMove.setMin(suggestedChunk->getMin());
chunkToMove.setMax(suggestedChunk->getMax());
@@ -201,7 +201,7 @@ bool isAutoBalanceEnabled(OperationContext* opCtx,
if (!balancerConfig->shouldBalanceForAutoSplit())
return false;
- auto collStatus = Grid::get(opCtx)->catalogClient()->getCollection(opCtx, nss.ns());
+ auto collStatus = Grid::get(opCtx)->catalogClient()->getCollection(opCtx, nss);
if (!collStatus.isOK()) {
log() << "Auto-split for " << nss << " failed to load collection metadata"
<< causedBy(redact(collStatus.getStatus()));
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index e94547ccfe9..78e9c8b4d55 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -430,7 +430,7 @@ void CollectionShardingState::onDeleteOp(OperationContext* opCtx,
}
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- if (_nss.ns() == VersionType::ConfigNS) {
+ if (_nss == VersionType::ConfigNS) {
if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
uasserted(40302, "cannot delete config.version document while in --configsvr mode");
} else {
@@ -467,7 +467,7 @@ void CollectionShardingState::onDropCollection(OperationContext* opCtx,
}
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- if (_nss.ns() == VersionType::ConfigNS) {
+ if (_nss == VersionType::ConfigNS) {
if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
uasserted(40303, "cannot drop config.version document while in --configsvr mode");
} else {
diff --git a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
index fd2c5ca9b93..c5fd2ecfc0f 100644
--- a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
@@ -130,7 +130,7 @@ public:
[opCtx, nss] { Grid::get(opCtx)->catalogCache()->invalidateShardedCollection(nss); });
auto collStatus = catalogClient->getCollection(
- opCtx, nss.toString(), repl::ReadConcernLevel::kLocalReadConcern);
+ opCtx, nss, repl::ReadConcernLevel::kLocalReadConcern);
if (collStatus == ErrorCodes::NamespaceNotFound) {
// We checked the sharding catalog and found that this collection doesn't exist.
// This may be because it never existed, or because a drop command was sent
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index 63b6f00a48d..d221a719bb8 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -261,7 +261,7 @@ boost::optional<CollectionType> checkIfAlreadyShardedWithSameOptions(
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(CollectionType::ConfigNS),
+ CollectionType::ConfigNS,
BSON("_id" << nss.ns() << "dropped" << false),
BSONObj(),
1))
@@ -569,7 +569,7 @@ void migrateAndFurtherSplitInitialChunks(OperationContext* opCtx,
}
ChunkType chunkType;
- chunkType.setNS(nss.ns());
+ chunkType.setNS(nss);
chunkType.setMin(chunk->getMin());
chunkType.setMax(chunk->getMax());
chunkType.setShard(chunk->getShardId());
@@ -881,7 +881,7 @@ public:
// Step 6. Actually shard the collection.
catalogManager->shardCollection(opCtx,
- nss.ns(),
+ nss,
uuid,
shardKeyPattern,
*request.getCollation(),
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index 88a3d761cee..de8140a76e1 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -148,11 +148,11 @@ StatusWith<ShardCollectionType> readShardCollectionsEntry(OperationContext* opCt
try {
DBDirectClient client(opCtx);
std::unique_ptr<DBClientCursor> cursor =
- client.query(ShardCollectionType::ConfigNS.c_str(), fullQuery, 1);
+ client.query(ShardCollectionType::ConfigNS.ns(), fullQuery, 1);
if (!cursor) {
return Status(ErrorCodes::OperationFailed,
str::stream() << "Failed to establish a cursor for reading "
- << ShardCollectionType::ConfigNS
+ << ShardCollectionType::ConfigNS.ns()
<< " from local storage");
}
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index fa86f74a4ec..0fc7d24d5b6 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -322,7 +322,7 @@ TEST_F(ShardMetadataUtilTest, DropChunksAndDeleteCollectionsEntry) {
ASSERT_OK(dropChunksAndDeleteCollectionsEntry(operationContext(), kNss));
checkCollectionIsEmpty(kChunkMetadataNss);
// Collections collection should be empty because it only had one entry.
- checkCollectionIsEmpty(NamespaceString(ShardCollectionType::ConfigNS));
+ checkCollectionIsEmpty(ShardCollectionType::ConfigNS);
}
} // namespace
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
index 8db0430fdfe..8eb3818c90e 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
@@ -125,7 +125,7 @@ vector<ChunkType> ShardServerCatalogCacheLoaderTest::makeFiveChunks(
collVersion.incMajor();
ChunkType chunk;
- chunk.setNS(kNss.ns());
+ chunk.setNS(kNss);
chunk.setMin(mins[i]);
chunk.setMax(maxs[i]);
chunk.setShard(kShardId);
@@ -148,7 +148,7 @@ vector<ChunkType> ShardServerCatalogCacheLoaderTest::makeThreeUpdatedChunksDiff(
// dependent on a race between persistence and retrieving data because it combines enqueued and
// persisted results without applying modifications.
ChunkType oldChunk;
- oldChunk.setNS(kNss.ns());
+ oldChunk.setNS(kNss);
oldChunk.setMin(BSON("a" << 200));
oldChunk.setMax(BSON("a" << MAXKEY));
oldChunk.setShard(kShardId);
@@ -164,7 +164,7 @@ vector<ChunkType> ShardServerCatalogCacheLoaderTest::makeThreeUpdatedChunksDiff(
collVersion.incMinor();
ChunkType chunk;
- chunk.setNS(kNss.ns());
+ chunk.setNS(kNss);
chunk.setMin(mins[i]);
chunk.setMax(maxs[i]);
chunk.setShard(kShardId);
diff --git a/src/mongo/s/balancer_configuration.cpp b/src/mongo/s/balancer_configuration.cpp
index 50d7c219b11..9ab44694485 100644
--- a/src/mongo/s/balancer_configuration.cpp
+++ b/src/mongo/s/balancer_configuration.cpp
@@ -81,7 +81,7 @@ Status BalancerConfiguration::setBalancerMode(OperationContext* opCtx,
BalancerSettingsType::BalancerMode mode) {
auto updateStatus = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
opCtx,
- kSettingsNamespace.ns(),
+ kSettingsNamespace,
BSON("_id" << BalancerSettingsType::kKey),
BSON("$set" << BSON(kStopped << (mode == BalancerSettingsType::kOff) << kMode
<< BalancerSettingsType::kBalancerModes[mode])),
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
index 693ccf67b30..e584944658d 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
@@ -1126,7 +1126,7 @@ TEST_F(DistLockCatalogFixture, BasicUnlockAll) {
ASSERT_BSONOBJ_EQ(BSON("w" << 1 << "wtimeout" << 0), commandRequest.getWriteConcern());
const auto& updateOp = commandRequest.getUpdateRequest();
- ASSERT_EQUALS(LocksType::ConfigNS, updateOp.getNamespace().ns());
+ ASSERT_EQUALS(LocksType::ConfigNS, updateOp.getNamespace());
const auto& updates = updateOp.getUpdates();
ASSERT_EQUALS(1U, updates.size());
diff --git a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
index 50ff6368f2a..fa65e3bd070 100644
--- a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
@@ -66,17 +66,16 @@ public:
shardedCollection.setEpoch(OID::gen());
shardedCollection.setKeyPattern(BSON("x" << 1));
- ASSERT_OK(insertToConfigCollection(operationContext(),
- NamespaceString(CollectionType::ConfigNS),
- shardedCollection.toBSON()));
+ ASSERT_OK(insertToConfigCollection(
+ operationContext(), CollectionType::ConfigNS, shardedCollection.toBSON()));
}
/**
* Asserts that the config.tags collection is empty.
*/
void assertNoZoneDoc() {
- auto findStatus = findOneOnConfigCollection(
- operationContext(), NamespaceString(TagsType::ConfigNS), BSONObj());
+ auto findStatus =
+ findOneOnConfigCollection(operationContext(), TagsType::ConfigNS, BSONObj());
ASSERT_EQ(ErrorCodes::NoMatchingDocument, findStatus);
}
@@ -90,7 +89,7 @@ public:
getConfigShard()->exhaustiveFindOnConfig(operationContext(),
kReadPref,
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(TagsType::ConfigNS),
+ TagsType::ConfigNS,
BSONObj(),
BSONObj(),
1);
@@ -103,7 +102,7 @@ public:
ASSERT_OK(tagDocStatus.getStatus());
auto tagDoc = tagDocStatus.getValue();
- ASSERT_EQ(ns.ns(), tagDoc.getNS());
+ ASSERT_EQ(ns, tagDoc.getNS());
ASSERT_BSONOBJ_EQ(range.getMin(), tagDoc.getMinKey());
ASSERT_BSONOBJ_EQ(range.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName, tagDoc.getTag());
@@ -145,9 +144,8 @@ TEST_F(AssignKeyRangeToZoneTestFixture, AssignKeyRangeOnDroppedShardedCollShould
unshardedCollection.setKeyPattern(BSON("x" << 1));
unshardedCollection.setDropped(true);
- ASSERT_OK(insertToConfigCollection(operationContext(),
- NamespaceString(CollectionType::ConfigNS),
- unshardedCollection.toBSON()));
+ ASSERT_OK(insertToConfigCollection(
+ operationContext(), CollectionType::ConfigNS, unshardedCollection.toBSON()));
auto status =
ShardingCatalogManager::get(operationContext())
@@ -199,7 +197,7 @@ TEST_F(AssignKeyRangeToZoneTestFixture, MinThatIsAShardKeyPrefixShouldConvertToF
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(CollectionType::ConfigNS), shardedCollection.toBSON()));
+ operationContext(), CollectionType::ConfigNS, shardedCollection.toBSON()));
const ChunkRange newRange(BSON("x" << 0), BSON("x" << 10 << "y" << 10));
ASSERT_OK(ShardingCatalogManager::get(operationContext())
@@ -218,7 +216,7 @@ TEST_F(AssignKeyRangeToZoneTestFixture, MaxThatIsAShardKeyPrefixShouldConvertToF
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(CollectionType::ConfigNS), shardedCollection.toBSON()));
+ operationContext(), CollectionType::ConfigNS, shardedCollection.toBSON()));
const ChunkRange newRange(BSON("x" << 0 << "y" << 0), BSON("x" << 10));
ASSERT_OK(ShardingCatalogManager::get(operationContext())
@@ -272,7 +270,7 @@ TEST_F(AssignKeyRangeToZoneTestFixture, MinMaxThatIsAShardKeyPrefixShouldSucceed
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(CollectionType::ConfigNS), shardedCollection.toBSON()));
+ operationContext(), CollectionType::ConfigNS, shardedCollection.toBSON()));
const ChunkRange newRange(BSON("x" << 0 << "y" << 0), BSON("x" << 10 << "y" << 10));
ASSERT_OK(ShardingCatalogManager::get(operationContext())
@@ -314,14 +312,14 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewMaxAlignsWithExistingMinShouldSucce
{
auto findStatus = findOneOnConfigCollection(
- operationContext(), NamespaceString(TagsType::ConfigNS), BSON("min" << BSON("x" << 2)));
+ operationContext(), TagsType::ConfigNS, BSON("min" << BSON("x" << 2)));
ASSERT_OK(findStatus);
auto tagDocStatus = TagsType::fromBSON(findStatus.getValue());
ASSERT_OK(tagDocStatus.getStatus());
auto tagDoc = tagDocStatus.getValue();
- ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
+ ASSERT_EQ(shardedNS(), tagDoc.getNS());
ASSERT_BSONOBJ_EQ(BSON("x" << 2), tagDoc.getMinKey());
ASSERT_BSONOBJ_EQ(BSON("x" << 4), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
@@ -329,16 +327,15 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewMaxAlignsWithExistingMinShouldSucce
{
const auto existingRange = getExistingRange();
- auto findStatus = findOneOnConfigCollection(operationContext(),
- NamespaceString(TagsType::ConfigNS),
- BSON("min" << existingRange.getMin()));
+ auto findStatus = findOneOnConfigCollection(
+ operationContext(), TagsType::ConfigNS, BSON("min" << existingRange.getMin()));
ASSERT_OK(findStatus);
auto tagDocStatus = TagsType::fromBSON(findStatus.getValue());
ASSERT_OK(tagDocStatus.getStatus());
auto tagDoc = tagDocStatus.getValue();
- ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
+ ASSERT_EQ(shardedNS(), tagDoc.getNS());
ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
@@ -389,7 +386,7 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewRangeOverlappingWithDifferentNSShou
shardedCollection.setKeyPattern(BSON("x" << 1));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(CollectionType::ConfigNS), shardedCollection.toBSON()));
+ operationContext(), CollectionType::ConfigNS, shardedCollection.toBSON()));
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->assignKeyRangeToZone(operationContext(),
@@ -399,30 +396,29 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewRangeOverlappingWithDifferentNSShou
{
const auto existingRange = getExistingRange();
- auto findStatus = findOneOnConfigCollection(operationContext(),
- NamespaceString(TagsType::ConfigNS),
- BSON("min" << existingRange.getMin()));
+ auto findStatus = findOneOnConfigCollection(
+ operationContext(), TagsType::ConfigNS, BSON("min" << existingRange.getMin()));
ASSERT_OK(findStatus);
auto tagDocStatus = TagsType::fromBSON(findStatus.getValue());
ASSERT_OK(tagDocStatus.getStatus());
auto tagDoc = tagDocStatus.getValue();
- ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
+ ASSERT_EQ(shardedNS(), tagDoc.getNS());
ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
}
{
auto findStatus = findOneOnConfigCollection(
- operationContext(), NamespaceString(TagsType::ConfigNS), BSON("min" << BSON("x" << 5)));
+ operationContext(), TagsType::ConfigNS, BSON("min" << BSON("x" << 5)));
ASSERT_OK(findStatus);
auto tagDocStatus = TagsType::fromBSON(findStatus.getValue());
ASSERT_OK(tagDocStatus.getStatus());
auto tagDoc = tagDocStatus.getValue();
- ASSERT_EQ(shardedCollection.getNs().ns(), tagDoc.getNS());
+ ASSERT_EQ(shardedCollection.getNs(), tagDoc.getNS());
ASSERT_BSONOBJ_EQ(BSON("x" << 5), tagDoc.getMinKey());
ASSERT_BSONOBJ_EQ(BSON("x" << 7), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
@@ -454,8 +450,7 @@ TEST_F(AssignKeyRangeWithOneRangeFixture,
shard.setHost("b:1234");
shard.setTags({"y"});
- ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(ShardType::ConfigNS), shard.toBSON()));
+ ASSERT_OK(insertToConfigCollection(operationContext(), ShardType::ConfigNS, shard.toBSON()));
auto status =
ShardingCatalogManager::get(operationContext())
@@ -495,16 +490,15 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewMinAlignsWithExistingMaxShouldSucce
{
const auto existingRange = getExistingRange();
- auto findStatus = findOneOnConfigCollection(operationContext(),
- NamespaceString(TagsType::ConfigNS),
- BSON("min" << existingRange.getMin()));
+ auto findStatus = findOneOnConfigCollection(
+ operationContext(), TagsType::ConfigNS, BSON("min" << existingRange.getMin()));
ASSERT_OK(findStatus);
auto tagDocStatus = TagsType::fromBSON(findStatus.getValue());
ASSERT_OK(tagDocStatus.getStatus());
auto tagDoc = tagDocStatus.getValue();
- ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
+ ASSERT_EQ(shardedNS(), tagDoc.getNS());
ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
@@ -512,14 +506,14 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewMinAlignsWithExistingMaxShouldSucce
{
auto findStatus = findOneOnConfigCollection(
- operationContext(), NamespaceString(TagsType::ConfigNS), BSON("min" << BSON("x" << 8)));
+ operationContext(), TagsType::ConfigNS, BSON("min" << BSON("x" << 8)));
ASSERT_OK(findStatus);
auto tagDocStatus = TagsType::fromBSON(findStatus.getValue());
ASSERT_OK(tagDocStatus.getStatus());
auto tagDoc = tagDocStatus.getValue();
- ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
+ ASSERT_EQ(shardedNS(), tagDoc.getNS());
ASSERT_BSONOBJ_EQ(BSON("x" << 8), tagDoc.getMinKey());
ASSERT_BSONOBJ_EQ(BSON("x" << 10), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
@@ -551,13 +545,12 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewRangeIsSuperSetOfExistingShouldFail
*/
TEST_F(AssignKeyRangeWithOneRangeFixture, AssignWithExistingOveralpShouldFail) {
TagsType tagDoc;
- tagDoc.setNS(shardedNS().ns());
+ tagDoc.setNS(shardedNS());
tagDoc.setMinKey(BSON("x" << 0));
tagDoc.setMaxKey(BSON("x" << 2));
tagDoc.setTag("z");
- ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(TagsType::ConfigNS), tagDoc.toBSON()));
+ ASSERT_OK(insertToConfigCollection(operationContext(), TagsType::ConfigNS, tagDoc.toBSON()));
auto status = ShardingCatalogManager::get(operationContext())
->assignKeyRangeToZone(operationContext(),
@@ -593,9 +586,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveKeyRangeOnDroppedShardedCollShou
unshardedCollection.setKeyPattern(BSON("x" << 1));
unshardedCollection.setDropped(true);
- ASSERT_OK(insertToConfigCollection(operationContext(),
- NamespaceString(CollectionType::ConfigNS),
- unshardedCollection.toBSON()));
+ ASSERT_OK(insertToConfigCollection(
+ operationContext(), CollectionType::ConfigNS, unshardedCollection.toBSON()));
auto status = ShardingCatalogManager::get(operationContext())
->removeKeyRangeFromZone(
@@ -633,7 +625,7 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveThatIsOnlyMinPrefixOfExistingSho
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(CollectionType::ConfigNS), shardedCollection.toBSON()));
+ operationContext(), CollectionType::ConfigNS, shardedCollection.toBSON()));
const ChunkRange existingRange(fromjson("{ x: 0, y: { $minKey: 1 }}"),
BSON("x" << 10 << "y" << 10));
@@ -646,16 +638,15 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveThatIsOnlyMinPrefixOfExistingSho
operationContext(), ns, ChunkRange(BSON("x" << 0), BSON("x" << 10 << "y" << 10))));
{
- auto findStatus = findOneOnConfigCollection(operationContext(),
- NamespaceString(TagsType::ConfigNS),
- BSON("min" << existingRange.getMin()));
+ auto findStatus = findOneOnConfigCollection(
+ operationContext(), TagsType::ConfigNS, BSON("min" << existingRange.getMin()));
ASSERT_OK(findStatus);
auto tagDocStatus = TagsType::fromBSON(findStatus.getValue());
ASSERT_OK(tagDocStatus.getStatus());
auto tagDoc = tagDocStatus.getValue();
- ASSERT_EQ(ns.ns(), tagDoc.getNS());
+ ASSERT_EQ(ns, tagDoc.getNS());
ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
@@ -663,16 +654,15 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveThatIsOnlyMinPrefixOfExistingSho
{
const auto existingRange = getExistingRange();
- auto findStatus = findOneOnConfigCollection(operationContext(),
- NamespaceString(TagsType::ConfigNS),
- BSON("min" << existingRange.getMin()));
+ auto findStatus = findOneOnConfigCollection(
+ operationContext(), TagsType::ConfigNS, BSON("min" << existingRange.getMin()));
ASSERT_OK(findStatus);
auto tagDocStatus = TagsType::fromBSON(findStatus.getValue());
ASSERT_OK(tagDocStatus.getStatus());
auto tagDoc = tagDocStatus.getValue();
- ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
+ ASSERT_EQ(shardedNS(), tagDoc.getNS());
ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
@@ -687,7 +677,7 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveThatIsOnlyMaxPrefixOfExistingSho
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(CollectionType::ConfigNS), shardedCollection.toBSON()));
+ operationContext(), CollectionType::ConfigNS, shardedCollection.toBSON()));
const ChunkRange existingRange(BSON("x" << 0 << "y" << 0),
fromjson("{ x: 10, y: { $minKey: 1 }}"));
@@ -700,16 +690,15 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveThatIsOnlyMaxPrefixOfExistingSho
operationContext(), ns, ChunkRange(BSON("x" << 0 << "y" << 0), BSON("x" << 10))));
{
- auto findStatus = findOneOnConfigCollection(operationContext(),
- NamespaceString(TagsType::ConfigNS),
- BSON("min" << existingRange.getMin()));
+ auto findStatus = findOneOnConfigCollection(
+ operationContext(), TagsType::ConfigNS, BSON("min" << existingRange.getMin()));
ASSERT_OK(findStatus);
auto tagDocStatus = TagsType::fromBSON(findStatus.getValue());
ASSERT_OK(tagDocStatus.getStatus());
auto tagDoc = tagDocStatus.getValue();
- ASSERT_EQ(ns.ns(), tagDoc.getNS());
+ ASSERT_EQ(ns, tagDoc.getNS());
ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
@@ -717,16 +706,15 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveThatIsOnlyMaxPrefixOfExistingSho
{
const auto existingRange = getExistingRange();
- auto findStatus = findOneOnConfigCollection(operationContext(),
- NamespaceString(TagsType::ConfigNS),
- BSON("min" << existingRange.getMin()));
+ auto findStatus = findOneOnConfigCollection(
+ operationContext(), TagsType::ConfigNS, BSON("min" << existingRange.getMin()));
ASSERT_OK(findStatus);
auto tagDocStatus = TagsType::fromBSON(findStatus.getValue());
ASSERT_OK(tagDocStatus.getStatus());
auto tagDoc = tagDocStatus.getValue();
- ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
+ ASSERT_EQ(shardedNS(), tagDoc.getNS());
ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
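Note on the test changes above: they all follow one mechanical pattern from this commit. The per-collection ConfigNS constants (TagsType::ConfigNS, CollectionType::ConfigNS, ShardType::ConfigNS) are now NamespaceString values rather than plain strings, so call sites pass them directly instead of wrapping them in a NamespaceString(...) constructor, and assertions compare NamespaceString to NamespaceString instead of calling .ns() on both sides. A minimal standalone sketch of the idea follows; the NamespaceString here is a simplified stand-in for illustration, not the real mongo class:

    #include <cassert>
    #include <string>

    // Simplified stand-in for mongo::NamespaceString: splits "db.coll" once,
    // at construction, instead of at every call site.
    class NamespaceString {
    public:
        explicit NamespaceString(std::string ns) : _ns(std::move(ns)) {}
        const std::string& ns() const { return _ns; }
        std::string db() const { return _ns.substr(0, _ns.find('.')); }
        std::string coll() const { return _ns.substr(_ns.find('.') + 1); }
    private:
        std::string _ns;
    };

    // Before: callers held a std::string constant and converted at each use:
    //   const std::string ConfigNS = "config.tags";
    //   findOneOnConfigCollection(opCtx, NamespaceString(ConfigNS), query);
    // After: the constant itself is strongly typed and is passed through as-is.
    const NamespaceString kTagsConfigNS("config.tags");

    int main() {
        assert(kTagsConfigNS.db() == "config");
        assert(kTagsConfigNS.coll() == "tags");
        return 0;
    }

Parsing the namespace once at construction is what lets the hunks above delete the repeated NamespaceString(...) wrappers and .ns() conversions.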
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index d4eac9da595..c5cf22a61eb 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -133,7 +133,7 @@ public:
/**
* Retrieves the metadata for a given collection, if it exists.
*
- * @param collectionNs fully qualified name of the collection (case sensitive)
+ * @param nss fully qualified name of the collection (case sensitive)
*
* Returns Status::OK along with the collection information and the OpTime of the config server
* on which the collection information was based. Otherwise, returns an error code indicating
@@ -142,7 +142,7 @@ public:
*/
virtual StatusWith<repl::OpTimeWith<CollectionType>> getCollection(
OperationContext* opCtx,
- const std::string& collNs,
+ const NamespaceString& nss,
repl::ReadConcernLevel readConcernLevel = repl::ReadConcernLevel::kMajorityReadConcern) = 0;
/**
@@ -195,8 +195,8 @@ public:
*
* Returns a !OK status if an error occurs.
*/
- virtual StatusWith<std::vector<TagsType>> getTagsForCollection(
- OperationContext* opCtx, const std::string& collectionNs) = 0;
+ virtual StatusWith<std::vector<TagsType>> getTagsForCollection(OperationContext* opCtx,
+ const NamespaceString& nss) = 0;
/**
* Retrieves all shards in this sharded cluster.
@@ -250,7 +250,7 @@ public:
virtual Status applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
- const std::string& nss,
+ const NamespaceString& nss,
const ChunkVersion& lastChunkVersion,
const WriteConcernOptions& writeConcern,
repl::ReadConcernLevel readConcern) = 0;
@@ -321,7 +321,7 @@ public:
* NOTE: Should not be used in new code outside the ShardingCatalogManager.
*/
virtual Status insertConfigDocument(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) = 0;
@@ -340,7 +340,7 @@ public:
* NOTE: Should not be used in new code outside the ShardingCatalogManager.
*/
virtual StatusWith<bool> updateConfigDocument(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& query,
const BSONObj& update,
bool upsert,
@@ -353,7 +353,7 @@ public:
* NOTE: Should not be used in new code outside the ShardingCatalogManager.
*/
virtual Status removeConfigDocuments(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& query,
const WriteConcernOptions& writeConcern) = 0;
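With the client interface now taking const NamespaceString& instead of const std::string&, the string-to-namespace conversion happens at most once in the caller, and implementations can validate the database portion up front instead of re-parsing a raw "db.coll" string on every call. A hedged sketch of that calling convention, using a hypothetical free function rather than the real ShardingCatalogClient method:

    #include <cassert>
    #include <iostream>
    #include <string>

    struct NamespaceString {
        std::string db;
        std::string coll;
        std::string ns() const { return db + "." + coll; }
    };

    // Hypothetical analogue of the interface change: the parameter is a
    // parsed namespace, not a raw "db.coll" string.
    void insertConfigDocument(const NamespaceString& nss, const std::string& docJson) {
        assert(nss.db == "config" || nss.db == "admin");  // validated once, up front
        std::cout << "insert into " << nss.ns() << ": " << docJson << "\n";
    }

    int main() {
        const NamespaceString changelog{"config", "changelog"};
        insertConfigDocument(changelog, "{_id: 'example'}");
        return 0;
    }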
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 19e83ed737d..7da59f057c3 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -144,14 +144,14 @@ void ShardingCatalogClientImpl::shutDown(OperationContext* opCtx) {
Status ShardingCatalogClientImpl::updateShardingCatalogEntryForCollection(
OperationContext* opCtx,
- const std::string& collNs,
+ const NamespaceString& nss,
const CollectionType& coll,
const bool upsert) {
fassert(28634, coll.validate());
auto status = _updateConfigDocument(opCtx,
CollectionType::ConfigNS,
- BSON(CollectionType::fullNs(collNs)),
+ BSON(CollectionType::fullNs(nss.ns())),
coll.toBSON(),
upsert,
ShardingCatalogClient::kMajorityWriteConcern);
@@ -241,7 +241,7 @@ Status ShardingCatalogClientImpl::_log(OperationContext* opCtx,
log() << "about to log metadata event into " << logCollName << ": " << redact(changeLogBSON);
const NamespaceString nss("config", logCollName);
- Status result = insertConfigDocument(opCtx, nss.ns(), changeLogBSON, writeConcern);
+ Status result = insertConfigDocument(opCtx, nss, changeLogBSON, writeConcern);
if (!result.isOK()) {
warning() << "Error encountered while logging config change with ID [" << changeId
@@ -302,7 +302,7 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchData
auto findStatus = _exhaustiveFindOnConfig(opCtx,
readPref,
readConcernLevel,
- NamespaceString(DatabaseType::ConfigNS),
+ DatabaseType::ConfigNS,
BSON(DatabaseType::name(dbName)),
BSONObj(),
boost::none);
@@ -326,12 +326,12 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchData
}
StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientImpl::getCollection(
- OperationContext* opCtx, const std::string& collNs, repl::ReadConcernLevel readConcernLevel) {
+ OperationContext* opCtx, const NamespaceString& nss, repl::ReadConcernLevel readConcernLevel) {
auto statusFind = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
readConcernLevel,
- NamespaceString(CollectionType::ConfigNS),
- BSON(CollectionType::fullNs(collNs)),
+ CollectionType::ConfigNS,
+ BSON(CollectionType::fullNs(nss.ns())),
BSONObj(),
1);
if (!statusFind.isOK()) {
@@ -342,7 +342,7 @@ StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientImpl::getColle
const auto& retVal = retOpTimePair.value;
if (retVal.empty()) {
return Status(ErrorCodes::NamespaceNotFound,
- stream() << "collection " << collNs << " not found");
+ stream() << "collection " << nss.ns() << " not found");
}
invariant(retVal.size() == 1);
@@ -355,7 +355,7 @@ StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientImpl::getColle
auto collType = parseStatus.getValue();
if (collType.getDropped()) {
return Status(ErrorCodes::NamespaceNotFound,
- stream() << "collection " << collNs << " was dropped");
+ stream() << "collection " << nss.ns() << " was dropped");
}
return repl::OpTimeWith<CollectionType>(collType, retOpTimePair.opTime);
@@ -376,7 +376,7 @@ StatusWith<std::vector<CollectionType>> ShardingCatalogClientImpl::getCollection
auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
readConcernLevel,
- NamespaceString(CollectionType::ConfigNS),
+ CollectionType::ConfigNS,
b.obj(),
BSONObj(),
boost::none); // no limit
@@ -391,7 +391,7 @@ StatusWith<std::vector<CollectionType>> ShardingCatalogClientImpl::getCollection
const auto collectionResult = CollectionType::fromBSON(obj);
if (!collectionResult.isOK()) {
return {ErrorCodes::FailedToParse,
- str::stream() << "error while parsing " << CollectionType::ConfigNS
+ str::stream() << "error while parsing " << CollectionType::ConfigNS.ns()
<< " document: "
<< obj
<< " : "
@@ -437,7 +437,7 @@ StatusWith<VersionType> ShardingCatalogClientImpl::getConfigVersion(
opCtx,
kConfigReadSelector,
readConcern,
- NamespaceString(VersionType::ConfigNS),
+ VersionType::ConfigNS,
BSONObj(),
BSONObj(),
boost::none /* no limit */);
@@ -449,7 +449,7 @@ StatusWith<VersionType> ShardingCatalogClientImpl::getConfigVersion(
if (queryResults.size() > 1) {
return {ErrorCodes::TooManyMatchingDocuments,
- str::stream() << "should only have 1 document in " << VersionType::ConfigNS};
+ str::stream() << "should only have 1 document in " << VersionType::ConfigNS.ns()};
}
if (queryResults.empty()) {
@@ -481,7 +481,7 @@ StatusWith<std::vector<std::string>> ShardingCatalogClientImpl::getDatabasesForS
auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(DatabaseType::ConfigNS),
+ DatabaseType::ConfigNS,
BSON(DatabaseType::primary(shardId.toString())),
BSONObj(),
boost::none); // no limit
@@ -515,13 +515,8 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks(
// Convert boost::optional<int> to boost::optional<long long>.
auto longLimit = limit ? boost::optional<long long>(*limit) : boost::none;
- auto findStatus = _exhaustiveFindOnConfig(opCtx,
- kConfigReadSelector,
- readConcern,
- NamespaceString(ChunkType::ConfigNS),
- query,
- sort,
- longLimit);
+ auto findStatus = _exhaustiveFindOnConfig(
+ opCtx, kConfigReadSelector, readConcern, ChunkType::ConfigNS, query, sort, longLimit);
if (!findStatus.isOK()) {
return findStatus.getStatus().withContext("Failed to load chunks");
}
@@ -547,12 +542,12 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks(
}
StatusWith<std::vector<TagsType>> ShardingCatalogClientImpl::getTagsForCollection(
- OperationContext* opCtx, const std::string& collectionNs) {
+ OperationContext* opCtx, const NamespaceString& nss) {
auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(TagsType::ConfigNS),
- BSON(TagsType::ns(collectionNs)),
+ TagsType::ConfigNS,
+ BSON(TagsType::ns(nss.ns())),
BSON(TagsType::min() << 1),
boost::none); // no limit
if (!findStatus.isOK()) {
@@ -581,7 +576,7 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::
auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
readConcern,
- NamespaceString(ShardType::ConfigNS),
+ ShardType::ConfigNS,
BSONObj(), // no query filter
BSONObj(), // no sort
boost::none); // no limit
@@ -725,7 +720,7 @@ bool ShardingCatalogClientImpl::runUserManagementReadCommand(OperationContext* o
Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
- const std::string& nss,
+ const NamespaceString& nss,
const ChunkVersion& lastChunkVersion,
const WriteConcernOptions& writeConcern,
repl::ReadConcernLevel readConcern) {
@@ -775,7 +770,7 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
// mod made it to the config server, then transaction was successful.
BSONObjBuilder query;
lastChunkVersion.addToBSON(query, ChunkType::lastmod());
- query.append(ChunkType::ns(), nss);
+ query.append(ChunkType::ns(), nss.ns());
auto swChunks = getChunks(opCtx, query.obj(), BSONObj(), 1, nullptr, readConcern);
const auto& newestChunk = swChunks.getValue();
@@ -791,7 +786,7 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
} else {
errMsg = str::stream() << "chunk operation commit failed: version "
<< lastChunkVersion.toString()
- << " doesn't exist in namespace: " << nss
+ << " doesn't exist in namespace: " << nss.ns()
<< ". Unable to save chunk ops. Command: " << cmd
<< ". Result: " << response.getValue().response;
}
@@ -818,10 +813,9 @@ void ShardingCatalogClientImpl::writeConfigServerDirect(OperationContext* opCtx,
}
Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) {
- const NamespaceString nss(ns);
invariant(nss.db() == NamespaceString::kAdminDb || nss.db() == NamespaceString::kConfigDb);
const BSONElement idField = doc.getField("_id");
@@ -894,23 +888,22 @@ Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* opCtx,
StatusWith<bool> ShardingCatalogClientImpl::updateConfigDocument(
OperationContext* opCtx,
- const string& ns,
+ const NamespaceString& nss,
const BSONObj& query,
const BSONObj& update,
bool upsert,
const WriteConcernOptions& writeConcern) {
- return _updateConfigDocument(opCtx, ns, query, update, upsert, writeConcern);
+ return _updateConfigDocument(opCtx, nss, query, update, upsert, writeConcern);
}
StatusWith<bool> ShardingCatalogClientImpl::_updateConfigDocument(
OperationContext* opCtx,
- const string& ns,
+ const NamespaceString& nss,
const BSONObj& query,
const BSONObj& update,
bool upsert,
const WriteConcernOptions& writeConcern) {
- const NamespaceString nss(ns);
- invariant(nss.db() == "config");
+ invariant(nss.db() == NamespaceString::kConfigDb);
const BSONElement idField = query.getField("_id");
invariant(!idField.eoo());
@@ -944,11 +937,10 @@ StatusWith<bool> ShardingCatalogClientImpl::_updateConfigDocument(
}
Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* opCtx,
- const string& ns,
+ const NamespaceString& nss,
const BSONObj& query,
const WriteConcernOptions& writeConcern) {
- const NamespaceString nss(ns);
- invariant(nss.db() == "config");
+ invariant(nss.db() == NamespaceString::kConfigDb);
BatchedCommandRequest request([&] {
write_ops::Delete deleteOp(nss);
@@ -1040,14 +1032,13 @@ StatusWith<std::vector<KeysCollectionDocument>> ShardingCatalogClientImpl::getNe
queryBuilder.append("purpose", purpose);
queryBuilder.append("expiresAt", BSON("$gt" << newerThanThis.asTimestamp()));
- auto findStatus =
- config->exhaustiveFindOnConfig(opCtx,
- kConfigReadSelector,
- readConcernLevel,
- NamespaceString(KeysCollectionDocument::ConfigNS),
- queryBuilder.obj(),
- BSON("expiresAt" << 1),
- boost::none);
+ auto findStatus = config->exhaustiveFindOnConfig(opCtx,
+ kConfigReadSelector,
+ readConcernLevel,
+ KeysCollectionDocument::ConfigNS,
+ queryBuilder.obj(),
+ BSON("expiresAt" << 1),
+ boost::none);
if (!findStatus.isOK()) {
return findStatus.getStatus();
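One side effect of the strongly typed constants shows up throughout the implementation above: where a namespace is interpolated into an error message or a query document, the patch appends .ns() explicitly (for example VersionType::ConfigNS.ns() and nss.ns()), since NamespaceString is not implicitly convertible to a string. A small sketch of why the explicit call is needed, again with a simplified stand-in type:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Minimal stand-in: like the real NamespaceString, it defines no implicit
    // conversion to std::string, so text interpolation must go through .ns().
    class NamespaceString {
    public:
        explicit NamespaceString(std::string ns) : _ns(std::move(ns)) {}
        const std::string& ns() const { return _ns; }
    private:
        std::string _ns;
    };

    int main() {
        const NamespaceString versionNS("config.version");
        std::ostringstream err;
        // err << versionNS;          // would not compile: no operator<< defined
        err << "should only have 1 document in " << versionNS.ns();
        std::cout << err.str() << "\n";
        return 0;
    }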
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index 0766a44b279..0c258666bb7 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -56,7 +56,7 @@ public:
* described by "coll."
*/
static Status updateShardingCatalogEntryForCollection(OperationContext* opCtx,
- const std::string& collNs,
+ const NamespaceString& nss,
const CollectionType& coll,
const bool upsert);
@@ -93,7 +93,7 @@ public:
StatusWith<repl::OpTimeWith<CollectionType>> getCollection(
OperationContext* opCtx,
- const std::string& collNs,
+ const NamespaceString& nss,
repl::ReadConcernLevel readConcernLevel) override;
StatusWith<std::vector<CollectionType>> getCollections(
@@ -112,8 +112,8 @@ public:
repl::OpTime* opTime,
repl::ReadConcernLevel readConcern) override;
- StatusWith<std::vector<TagsType>> getTagsForCollection(
- OperationContext* opCtx, const std::string& collectionNs) override;
+ StatusWith<std::vector<TagsType>> getTagsForCollection(OperationContext* opCtx,
+ const NamespaceString& nss) override;
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override;
@@ -132,7 +132,7 @@ public:
Status applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
- const std::string& nss,
+ const NamespaceString& nss,
const ChunkVersion& lastChunkVersion,
const WriteConcernOptions& writeConcern,
repl::ReadConcernLevel readConcern) override;
@@ -147,19 +147,19 @@ public:
BatchedCommandResponse* response) override;
Status insertConfigDocument(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) override;
StatusWith<bool> updateConfigDocument(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& query,
const BSONObj& update,
bool upsert,
const WriteConcernOptions& writeConcern) override;
Status removeConfigDocuments(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& query,
const WriteConcernOptions& writeConcern) override;
@@ -193,7 +193,7 @@ private:
* returns whether the update command's response update.n value is > 0).
*/
static StatusWith<bool> _updateConfigDocument(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& query,
const BSONObj& update,
bool upsert,
@@ -246,7 +246,7 @@ private:
Status _log(OperationContext* opCtx,
const StringData& logCollName,
const std::string& what,
- const std::string& operationNS,
+ const std::string& operationNSS,
const BSONObj& detail,
const WriteConcernOptions& writeConcern);
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index 2fe043be4e4..0cc54ef7df7 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -80,7 +80,7 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientMock::getDatabas
}
StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientMock::getCollection(
- OperationContext* opCtx, const string& collNs, repl::ReadConcernLevel readConcernLevel) {
+ OperationContext* opCtx, const NamespaceString& nss, repl::ReadConcernLevel readConcernLevel) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
@@ -108,7 +108,7 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientMock::getChunks(
}
StatusWith<std::vector<TagsType>> ShardingCatalogClientMock::getTagsForCollection(
- OperationContext* opCtx, const string& collectionNs) {
+ OperationContext* opCtx, const NamespaceString& nss) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
@@ -135,7 +135,7 @@ bool ShardingCatalogClientMock::runUserManagementReadCommand(OperationContext* o
Status ShardingCatalogClientMock::applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
- const std::string& nss,
+ const NamespaceString& nss,
const ChunkVersion& lastChunkVersion,
const WriteConcernOptions& writeConcern,
repl::ReadConcernLevel readConcern) {
@@ -150,8 +150,8 @@ Status ShardingCatalogClientMock::logAction(OperationContext* opCtx,
}
Status ShardingCatalogClientMock::logChange(OperationContext* opCtx,
- const string& what,
- const string& ns,
+ const std::string& what,
+ const std::string& ns,
const BSONObj& detail,
const WriteConcernOptions& writeConcern) {
return {ErrorCodes::InternalError, "Method not implemented"};
@@ -172,7 +172,7 @@ void ShardingCatalogClientMock::writeConfigServerDirect(OperationContext* opCtx,
BatchedCommandResponse* response) {}
Status ShardingCatalogClientMock::insertConfigDocument(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) {
return {ErrorCodes::InternalError, "Method not implemented"};
@@ -180,7 +180,7 @@ Status ShardingCatalogClientMock::insertConfigDocument(OperationContext* opCtx,
StatusWith<bool> ShardingCatalogClientMock::updateConfigDocument(
OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& query,
const BSONObj& update,
bool upsert,
@@ -189,7 +189,7 @@ StatusWith<bool> ShardingCatalogClientMock::updateConfigDocument(
}
Status ShardingCatalogClientMock::removeConfigDocuments(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& query,
const WriteConcernOptions& writeConcern) {
return {ErrorCodes::InternalError, "Method not implemented"};
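The mock keeps its long-standing behavior of returning a not-implemented error from every method; only the signatures change. The brace-initialized returns work because both Status and StatusWith<T> can be constructed from an error code plus a reason string. A reduced sketch of that shape (simplified types, not the real mongo ones):

    #include <cassert>
    #include <string>

    enum class ErrorCodes { OK, InternalError };

    // Reduced Status/StatusWith pair: enough to show why
    //   return {ErrorCodes::InternalError, "Method not implemented"};
    // compiles for both return types.
    struct Status {
        ErrorCodes code;
        std::string reason;
        bool isOK() const { return code == ErrorCodes::OK; }
    };

    template <typename T>
    struct StatusWith {
        StatusWith(ErrorCodes c, std::string r) : status{c, std::move(r)} {}
        StatusWith(T v) : status{ErrorCodes::OK, ""}, value(std::move(v)) {}
        Status status;
        T value{};
    };

    StatusWith<int> notImplemented() {
        return {ErrorCodes::InternalError, "Method not implemented"};
    }

    int main() {
        assert(!notImplemented().status.isOK());
        return 0;
    }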
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 159d5d8c1b8..7ab8c63ddef 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -58,7 +58,7 @@ public:
StatusWith<repl::OpTimeWith<CollectionType>> getCollection(
OperationContext* opCtx,
- const std::string& collNs,
+ const NamespaceString& nss,
repl::ReadConcernLevel readConcernLevel) override;
StatusWith<std::vector<CollectionType>> getCollections(
@@ -77,8 +77,8 @@ public:
repl::OpTime* opTime,
repl::ReadConcernLevel readConcern) override;
- StatusWith<std::vector<TagsType>> getTagsForCollection(
- OperationContext* opCtx, const std::string& collectionNs) override;
+ StatusWith<std::vector<TagsType>> getTagsForCollection(OperationContext* opCtx,
+ const NamespaceString& nss) override;
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override;
@@ -97,7 +97,7 @@ public:
Status applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
- const std::string& nss,
+ const NamespaceString& nss,
const ChunkVersion& lastChunkVersion,
const WriteConcernOptions& writeConcern,
repl::ReadConcernLevel readConcern) override;
@@ -123,19 +123,19 @@ public:
BatchedCommandResponse* response) override;
Status insertConfigDocument(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) override;
StatusWith<bool> updateConfigDocument(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& query,
const BSONObj& update,
bool upsert,
const WriteConcernOptions& writeConcern) override;
Status removeConfigDocuments(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const BSONObj& query,
const WriteConcernOptions& writeConcern) override;
diff --git a/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp b/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
index 7904b697ea5..c703bb6e9b1 100644
--- a/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
@@ -43,9 +43,9 @@ namespace {
using CommitChunkMigrate = ConfigServerTestFixture;
-TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandWithCtl) {
+const NamespaceString kNamespace("TestDB.TestColl");
- std::string const nss = "TestDB.TestColl";
+TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandWithCtl) {
ShardType shard0;
shard0.setName("shard0");
@@ -61,7 +61,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandWithCtl) {
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
ChunkType chunk0;
- chunk0.setNS(nss);
+ chunk0.setNS(kNamespace);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
@@ -72,7 +72,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandWithCtl) {
chunk0.setMax(chunkMax);
ChunkType chunk1;
- chunk1.setNS(nss);
+ chunk1.setNS(kNamespace);
chunk1.setVersion(origVersion);
chunk1.setShard(shard0.getName());
@@ -88,7 +88,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandWithCtl) {
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- NamespaceString(chunk0.getNS()),
+ chunk0.getNS(),
chunk0cref,
chunk1cref,
origVersion.epoch(),
@@ -119,8 +119,6 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandWithCtl) {
TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
- std::string const nss = "TestDB.TestColl";
-
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -135,7 +133,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
auto const origVersion = ChunkVersion(origMajorVersion, 4, OID::gen());
ChunkType chunk0;
- chunk0.setNS(nss);
+ chunk0.setNS(kNamespace);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
@@ -149,7 +147,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- NamespaceString(chunk0.getNS()),
+ chunk0.getNS(),
chunk0,
boost::none,
origVersion.epoch(),
@@ -175,8 +173,6 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
- std::string const nss = "TestDB.TestColl";
-
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -191,7 +187,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
ChunkType chunk0;
- chunk0.setNS(nss);
+ chunk0.setNS(kNamespace);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
@@ -202,7 +198,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
chunk0.setMax(chunkMax);
ChunkType chunk1;
- chunk1.setNS(nss);
+ chunk1.setNS(kNamespace);
chunk1.setVersion(origVersion);
chunk1.setShard(shard0.getName());
@@ -214,7 +210,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- NamespaceString(chunk0.getNS()),
+ chunk0.getNS(),
chunk0,
chunk1,
OID::gen(),
@@ -226,8 +222,6 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
- std::string const nss = "TestDB.TestColl";
-
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -243,7 +237,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
auto const otherVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
ChunkType chunk0;
- chunk0.setNS(nss);
+ chunk0.setNS(kNamespace);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
@@ -254,7 +248,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
chunk0.setMax(chunkMax);
ChunkType chunk1;
- chunk1.setNS(nss);
+ chunk1.setNS(kNamespace);
chunk1.setVersion(otherVersion);
chunk1.setShard(shard0.getName());
@@ -267,7 +261,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- NamespaceString(chunk0.getNS()),
+ chunk0.getNS(),
chunk0,
chunk1,
origVersion.epoch(),
@@ -279,8 +273,6 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
- std::string const nss = "TestDB.TestColl";
-
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -295,7 +287,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
ChunkType chunk0;
- chunk0.setNS(nss);
+ chunk0.setNS(kNamespace);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
@@ -306,7 +298,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
chunk0.setMax(chunkMax);
ChunkType chunk1;
- chunk1.setNS(nss);
+ chunk1.setNS(kNamespace);
chunk1.setVersion(origVersion);
chunk1.setShard(shard0.getName());
@@ -318,7 +310,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- NamespaceString(chunk0.getNS()),
+ chunk0.getNS(),
chunk0,
chunk1,
origVersion.epoch(),
@@ -330,8 +322,6 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
TEST_F(CommitChunkMigrate, RejectChunkMissing1) {
- std::string const nss = "TestDB.TestColl";
-
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -346,7 +336,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing1) {
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
ChunkType chunk0;
- chunk0.setNS(nss);
+ chunk0.setNS(kNamespace);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
@@ -357,7 +347,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing1) {
chunk0.setMax(chunkMax);
ChunkType chunk1;
- chunk1.setNS(nss);
+ chunk1.setNS(kNamespace);
chunk1.setVersion(origVersion);
chunk1.setShard(shard0.getName());
@@ -369,7 +359,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing1) {
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- NamespaceString(chunk0.getNS()),
+ chunk0.getNS(),
chunk0,
chunk1,
origVersion.epoch(),
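The commit-chunk-migration tests above replace an identical std::string local in each test body with a single file-scope const NamespaceString kNamespace, the usual convention of hoisting a shared immutable fixture value. A sketch of the same pattern with plain googletest; the real tests derive from ConfigServerTestFixture, which is not reproduced here:

    // Build sketch: g++ -std=c++14 example.cpp -lgtest -lgtest_main
    #include <gtest/gtest.h>
    #include <string>

    namespace {

    struct NamespaceString {
        std::string value;
    };

    // One shared, immutable constant instead of an identical local in each test.
    const NamespaceString kNamespace{"TestDB.TestColl"};

    TEST(CommitChunkMigrateSketch, UsesSharedNamespace) {
        EXPECT_EQ("TestDB.TestColl", kNamespace.value);
    }

    }  // namespace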
diff --git a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
index 71705b712eb..bfb5234823a 100644
--- a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
@@ -84,14 +84,14 @@ TEST_F(ConfigInitializationTest, UpgradeNotNeeded) {
version.setClusterId(OID::gen());
version.setCurrentVersion(CURRENT_CONFIG_VERSION);
version.setMinCompatibleVersion(MIN_COMPATIBLE_CONFIG_VERSION);
- ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), version.toBSON()));
+ ASSERT_OK(
+ insertToConfigCollection(operationContext(), VersionType::ConfigNS, version.toBSON()));
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->initializeConfigDatabaseIfNeeded(operationContext()));
- auto versionDoc = assertGet(findOneOnConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), BSONObj()));
+ auto versionDoc =
+ assertGet(findOneOnConfigCollection(operationContext(), VersionType::ConfigNS, BSONObj()));
VersionType foundVersion = assertGet(VersionType::fromBSON(versionDoc));
@@ -105,15 +105,15 @@ TEST_F(ConfigInitializationTest, InitIncompatibleVersion) {
version.setClusterId(OID::gen());
version.setCurrentVersion(MIN_COMPATIBLE_CONFIG_VERSION - 1);
version.setMinCompatibleVersion(MIN_COMPATIBLE_CONFIG_VERSION - 2);
- ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), version.toBSON()));
+ ASSERT_OK(
+ insertToConfigCollection(operationContext(), VersionType::ConfigNS, version.toBSON()));
ASSERT_EQ(ErrorCodes::IncompatibleShardingConfigVersion,
ShardingCatalogManager::get(operationContext())
->initializeConfigDatabaseIfNeeded(operationContext()));
- auto versionDoc = assertGet(findOneOnConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), BSONObj()));
+ auto versionDoc =
+ assertGet(findOneOnConfigCollection(operationContext(), VersionType::ConfigNS, BSONObj()));
VersionType foundVersion = assertGet(VersionType::fromBSON(versionDoc));
@@ -127,11 +127,11 @@ TEST_F(ConfigInitializationTest, InitClusterMultipleVersionDocs) {
version.setClusterId(OID::gen());
version.setCurrentVersion(MIN_COMPATIBLE_CONFIG_VERSION - 2);
version.setMinCompatibleVersion(MIN_COMPATIBLE_CONFIG_VERSION - 3);
- ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), version.toBSON()));
+ ASSERT_OK(
+ insertToConfigCollection(operationContext(), VersionType::ConfigNS, version.toBSON()));
ASSERT_OK(insertToConfigCollection(operationContext(),
- NamespaceString(VersionType::ConfigNS),
+ VersionType::ConfigNS,
BSON("_id"
<< "a second document")));
@@ -147,8 +147,7 @@ TEST_F(ConfigInitializationTest, InitInvalidConfigVersionDoc) {
currentVersion: 7,
clusterId: ObjectId("55919cc6dbe86ce7ac056427")
})"));
- ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), versionDoc));
+ ASSERT_OK(insertToConfigCollection(operationContext(), VersionType::ConfigNS, versionDoc));
ASSERT_EQ(ErrorCodes::TypeMismatch,
ShardingCatalogManager::get(operationContext())
@@ -159,14 +158,13 @@ TEST_F(ConfigInitializationTest, InitInvalidConfigVersionDoc) {
TEST_F(ConfigInitializationTest, InitNoVersionDocEmptyConfig) {
// Make sure there is no existing document
ASSERT_EQUALS(ErrorCodes::NoMatchingDocument,
- findOneOnConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), BSONObj()));
+ findOneOnConfigCollection(operationContext(), VersionType::ConfigNS, BSONObj()));
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->initializeConfigDatabaseIfNeeded(operationContext()));
- auto versionDoc = assertGet(findOneOnConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), BSONObj()));
+ auto versionDoc =
+ assertGet(findOneOnConfigCollection(operationContext(), VersionType::ConfigNS, BSONObj()));
VersionType foundVersion = assertGet(VersionType::fromBSON(versionDoc));
@@ -180,8 +178,8 @@ TEST_F(ConfigInitializationTest, InitVersionTooHigh) {
version.setClusterId(OID::gen());
version.setCurrentVersion(10000);
version.setMinCompatibleVersion(10000);
- ASSERT_OK(insertToConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), version.toBSON()));
+ ASSERT_OK(
+ insertToConfigCollection(operationContext(), VersionType::ConfigNS, version.toBSON()));
ASSERT_EQ(ErrorCodes::IncompatibleShardingConfigVersion,
ShardingCatalogManager::get(operationContext())
@@ -192,8 +190,8 @@ TEST_F(ConfigInitializationTest, OnlyRunsOnce) {
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->initializeConfigDatabaseIfNeeded(operationContext()));
- auto versionDoc = assertGet(findOneOnConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), BSONObj()));
+ auto versionDoc =
+ assertGet(findOneOnConfigCollection(operationContext(), VersionType::ConfigNS, BSONObj()));
VersionType foundVersion = assertGet(VersionType::fromBSON(versionDoc));
@@ -210,8 +208,8 @@ TEST_F(ConfigInitializationTest, ReRunsIfDocRolledBackThenReElected) {
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->initializeConfigDatabaseIfNeeded(operationContext()));
- auto versionDoc = assertGet(findOneOnConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), BSONObj()));
+ auto versionDoc =
+ assertGet(findOneOnConfigCollection(operationContext(), VersionType::ConfigNS, BSONObj()));
VersionType foundVersion = assertGet(VersionType::fromBSON(versionDoc));
@@ -228,7 +226,7 @@ TEST_F(ConfigInitializationTest, ReRunsIfDocRolledBackThenReElected) {
ASSERT_OK(replicationCoordinator()->setFollowerMode(repl::MemberState::RS_ROLLBACK));
auto opCtx = operationContext();
repl::UnreplicatedWritesBlock uwb(opCtx);
- auto nss = NamespaceString(VersionType::ConfigNS);
+ auto nss = VersionType::ConfigNS;
writeConflictRetry(opCtx, "removeConfigDocuments", nss.ns(), [&] {
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
auto coll = autoColl.getCollection();
@@ -249,15 +247,14 @@ TEST_F(ConfigInitializationTest, ReRunsIfDocRolledBackThenReElected) {
// Verify the document was actually removed.
ASSERT_EQUALS(ErrorCodes::NoMatchingDocument,
- findOneOnConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), BSONObj()));
+ findOneOnConfigCollection(operationContext(), VersionType::ConfigNS, BSONObj()));
// Re-create the config.version document.
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->initializeConfigDatabaseIfNeeded(operationContext()));
- auto newVersionDoc = assertGet(findOneOnConfigCollection(
- operationContext(), NamespaceString(VersionType::ConfigNS), BSONObj()));
+ auto newVersionDoc =
+ assertGet(findOneOnConfigCollection(operationContext(), VersionType::ConfigNS, BSONObj()));
VersionType newFoundVersion = assertGet(VersionType::fromBSON(newVersionDoc));
@@ -334,31 +331,25 @@ TEST_F(ConfigInitializationTest, BuildsNecessaryIndexes) {
<< "ns"
<< "config.tags")};
- auto foundChunksIndexes =
- assertGet(getIndexes(operationContext(), NamespaceString(ChunkType::ConfigNS)));
+ auto foundChunksIndexes = assertGet(getIndexes(operationContext(), ChunkType::ConfigNS));
assertBSONObjsSame(expectedChunksIndexes, foundChunksIndexes);
- auto foundLockpingsIndexes =
- assertGet(getIndexes(operationContext(), NamespaceString(LockpingsType::ConfigNS)));
+ auto foundLockpingsIndexes = assertGet(getIndexes(operationContext(), LockpingsType::ConfigNS));
assertBSONObjsSame(expectedLockpingsIndexes, foundLockpingsIndexes);
- auto foundLocksIndexes =
- assertGet(getIndexes(operationContext(), NamespaceString(LocksType::ConfigNS)));
+ auto foundLocksIndexes = assertGet(getIndexes(operationContext(), LocksType::ConfigNS));
assertBSONObjsSame(expectedLocksIndexes, foundLocksIndexes);
- auto foundShardsIndexes =
- assertGet(getIndexes(operationContext(), NamespaceString(ShardType::ConfigNS)));
+ auto foundShardsIndexes = assertGet(getIndexes(operationContext(), ShardType::ConfigNS));
assertBSONObjsSame(expectedShardsIndexes, foundShardsIndexes);
- auto foundTagsIndexes =
- assertGet(getIndexes(operationContext(), NamespaceString(TagsType::ConfigNS)));
+ auto foundTagsIndexes = assertGet(getIndexes(operationContext(), TagsType::ConfigNS));
assertBSONObjsSame(expectedTagsIndexes, foundTagsIndexes);
}
TEST_F(ConfigInitializationTest, CompatibleIndexAlreadyExists) {
getConfigShard()
- ->createIndexOnConfig(
- operationContext(), NamespaceString(ShardType::ConfigNS), BSON("host" << 1), true)
+ ->createIndexOnConfig(operationContext(), ShardType::ConfigNS, BSON("host" << 1), true)
.transitional_ignore();
ASSERT_OK(ShardingCatalogManager::get(operationContext())
@@ -375,8 +366,7 @@ TEST_F(ConfigInitializationTest, CompatibleIndexAlreadyExists) {
<< "config.shards")};
- auto foundShardsIndexes =
- assertGet(getIndexes(operationContext(), NamespaceString(ShardType::ConfigNS)));
+ auto foundShardsIndexes = assertGet(getIndexes(operationContext(), ShardType::ConfigNS));
assertBSONObjsSame(expectedShardsIndexes, foundShardsIndexes);
}
@@ -384,8 +374,7 @@ TEST_F(ConfigInitializationTest, IncompatibleIndexAlreadyExists) {
// Make the index non-unique even though it's supposed to be unique, and make sure
// initialization fails
getConfigShard()
- ->createIndexOnConfig(
- operationContext(), NamespaceString(ShardType::ConfigNS), BSON("host" << 1), false)
+ ->createIndexOnConfig(operationContext(), ShardType::ConfigNS, BSON("host" << 1), false)
.transitional_ignore();
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
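The last two tests exercise index creation against a pre-existing index on the same keys: if the existing index has compatible options (same uniqueness), initialization succeeds, and if the uniqueness flag differs, it fails with IndexOptionsConflict. A toy model of that decision rule, under the simplifying assumption that an index is identified by its key pattern alone:

    #include <cassert>
    #include <map>
    #include <string>

    enum class ErrorCode { OK, IndexOptionsConflict };

    // Simplified model of the conflict rule the tests rely on: building an
    // index is idempotent if one with the same key pattern and options already
    // exists, and an error if the options differ.
    class IndexCatalog {
    public:
        ErrorCode createIndex(const std::string& keyPattern, bool unique) {
            auto it = _indexes.find(keyPattern);
            if (it == _indexes.end()) {
                _indexes.emplace(keyPattern, unique);
                return ErrorCode::OK;
            }
            return it->second == unique ? ErrorCode::OK
                                        : ErrorCode::IndexOptionsConflict;
        }
    private:
        std::map<std::string, bool> _indexes;  // key pattern -> unique flag
    };

    int main() {
        IndexCatalog catalog;
        assert(catalog.createIndex("{host: 1}", true) == ErrorCode::OK);
        assert(catalog.createIndex("{host: 1}", true) == ErrorCode::OK);   // compatible
        assert(catalog.createIndex("{host: 1}", false) ==
               ErrorCode::IndexOptionsConflict);                          // differs
        return 0;
    }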
diff --git a/src/mongo/s/catalog/sharding_catalog_manager.cpp b/src/mongo/s/catalog/sharding_catalog_manager.cpp
index b2fb3521f1d..18f616516e3 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager.cpp
@@ -210,18 +210,15 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
const bool unique = true;
auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- Status result =
- configShard->createIndexOnConfig(opCtx,
- NamespaceString(ChunkType::ConfigNS),
- BSON(ChunkType::ns() << 1 << ChunkType::min() << 1),
- unique);
+ Status result = configShard->createIndexOnConfig(
+ opCtx, ChunkType::ConfigNS, BSON(ChunkType::ns() << 1 << ChunkType::min() << 1), unique);
if (!result.isOK()) {
return result.withContext("couldn't create ns_1_min_1 index on config db");
}
result = configShard->createIndexOnConfig(
opCtx,
- NamespaceString(ChunkType::ConfigNS),
+ ChunkType::ConfigNS,
BSON(ChunkType::ns() << 1 << ChunkType::shard() << 1 << ChunkType::min() << 1),
unique);
if (!result.isOK()) {
@@ -230,7 +227,7 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
result =
configShard->createIndexOnConfig(opCtx,
- NamespaceString(ChunkType::ConfigNS),
+ ChunkType::ConfigNS,
BSON(ChunkType::ns() << 1 << ChunkType::lastmod() << 1),
unique);
if (!result.isOK()) {
@@ -239,7 +236,7 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
result = configShard->createIndexOnConfig(
opCtx,
- NamespaceString(MigrationType::ConfigNS),
+ MigrationType::ConfigNS,
BSON(MigrationType::ns() << 1 << MigrationType::min() << 1),
unique);
if (!result.isOK()) {
@@ -247,20 +244,20 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
}
result = configShard->createIndexOnConfig(
- opCtx, NamespaceString(ShardType::ConfigNS), BSON(ShardType::host() << 1), unique);
+ opCtx, ShardType::ConfigNS, BSON(ShardType::host() << 1), unique);
if (!result.isOK()) {
return result.withContext("couldn't create host_1 index on config db");
}
result = configShard->createIndexOnConfig(
- opCtx, NamespaceString(LocksType::ConfigNS), BSON(LocksType::lockID() << 1), !unique);
+ opCtx, LocksType::ConfigNS, BSON(LocksType::lockID() << 1), !unique);
if (!result.isOK()) {
return result.withContext("couldn't create lock id index on config db");
}
result =
configShard->createIndexOnConfig(opCtx,
- NamespaceString(LocksType::ConfigNS),
+ LocksType::ConfigNS,
BSON(LocksType::state() << 1 << LocksType::process() << 1),
!unique);
if (!result.isOK()) {
@@ -268,23 +265,19 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
}
result = configShard->createIndexOnConfig(
- opCtx, NamespaceString(LockpingsType::ConfigNS), BSON(LockpingsType::ping() << 1), !unique);
+ opCtx, LockpingsType::ConfigNS, BSON(LockpingsType::ping() << 1), !unique);
if (!result.isOK()) {
return result.withContext("couldn't create lockping ping time index on config db");
}
- result = configShard->createIndexOnConfig(opCtx,
- NamespaceString(TagsType::ConfigNS),
- BSON(TagsType::ns() << 1 << TagsType::min() << 1),
- unique);
+ result = configShard->createIndexOnConfig(
+ opCtx, TagsType::ConfigNS, BSON(TagsType::ns() << 1 << TagsType::min() << 1), unique);
if (!result.isOK()) {
return result.withContext("couldn't create ns_1_min_1 index on config db");
}
- result = configShard->createIndexOnConfig(opCtx,
- NamespaceString(TagsType::ConfigNS),
- BSON(TagsType::ns() << 1 << TagsType::tag() << 1),
- !unique);
+ result = configShard->createIndexOnConfig(
+ opCtx, TagsType::ConfigNS, BSON(TagsType::ns() << 1 << TagsType::tag() << 1), !unique);
if (!result.isOK()) {
return result.withContext("couldn't create ns_1_tag_1 index on config db");
}
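_initConfigIndexes builds each config index in turn and, on the first failure, decorates the error with withContext(...) before returning, so the caller sees both which index failed and why. A reduced sketch of that early-return chaining with a simplified Status type (the real mongo::Status carries an error code as well):

    #include <iostream>
    #include <string>

    // Reduced Status with the withContext decoration used throughout
    // _initConfigIndexes: prepend context to the reason, keep the outcome.
    struct Status {
        bool ok;
        std::string reason;
        bool isOK() const { return ok; }
        Status withContext(const std::string& ctx) const {
            return {ok, ctx + " :: " + reason};
        }
        static Status OK() { return {true, ""}; }
    };

    Status createIndexOnConfig(const std::string& keyPattern, bool unique) {
        (void)unique;  // ignored in this sketch
        if (keyPattern.empty())
            return {false, "empty key pattern"};
        return Status::OK();
    }

    Status initConfigIndexes() {
        const bool unique = true;
        Status result = createIndexOnConfig("{ns: 1, min: 1}", unique);
        if (!result.isOK())
            return result.withContext("couldn't create ns_1_min_1 index on config db");
        result = createIndexOnConfig("{host: 1}", unique);
        if (!result.isOK())
            return result.withContext("couldn't create host_1 index on config db");
        return Status::OK();
    }

    int main() {
        std::cout << (initConfigIndexes().isOK() ? "ok" : "failed") << "\n";
        return 0;
    }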
diff --git a/src/mongo/s/catalog/sharding_catalog_manager.h b/src/mongo/s/catalog/sharding_catalog_manager.h
index 4c535825c7e..8893561f5c8 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager.h
+++ b/src/mongo/s/catalog/sharding_catalog_manager.h
@@ -138,7 +138,7 @@ public:
* MinKey values.
*/
Status assignKeyRangeToZone(OperationContext* opCtx,
- const NamespaceString& ns,
+ const NamespaceString& nss,
const ChunkRange& range,
const std::string& zoneName);
@@ -149,7 +149,7 @@ public:
* full shard key.
*/
Status removeKeyRangeFromZone(OperationContext* opCtx,
- const NamespaceString& ns,
+ const NamespaceString& nss,
const ChunkRange& range);
//
@@ -161,7 +161,7 @@ public:
* smaller chunks at the specified split points.
*/
Status commitChunkSplit(OperationContext* opCtx,
- const NamespaceString& ns,
+ const NamespaceString& nss,
const OID& requestEpoch,
const ChunkRange& range,
const std::vector<BSONObj>& splitPoints,
@@ -172,7 +172,7 @@ public:
* merged into a single larger chunk.
*/
Status commitChunkMerge(OperationContext* opCtx,
- const NamespaceString& ns,
+ const NamespaceString& nss,
const OID& requestEpoch,
const std::vector<BSONObj>& chunkBoundaries,
const std::string& shardName);
@@ -229,7 +229,7 @@ public:
* some of the known failures:
* - NamespaceNotFound - collection does not exist
*/
- Status dropCollection(OperationContext* opCtx, const NamespaceString& ns);
+ Status dropCollection(OperationContext* opCtx, const NamespaceString& nss);
/**
@@ -248,7 +248,7 @@ public:
* Otherwise all chunks will be assigned to the primary shard for the database.
*/
void shardCollection(OperationContext* opCtx,
- const std::string& ns,
+ const NamespaceString& nss,
const boost::optional<UUID> uuid,
const ShardKeyPattern& fieldsAndOrder,
const BSONObj& defaultCollation,
@@ -430,7 +430,7 @@ private:
* handling.
*/
StatusWith<long long> _runCountCommandOnConfig(OperationContext* opCtx,
- const NamespaceString& ns,
+ const NamespaceString& nss,
BSONObj query);
/**
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp
index 41b5c5973e2..72c92bc6625 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp
@@ -78,7 +78,7 @@ BSONArray buildMergeChunksTransactionUpdates(const std::vector<ChunkType>& chunk
BSONObjBuilder op;
op.append("op", "u");
op.appendBool("b", false); // no upsert
- op.append("ns", ChunkType::ConfigNS);
+ op.append("ns", ChunkType::ConfigNS.ns());
// expand first chunk into newly merged chunk
ChunkType mergedChunk(chunksToMerge.front());
@@ -101,7 +101,7 @@ BSONArray buildMergeChunksTransactionUpdates(const std::vector<ChunkType>& chunk
for (size_t i = 1; i < chunksToMerge.size(); ++i) {
BSONObjBuilder op;
op.append("op", "d");
- op.append("ns", ChunkType::ConfigNS);
+ op.append("ns", ChunkType::ConfigNS.ns());
op.append("o", BSON(ChunkType::name(chunksToMerge[i].getName())));
@@ -117,13 +117,13 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
for (auto chunk : chunksToMerge) {
BSONObjBuilder b;
- b.append("ns", ChunkType::ConfigNS);
- b.append(
- "q",
- BSON("query" << BSON(ChunkType::ns(chunk.getNS()) << ChunkType::min(chunk.getMin())
- << ChunkType::max(chunk.getMax()))
- << "orderby"
- << BSON(ChunkType::lastmod() << -1)));
+ b.append("ns", ChunkType::ConfigNS.ns());
+ b.append("q",
+ BSON("query" << BSON(ChunkType::ns(chunk.getNS().ns())
+ << ChunkType::min(chunk.getMin())
+ << ChunkType::max(chunk.getMax()))
+ << "orderby"
+ << BSON(ChunkType::lastmod() << -1)));
b.append("res",
BSON(ChunkType::epoch(collVersion.epoch())
<< ChunkType::shard(chunk.getShard().toString())));
@@ -148,7 +148,7 @@ Status checkChunkIsOnShard(OperationContext* opCtx,
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
+ ChunkType::ConfigNS,
chunkQuery,
BSONObj(),
1);
@@ -179,10 +179,10 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
BSONObjBuilder op;
op.append("op", "u");
op.appendBool("b", false); // No upserting
- op.append("ns", ChunkType::ConfigNS);
+ op.append("ns", ChunkType::ConfigNS.ns());
BSONObjBuilder n(op.subobjStart("o"));
- n.append(ChunkType::name(), ChunkType::genID(nss.ns(), migratedChunk.getMin()));
+ n.append(ChunkType::name(), ChunkType::genID(nss, migratedChunk.getMin()));
migratedChunk.getVersion().addToBSON(n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), migratedChunk.getMin());
@@ -191,7 +191,7 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
n.done();
BSONObjBuilder q(op.subobjStart("o2"));
- q.append(ChunkType::name(), ChunkType::genID(nss.ns(), migratedChunk.getMin()));
+ q.append(ChunkType::name(), ChunkType::genID(nss, migratedChunk.getMin()));
q.done();
updates.append(op.obj());
@@ -202,10 +202,10 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
BSONObjBuilder op;
op.append("op", "u");
op.appendBool("b", false);
- op.append("ns", ChunkType::ConfigNS);
+ op.append("ns", ChunkType::ConfigNS.ns());
BSONObjBuilder n(op.subobjStart("o"));
- n.append(ChunkType::name(), ChunkType::genID(nss.ns(), controlChunk->getMin()));
+ n.append(ChunkType::name(), ChunkType::genID(nss, controlChunk->getMin()));
controlChunk->getVersion().addToBSON(n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), controlChunk->getMin());
@@ -214,7 +214,7 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
n.done();
BSONObjBuilder q(op.subobjStart("o2"));
- q.append(ChunkType::name(), ChunkType::genID(nss.ns(), controlChunk->getMin()));
+ q.append(ChunkType::name(), ChunkType::genID(nss, controlChunk->getMin()));
q.done();
updates.append(op.obj());
@@ -247,7 +247,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
+ ChunkType::ConfigNS,
BSON("ns" << nss.ns()),
BSON(ChunkType::lastmod << -1),
1);
@@ -326,11 +326,11 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
BSONObjBuilder op;
op.append("op", "u");
op.appendBool("b", true);
- op.append("ns", ChunkType::ConfigNS);
+ op.append("ns", ChunkType::ConfigNS.ns());
// add the modified (new) chunk information as the update object
BSONObjBuilder n(op.subobjStart("o"));
- n.append(ChunkType::name(), ChunkType::genID(nss.ns(), startKey));
+ n.append(ChunkType::name(), ChunkType::genID(nss, startKey));
currentMaxVersion.addToBSON(n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), startKey);
@@ -340,7 +340,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// add the chunk's _id as the query part of the update statement
BSONObjBuilder q(op.subobjStart("o2"));
- q.append(ChunkType::name(), ChunkType::genID(nss.ns(), startKey));
+ q.append(ChunkType::name(), ChunkType::genID(nss, startKey));
q.done();
updates.append(op.obj());
@@ -359,7 +359,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
BSONArrayBuilder preCond;
{
BSONObjBuilder b;
- b.append("ns", ChunkType::ConfigNS);
+ b.append("ns", ChunkType::ConfigNS.ns());
b.append("q",
BSON("query" << BSON(ChunkType::ns(nss.ns()) << ChunkType::min() << range.getMin()
<< ChunkType::max()
@@ -379,7 +379,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
opCtx,
updates.arr(),
preCond.arr(),
- nss.ns(),
+ nss,
currentMaxVersion,
WriteConcernOptions(),
repl::ReadConcernLevel::kLocalReadConcern);
@@ -428,7 +428,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
}
Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
- const NamespaceString& ns,
+ const NamespaceString& nss,
const OID& requestEpoch,
const std::vector<BSONObj>& chunkBoundaries,
const std::string& shardName) {
@@ -446,8 +446,8 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
- BSON("ns" << ns.ns()),
+ ChunkType::ConfigNS,
+ BSON("ns" << nss.ns()),
BSON(ChunkType::lastmod << -1),
1);
@@ -474,7 +474,7 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
ChunkType itChunk;
itChunk.setMax(chunkBoundaries.front());
- itChunk.setNS(ns.ns());
+ itChunk.setNS(nss);
itChunk.setShard(shardName);
// Do not use the first chunk boundary as a max bound while building chunks
@@ -508,7 +508,7 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
opCtx,
updates,
preCond,
- ns.ns(),
+ nss,
mergeVersion,
WriteConcernOptions(),
repl::ReadConcernLevel::kLocalReadConcern);
@@ -529,7 +529,7 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
Grid::get(opCtx)
->catalogClient()
- ->logChange(opCtx, "merge", ns.ns(), logDetail.obj(), WriteConcernOptions())
+ ->logChange(opCtx, "merge", nss.ns(), logDetail.obj(), WriteConcernOptions())
.transitional_ignore();
return doTxnStatus;
@@ -563,7 +563,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
configShard->exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
+ ChunkType::ConfigNS,
BSON("ns" << nss.ns()),
BSON(ChunkType::lastmod << -1),
1);
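
[Editor's note] A recurring pattern in the hunks above: each ConfigNS constant (ChunkType::ConfigNS, CollectionType::ConfigNS, and so on) changes type from std::string to NamespaceString, so call sites stop wrapping it in NamespaceString(...) and only string boundaries (BSON fields, log lines) call .ns(). The following is a minimal standalone sketch of the shape of that change; the NamespaceString class and exhaustiveFindOnConfig function below are toy stand-ins, not the mongo originals.

#include <iostream>
#include <string>
#include <utility>

// Toy stand-in for mongo::NamespaceString; only the accessors exercised in
// the hunks above are modeled.
class NamespaceString {
public:
    NamespaceString(std::string db, std::string coll)
        : _db(std::move(db)), _coll(std::move(coll)) {}
    std::string ns() const { return _db + "." + _coll; }  // full "db.coll"
    const std::string& db() const { return _db; }
    const std::string& coll() const { return _coll; }
private:
    std::string _db;
    std::string _coll;
};

struct ChunkType {
    // Before this commit: static const std::string ConfigNS ("config.chunks").
    // After: a typed constant, constructed once, passed by const reference.
    static const NamespaceString ConfigNS;
};
const NamespaceString ChunkType::ConfigNS{"config", "chunks"};

// Typed interface: callers pass ChunkType::ConfigNS directly, no re-parsing.
void exhaustiveFindOnConfig(const NamespaceString& nss) {
    // Only the string boundary spells out the raw "db.coll" form.
    std::cout << "find against " << nss.ns() << '\n';
}

int main() {
    exhaustiveFindOnConfig(ChunkType::ConfigNS);  // prints: find against config.chunks
    return 0;
}
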
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp
index 0576f4d59bd..50895470fc8 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp
@@ -169,7 +169,7 @@ ChunkVersion createFirstChunks(OperationContext* opCtx,
}
ChunkType chunk;
- chunk.setNS(nss.ns());
+ chunk.setNS(nss);
chunk.setMin(min);
chunk.setMax(max);
chunk.setShard(shardIds[i % shardIds.size()]);
@@ -185,10 +185,10 @@ ChunkVersion createFirstChunks(OperationContext* opCtx,
return version;
}
-void checkForExistingChunks(OperationContext* opCtx, const string& ns) {
+void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss) {
BSONObjBuilder countBuilder;
- countBuilder.append("count", NamespaceString(ChunkType::ConfigNS).coll());
- countBuilder.append("query", BSON(ChunkType::ns(ns)));
+ countBuilder.append("count", ChunkType::ConfigNS.coll());
+ countBuilder.append("query", BSON(ChunkType::ns(nss.ns())));
// OK to use limit=1, since if any chunks exist, we will fail.
countBuilder.append("limit", 1);
@@ -203,7 +203,7 @@ void checkForExistingChunks(OperationContext* opCtx, const string& ns) {
Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
opCtx,
kConfigReadSelector,
- NamespaceString(ChunkType::ConfigNS).db().toString(),
+ ChunkType::ConfigNS.db().toString(),
countBuilder.done(),
Shard::kDefaultConfigCommandTimeout,
Shard::RetryPolicy::kIdempotent));
@@ -212,22 +212,22 @@ void checkForExistingChunks(OperationContext* opCtx, const string& ns) {
long long numChunks;
uassertStatusOK(bsonExtractIntegerField(cmdResponse.response, "n", &numChunks));
uassert(ErrorCodes::ManualInterventionRequired,
- str::stream() << "A previous attempt to shard collection " << ns
+ str::stream() << "A previous attempt to shard collection " << nss.ns()
<< " failed after writing some initial chunks to config.chunks. Please "
"manually delete the partially written chunks for collection "
- << ns
+ << nss.ns()
<< " from config.chunks",
numChunks == 0);
}
} // namespace
-Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const NamespaceString& ns) {
+Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const NamespaceString& nss) {
const auto catalogClient = Grid::get(opCtx)->catalogClient();
catalogClient
->logChange(opCtx,
"dropCollection.start",
- ns.ns(),
+ nss.ns(),
BSONObj(),
ShardingCatalogClientImpl::kMajorityWriteConcern)
.ignore();
@@ -239,11 +239,11 @@ Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const Nam
}
vector<ShardType> allShards = std::move(shardsStatus.getValue().value);
- LOG(1) << "dropCollection " << ns << " started";
+ LOG(1) << "dropCollection " << nss.ns() << " started";
- const auto dropCommandBSON = [opCtx, &ns] {
+ const auto dropCommandBSON = [opCtx, &nss] {
BSONObjBuilder builder;
- builder.append("drop", ns.coll());
+ builder.append("drop", nss.coll());
if (!opCtx->getWriteConcern().usedDefault) {
builder.append(WriteConcernOptions::kWriteConcernField,
@@ -267,7 +267,7 @@ Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const Nam
auto swDropResult = shard->runCommandWithFixedRetryAttempts(
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- ns.db().toString(),
+ nss.db().toString(),
dropCommandBSON,
Shard::RetryPolicy::kIdempotent);
@@ -311,35 +311,35 @@ Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const Nam
return {ErrorCodes::OperationFailed, sb.str()};
}
- LOG(1) << "dropCollection " << ns << " shard data deleted";
+ LOG(1) << "dropCollection " << nss.ns() << " shard data deleted";
// Remove chunk data
Status result =
catalogClient->removeConfigDocuments(opCtx,
ChunkType::ConfigNS,
- BSON(ChunkType::ns(ns.ns())),
+ BSON(ChunkType::ns(nss.ns())),
ShardingCatalogClient::kMajorityWriteConcern);
if (!result.isOK()) {
return result;
}
- LOG(1) << "dropCollection " << ns << " chunk data deleted";
+ LOG(1) << "dropCollection " << nss.ns() << " chunk data deleted";
// Mark the collection as dropped
CollectionType coll;
- coll.setNs(ns);
+ coll.setNs(nss);
coll.setDropped(true);
coll.setEpoch(ChunkVersion::DROPPED().epoch());
coll.setUpdatedAt(Grid::get(opCtx)->getNetwork()->now());
const bool upsert = false;
result = ShardingCatalogClientImpl::updateShardingCatalogEntryForCollection(
- opCtx, ns.ns(), coll, upsert);
+ opCtx, nss, coll, upsert);
if (!result.isOK()) {
return result;
}
- LOG(1) << "dropCollection " << ns << " collection marked as dropped";
+ LOG(1) << "dropCollection " << nss.ns() << " collection marked as dropped";
for (const auto& shardEntry : allShards) {
auto swShard = shardRegistry->getShard(opCtx, shardEntry.getName());
@@ -353,7 +353,7 @@ Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const Nam
shardRegistry->getConfigServerConnectionString(),
shardEntry.getName(),
fassertStatusOK(28781, ConnectionString::parse(shardEntry.getHost())),
- ns,
+ nss,
ChunkVersion::DROPPED(),
true);
@@ -390,12 +390,12 @@ Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const Nam
}
}
- LOG(1) << "dropCollection " << ns << " completed";
+ LOG(1) << "dropCollection " << nss.ns() << " completed";
catalogClient
->logChange(opCtx,
"dropCollection",
- ns.ns(),
+ nss.ns(),
BSONObj(),
ShardingCatalogClientImpl::kMajorityWriteConcern)
.ignore();
@@ -404,7 +404,7 @@ Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const Nam
}
void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
- const string& ns,
+ const NamespaceString& nss,
const boost::optional<UUID> uuid,
const ShardKeyPattern& fieldsAndOrder,
const BSONObj& defaultCollation,
@@ -418,13 +418,13 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
const auto primaryShard = uassertStatusOK(shardRegistry->getShard(opCtx, dbPrimaryShardId));
// Fail if there are partially written chunks from a previous failed shardCollection.
- checkForExistingChunks(opCtx, ns);
+ checkForExistingChunks(opCtx, nss);
// Record start in changelog
{
BSONObjBuilder collectionDetail;
collectionDetail.append("shardKey", fieldsAndOrder.toBSON());
- collectionDetail.append("collection", ns);
+ collectionDetail.append("collection", nss.ns());
if (uuid) {
uuid->appendToBuilder(&collectionDetail, "uuid");
}
@@ -433,13 +433,12 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
catalogClient
->logChange(opCtx,
"shardCollection.start",
- ns,
+ nss.ns(),
collectionDetail.obj(),
ShardingCatalogClient::kMajorityWriteConcern)
.transitional_ignore();
}
- const NamespaceString nss(ns);
// Construct the collection default collator.
std::unique_ptr<CollatorInterface> defaultCollator;
@@ -467,7 +467,7 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
coll.setUnique(unique);
uassertStatusOK(ShardingCatalogClientImpl::updateShardingCatalogEntryForCollection(
- opCtx, ns, coll, true /*upsert*/));
+ opCtx, nss, coll, true /*upsert*/));
}
auto shard = uassertStatusOK(shardRegistry->getShard(opCtx, dbPrimaryShardId));
@@ -480,7 +480,7 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
shardRegistry->getConfigServerConnectionString(),
dbPrimaryShardId,
primaryShard->getConnString(),
- NamespaceString(ns),
+ nss,
collVersion,
true);
@@ -493,14 +493,14 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
auto status = ssvResponse.isOK() ? std::move(ssvResponse.getValue().commandStatus)
: std::move(ssvResponse.getStatus());
if (!status.isOK()) {
- warning() << "could not update initial version of " << ns << " on shard primary "
+ warning() << "could not update initial version of " << nss.ns() << " on shard primary "
<< dbPrimaryShardId << causedBy(redact(status));
}
catalogClient
->logChange(opCtx,
"shardCollection.end",
- ns,
+ nss.ns(),
BSON("version" << collVersion.toString()),
ShardingCatalogClient::kMajorityWriteConcern)
.transitional_ignore();
@@ -515,7 +515,7 @@ void ShardingCatalogManager::generateUUIDsForExistingShardedCollections(Operatio
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(CollectionType::ConfigNS),
+ CollectionType::ConfigNS,
BSON(CollectionType::uuid.name() << BSON("$exists" << false) << "dropped" << false),
BSONObj(), // sort
boost::none // limit
@@ -545,7 +545,7 @@ void ShardingCatalogManager::generateUUIDsForExistingShardedCollections(Operatio
collType.setUUID(uuid);
uassertStatusOK(ShardingCatalogClientImpl::updateShardingCatalogEntryForCollection(
- opCtx, collType.getNs().ns(), collType, false /* upsert */));
+ opCtx, collType.getNs(), collType, false /* upsert */));
LOG(2) << "updated entry in config.collections for sharded collection " << collType.getNs()
<< " with generated UUID " << uuid;
}
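
[Editor's note] checkForExistingChunks above now takes the namespace pre-parsed: the count command is addressed to the namespace's database (ChunkType::ConfigNS.db()) while the collection name goes in the command body (ChunkType::ConfigNS.coll()). A hedged sketch of that split follows; the class below is a reimplementation for illustration, and the command machinery (Grid, Shard, BSON) is deliberately not modeled.

#include <cassert>
#include <string>

// Minimal parse of "db.coll", mirroring the two accessors the hunk relies on:
// db() for routing the command, coll() for the "count" field.
class NamespaceString {
public:
    explicit NamespaceString(const std::string& fullNs) {
        const auto dot = fullNs.find('.');
        _db = fullNs.substr(0, dot);
        _coll = (dot == std::string::npos) ? std::string() : fullNs.substr(dot + 1);
    }
    const std::string& db() const { return _db; }
    const std::string& coll() const { return _coll; }
private:
    std::string _db;
    std::string _coll;
};

int main() {
    const NamespaceString configChunks("config.chunks");
    // The count runs on the "config" database and counts the "chunks"
    // collection; taking NamespaceString by const reference means this split
    // happens once, instead of at every NamespaceString(ChunkType::ConfigNS)
    // call site the diff removes.
    assert(configChunks.db() == "config");
    assert(configChunks.coll() == "chunks");
    return 0;
}
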
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp
index e0c4574e510..a3e5c724208 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp
@@ -74,7 +74,7 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(DatabaseType::ConfigNS),
+ DatabaseType::ConfigNS,
queryBuilder.obj(),
BSONObj(),
1))
@@ -144,7 +144,7 @@ StatusWith<std::vector<std::string>> ShardingCatalogManager::getDatabasesForShar
opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(DatabaseType::ConfigNS),
+ DatabaseType::ConfigNS,
BSON(DatabaseType::primary(shardId.toString())),
BSONObj(),
boost::none); // no limit
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp
index efee3b53771..ebbcb8d253a 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp
@@ -99,7 +99,7 @@ StatusWith<std::string> generateNewShardName(OperationContext* opCtx) {
opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(ShardType::ConfigNS),
+ ShardType::ConfigNS,
shardNameRegex.obj(),
BSON(ShardType::name() << -1),
1);
@@ -739,7 +739,7 @@ StatusWith<ShardDrainingStatus> ShardingCatalogManager::removeShard(OperationCon
std::string name = shardId.toString();
auto countStatus = _runCountCommandOnConfig(
opCtx,
- NamespaceString(ShardType::ConfigNS),
+ ShardType::ConfigNS,
BSON(ShardType::name() << NE << name << ShardType::draining(true)));
if (!countStatus.isOK()) {
return countStatus.getStatus();
@@ -749,8 +749,8 @@ StatusWith<ShardDrainingStatus> ShardingCatalogManager::removeShard(OperationCon
"Can't have more than one draining shard at a time");
}
- countStatus = _runCountCommandOnConfig(
- opCtx, NamespaceString(ShardType::ConfigNS), BSON(ShardType::name() << NE << name));
+ countStatus =
+ _runCountCommandOnConfig(opCtx, ShardType::ConfigNS, BSON(ShardType::name() << NE << name));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
@@ -759,10 +759,8 @@ StatusWith<ShardDrainingStatus> ShardingCatalogManager::removeShard(OperationCon
}
// Figure out if shard is already draining
- countStatus =
- _runCountCommandOnConfig(opCtx,
- NamespaceString(ShardType::ConfigNS),
- BSON(ShardType::name() << name << ShardType::draining(true)));
+ countStatus = _runCountCommandOnConfig(
+ opCtx, ShardType::ConfigNS, BSON(ShardType::name() << name << ShardType::draining(true)));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
@@ -802,15 +800,15 @@ StatusWith<ShardDrainingStatus> ShardingCatalogManager::removeShard(OperationCon
// Draining has already started, now figure out how many chunks and databases are still on the
// shard.
- countStatus = _runCountCommandOnConfig(
- opCtx, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::shard(name)));
+ countStatus =
+ _runCountCommandOnConfig(opCtx, ChunkType::ConfigNS, BSON(ChunkType::shard(name)));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
const long long chunkCount = countStatus.getValue();
- countStatus = _runCountCommandOnConfig(
- opCtx, NamespaceString(DatabaseType::ConfigNS), BSON(DatabaseType::primary(name)));
+ countStatus =
+ _runCountCommandOnConfig(opCtx, DatabaseType::ConfigNS, BSON(DatabaseType::primary(name)));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
@@ -927,17 +925,17 @@ StatusWith<ShardId> ShardingCatalogManager::_selectShardForNewDatabase(
}
StatusWith<long long> ShardingCatalogManager::_runCountCommandOnConfig(OperationContext* opCtx,
- const NamespaceString& ns,
+ const NamespaceString& nss,
BSONObj query) {
BSONObjBuilder countBuilder;
- countBuilder.append("count", ns.coll());
+ countBuilder.append("count", nss.coll());
countBuilder.append("query", query);
auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto resultStatus =
configShard->runCommandWithFixedRetryAttempts(opCtx,
kConfigReadSelector,
- ns.db().toString(),
+ nss.db().toString(),
countBuilder.done(),
Shard::kDefaultConfigCommandTimeout,
Shard::RetryPolicy::kIdempotent);
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_zone_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_zone_operations.cpp
index 5b21ab927ab..b841c7d0ee0 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_zone_operations.cpp
@@ -60,17 +60,17 @@ const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::
*/
Status checkForOveralappedZonedKeyRange(OperationContext* opCtx,
Shard* configServer,
- const NamespaceString& ns,
+ const NamespaceString& nss,
const ChunkRange& range,
const std::string& zoneName,
const KeyPattern& shardKeyPattern) {
- DistributionStatus chunkDist(ns, ShardToChunksMap{});
+ DistributionStatus chunkDist(nss, ShardToChunksMap{});
auto tagStatus = configServer->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(TagsType::ConfigNS),
- BSON(TagsType::ns(ns.ns())),
+ TagsType::ConfigNS,
+ BSON(TagsType::ns(nss.ns())),
BSONObj(),
0);
if (!tagStatus.isOK()) {
@@ -108,21 +108,21 @@ Status checkForOveralappedZonedKeyRange(OperationContext* opCtx,
/**
* Returns a new range based on the given range with the full shard key.
* Returns:
- * - ErrorCodes::NamespaceNotSharded if ns is not sharded.
+ * - ErrorCodes::NamespaceNotSharded if nss is not sharded.
* - ErrorCodes::ShardKeyNotFound if range is not compatible (for example, not a prefix of shard
- * key) with the shard key of ns.
+ * key) with the shard key of nss.
*/
StatusWith<ChunkRange> includeFullShardKey(OperationContext* opCtx,
Shard* configServer,
- const NamespaceString& ns,
+ const NamespaceString& nss,
const ChunkRange& range,
KeyPattern* shardKeyPatternOut) {
auto findCollStatus =
configServer->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(CollectionType::ConfigNS),
- BSON(CollectionType::fullNs(ns.ns())),
+ CollectionType::ConfigNS,
+ BSON(CollectionType::fullNs(nss.ns())),
BSONObj(),
1);
@@ -133,7 +133,7 @@ StatusWith<ChunkRange> includeFullShardKey(OperationContext* opCtx,
const auto& findCollResult = findCollStatus.getValue().docs;
if (findCollResult.size() < 1) {
- return {ErrorCodes::NamespaceNotSharded, str::stream() << ns.ns() << " is not sharded"};
+ return {ErrorCodes::NamespaceNotSharded, str::stream() << nss.ns() << " is not sharded"};
}
auto parseStatus = CollectionType::fromBSON(findCollResult.front());
@@ -143,7 +143,7 @@ StatusWith<ChunkRange> includeFullShardKey(OperationContext* opCtx,
auto collDoc = parseStatus.getValue();
if (collDoc.getDropped()) {
- return {ErrorCodes::NamespaceNotSharded, str::stream() << ns.ns() << " is not sharded"};
+ return {ErrorCodes::NamespaceNotSharded, str::stream() << nss.ns() << " is not sharded"};
}
const auto& shardKeyPattern = collDoc.getKeyPattern();
@@ -155,7 +155,7 @@ StatusWith<ChunkRange> includeFullShardKey(OperationContext* opCtx,
str::stream() << "min: " << range.getMin() << " is not a prefix of the shard key "
<< shardKeyBSON
<< " of ns: "
- << ns.ns()};
+ << nss.ns()};
}
if (!range.getMax().isFieldNamePrefixOf(shardKeyBSON)) {
@@ -163,7 +163,7 @@ StatusWith<ChunkRange> includeFullShardKey(OperationContext* opCtx,
str::stream() << "max: " << range.getMax() << " is not a prefix of the shard key "
<< shardKeyBSON
<< " of ns: "
- << ns.ns()};
+ << nss.ns()};
}
return ChunkRange(shardKeyPattern.extendRangeBound(range.getMin(), false),
@@ -268,7 +268,7 @@ Status ShardingCatalogManager::removeShardFromZone(OperationContext* opCtx,
configShard->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(TagsType::ConfigNS),
+ TagsType::ConfigNS,
BSON(TagsType::tag() << zoneName),
BSONObj(),
1);
@@ -310,7 +310,7 @@ Status ShardingCatalogManager::removeShardFromZone(OperationContext* opCtx,
Status ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
- const NamespaceString& ns,
+ const NamespaceString& nss,
const ChunkRange& givenRange,
const std::string& zoneName) {
Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
@@ -319,7 +319,7 @@ Status ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
KeyPattern shardKeyPattern{BSONObj()};
auto fullShardKeyStatus =
- includeFullShardKey(opCtx, configServer.get(), ns, givenRange, &shardKeyPattern);
+ includeFullShardKey(opCtx, configServer.get(), nss, givenRange, &shardKeyPattern);
if (!fullShardKeyStatus.isOK()) {
return fullShardKeyStatus.getStatus();
}
@@ -330,7 +330,7 @@ Status ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
configServer->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ShardType::ConfigNS),
+ ShardType::ConfigNS,
BSON(ShardType::tags() << zoneName),
BSONObj(),
1);
@@ -346,18 +346,18 @@ Status ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
}
auto overlapStatus = checkForOveralappedZonedKeyRange(
- opCtx, configServer.get(), ns, fullShardKeyRange, zoneName, shardKeyPattern);
+ opCtx, configServer.get(), nss, fullShardKeyRange, zoneName, shardKeyPattern);
if (!overlapStatus.isOK()) {
return overlapStatus;
}
BSONObj updateQuery(
- BSON("_id" << BSON(TagsType::ns(ns.ns()) << TagsType::min(fullShardKeyRange.getMin()))));
+ BSON("_id" << BSON(TagsType::ns(nss.ns()) << TagsType::min(fullShardKeyRange.getMin()))));
BSONObjBuilder updateBuilder;
updateBuilder.append("_id",
- BSON(TagsType::ns(ns.ns()) << TagsType::min(fullShardKeyRange.getMin())));
- updateBuilder.append(TagsType::ns(), ns.ns());
+ BSON(TagsType::ns(nss.ns()) << TagsType::min(fullShardKeyRange.getMin())));
+ updateBuilder.append(TagsType::ns(), nss.ns());
updateBuilder.append(TagsType::min(), fullShardKeyRange.getMin());
updateBuilder.append(TagsType::max(), fullShardKeyRange.getMax());
updateBuilder.append(TagsType::tag(), zoneName);
@@ -373,7 +373,7 @@ Status ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
}
Status ShardingCatalogManager::removeKeyRangeFromZone(OperationContext* opCtx,
- const NamespaceString& ns,
+ const NamespaceString& nss,
const ChunkRange& range) {
Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
@@ -381,13 +381,13 @@ Status ShardingCatalogManager::removeKeyRangeFromZone(OperationContext* opCtx,
KeyPattern shardKeyPattern{BSONObj()};
auto fullShardKeyStatus =
- includeFullShardKey(opCtx, configServer.get(), ns, range, &shardKeyPattern);
+ includeFullShardKey(opCtx, configServer.get(), nss, range, &shardKeyPattern);
if (!fullShardKeyStatus.isOK()) {
return fullShardKeyStatus.getStatus();
}
BSONObjBuilder removeBuilder;
- removeBuilder.append("_id", BSON(TagsType::ns(ns.ns()) << TagsType::min(range.getMin())));
+ removeBuilder.append("_id", BSON(TagsType::ns(nss.ns()) << TagsType::min(range.getMin())));
removeBuilder.append(TagsType::max(), range.getMax());
return Grid::get(opCtx)->catalogClient()->removeConfigDocuments(
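
[Editor's note] includeFullShardKey above rejects a range whose bounds are not a field-name prefix of the shard key. Below is a rough model of that check, with std::vector<std::string> standing in for BSON field names; isFieldNamePrefixOf here is a free-function reimplementation for illustration, not the BSONObj method.

#include <cassert>
#include <string>
#include <vector>

// Returns true when every field of `bound` matches the shard key's leading
// fields, in order: {a} prefixes shard key {a, b}, but {b} does not.
bool isFieldNamePrefixOf(const std::vector<std::string>& bound,
                         const std::vector<std::string>& shardKey) {
    if (bound.size() > shardKey.size()) {
        return false;
    }
    for (std::size_t i = 0; i < bound.size(); ++i) {
        if (bound[i] != shardKey[i]) {
            return false;  // fields must match from the front, in order
        }
    }
    return true;
}

int main() {
    const std::vector<std::string> shardKey{"a", "b"};
    const std::vector<std::string> minBound{"a"};
    const std::vector<std::string> fullBound{"a", "b"};
    const std::vector<std::string> notPrefix{"b"};
    assert(isFieldNamePrefixOf(minBound, shardKey));   // prefix: accepted
    assert(isFieldNamePrefixOf(fullBound, shardKey));  // full key: accepted
    assert(!isFieldNamePrefixOf(notPrefix, shardKey)); // not a prefix: rejected
    return 0;
}
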
diff --git a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
index 4e02dd07dfd..32bcb66778a 100644
--- a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
@@ -40,9 +40,11 @@ namespace {
using MergeChunkTest = ConfigServerTestFixture;
+const NamespaceString kNamespace("TestDB.TestColl");
+
TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
@@ -76,7 +78,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
getConfigShard()->exhaustiveFindOnConfig(operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
+ ChunkType::ConfigNS,
BSON(ChunkType::ns() << "TestDB.TestColl"),
BSON(ChunkType::lastmod << -1),
boost::none));
@@ -100,7 +102,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
@@ -140,7 +142,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
getConfigShard()->exhaustiveFindOnConfig(operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
+ ChunkType::ConfigNS,
BSON(ChunkType::ns() << "TestDB.TestColl"),
BSON(ChunkType::lastmod << -1),
boost::none));
@@ -164,8 +166,8 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
ChunkType chunk, otherChunk;
- chunk.setNS("TestDB.TestColl");
- otherChunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
+ otherChunk.setNS(kNamespace);
auto collEpoch = OID::gen();
auto origVersion = ChunkVersion(1, 2, collEpoch);
@@ -208,7 +210,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
getConfigShard()->exhaustiveFindOnConfig(operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
+ ChunkType::ConfigNS,
BSON(ChunkType::ns() << "TestDB.TestColl"),
BSON(ChunkType::lastmod << -1),
boost::none));
@@ -232,7 +234,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 2, OID::gen());
chunk.setVersion(origVersion);
@@ -272,7 +274,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
getConfigShard()->exhaustiveFindOnConfig(operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
+ ChunkType::ConfigNS,
BSON(ChunkType::ns() << "TestDB.TestColl"),
BSON(ChunkType::lastmod << -1),
boost::none));
@@ -301,7 +303,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
TEST_F(MergeChunkTest, NonExistingNamespace) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
@@ -335,7 +337,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
@@ -369,7 +371,7 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsPrecondition) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
@@ -411,7 +413,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsPrecondition) {
getConfigShard()->exhaustiveFindOnConfig(operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
+ ChunkType::ConfigNS,
BSON(ChunkType::ns() << "TestDB.TestColl"),
BSON(ChunkType::lastmod << -1),
boost::none));
@@ -436,7 +438,7 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
ChunkVersion version = ChunkVersion(1, 0, epoch);
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
chunk.setShard(ShardId("shard0000"));
chunk.setVersion(version);
diff --git a/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
index cc181595b89..5d0a9ea668a 100644
--- a/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
@@ -108,12 +108,11 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldSucceedWhenNoChunks
// Insert a chunk range document referring to a different zone
TagsType tagDoc;
- tagDoc.setNS("test.foo");
+ tagDoc.setNS(NamespaceString("test.foo"));
tagDoc.setMinKey(BSON("x" << 0));
tagDoc.setMaxKey(BSON("x" << 10));
tagDoc.setTag("y");
- insertToConfigCollection(
- operationContext(), NamespaceString(TagsType::ConfigNS), tagDoc.toBSON())
+ insertToConfigCollection(operationContext(), TagsType::ConfigNS, tagDoc.toBSON())
.transitional_ignore();
ASSERT_OK(ShardingCatalogManager::get(operationContext())
@@ -149,12 +148,11 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldFailWhenAChunkRefer
setupShards({shardA, shardB}).transitional_ignore();
TagsType tagDoc;
- tagDoc.setNS("test.foo");
+ tagDoc.setNS(NamespaceString("test.foo"));
tagDoc.setMinKey(BSON("x" << 0));
tagDoc.setMaxKey(BSON("x" << 10));
tagDoc.setTag("z");
- insertToConfigCollection(
- operationContext(), NamespaceString(TagsType::ConfigNS), tagDoc.toBSON())
+ insertToConfigCollection(operationContext(), TagsType::ConfigNS, tagDoc.toBSON())
.transitional_ignore();
auto status = ShardingCatalogManager::get(operationContext())
@@ -250,7 +248,6 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneFromShardShouldErrorIfShardDocIsMalfor
<< "z"));
- insertToConfigCollection(
- operationContext(), NamespaceString(ShardType::ConfigNS), invalidShardDoc);
+ insertToConfigCollection(operationContext(), ShardType::ConfigNS, invalidShardDoc);
auto status =
diff --git a/src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp b/src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp
index 644631d4459..2db9d9658e0 100644
--- a/src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_remove_shard_test.cpp
@@ -101,7 +101,7 @@ protected:
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(ShardType::ConfigNS),
+ ShardType::ConfigNS,
BSON(ShardType::name() << shardName),
BSONObj(),
1));
@@ -314,7 +314,7 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(ShardType::ConfigNS),
+ ShardType::ConfigNS,
BSON(ShardType::name() << shard1.getName()),
BSONObj(),
1));
diff --git a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
index 6170ce5bcb7..6bf42c0609f 100644
--- a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
@@ -73,10 +73,12 @@ using unittest::assertGet;
const ShardId testPrimaryShard = ShardId("shard0");
+const NamespaceString kNamespace("db1.foo");
+
class ShardCollectionTest : public ConfigServerTestFixture {
public:
void expectCount(const HostAndPort& receivingHost,
- const NamespaceString& expectedNs,
+ const NamespaceString& expectedNss,
const BSONObj& expectedQuery,
const StatusWith<long long>& response) {
onCommand([&](const RemoteCommandRequest& request) {
@@ -86,7 +88,7 @@ public:
ASSERT_EQUALS("count", cmdName);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQUALS(expectedNs.toString(), nss.toString());
+ ASSERT_EQUALS(expectedNss, nss);
if (expectedQuery.isEmpty()) {
auto queryElem = request.cmdObj["query"];
@@ -112,19 +114,17 @@ private:
};
TEST_F(ShardCollectionTest, anotherMongosSharding) {
- const auto nss = NamespaceString("db1.foo");
-
ShardType shard;
shard.setName("shard0");
shard.setHost("shardHost");
ASSERT_OK(setupShards(vector<ShardType>{shard}));
- setupDatabase(nss.db().toString(), shard.getName(), true);
+ setupDatabase(kNamespace.db().toString(), shard.getName(), true);
// Set up chunks in the collection, indicating that another mongos must have already started
// sharding the collection.
ChunkType chunk;
- chunk.setNS(nss.ns());
+ chunk.setNS(kNamespace);
chunk.setVersion(ChunkVersion(2, 0, OID::gen()));
chunk.setShard(shard.getName());
chunk.setMin(BSON("_id" << 1));
@@ -136,7 +136,7 @@ TEST_F(ShardCollectionTest, anotherMongosSharding) {
ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext())
->shardCollection(operationContext(),
- nss.ns(),
+ kNamespace,
boost::none, // UUID
shardKeyPattern,
defaultCollation,
@@ -163,9 +163,7 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
ASSERT_OK(setupShards(vector<ShardType>{shard}));
- const auto nss = NamespaceString("db1.foo");
-
- setupDatabase(nss.db().toString(), shard.getName(), true);
+ setupDatabase(kNamespace.db().toString(), shard.getName(), true);
ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
BSONObj defaultCollation;
@@ -177,7 +175,7 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
auto opCtx = cc().makeOperationContext();
ShardingCatalogManager::get(operationContext())
->shardCollection(opCtx.get(),
- nss.ns(),
+ kNamespace,
boost::none, // UUID
shardKeyPattern,
defaultCollation,
@@ -188,13 +186,13 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
});
// Report that no documents exist for the given collection on the primary shard
- expectCount(shardHost, nss, BSONObj(), 0);
+ expectCount(shardHost, kNamespace, BSONObj(), 0);
// Expect the set shard version for that namespace.
// We do not check for a specific ChunkVersion, because we cannot easily know the OID that was
// generated by shardCollection for the first chunk.
// TODO SERVER-29451: add hooks to the mock storage engine to expect reads and writes.
- expectSetShardVersion(shardHost, shard, nss, boost::none /* expected ChunkVersion */);
+ expectSetShardVersion(shardHost, shard, kNamespace, boost::none /* expected ChunkVersion */);
future.timed_get(kFutureTimeout);
}
@@ -235,14 +233,11 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
ASSERT_OK(setupShards(vector<ShardType>{shard0, shard1, shard2}));
- const auto nss = NamespaceString("db1.foo");
- string ns = "db1.foo";
-
DatabaseType db;
db.setName("db1");
db.setPrimary(shard0.getName());
db.setSharded(true);
- setupDatabase(nss.db().toString(), shard0.getName(), true);
+ setupDatabase(kNamespace.db().toString(), shard0.getName(), true);
ShardKeyPattern keyPattern(BSON("_id" << 1));
@@ -254,7 +249,7 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
ChunkVersion expectedVersion(1, 0, OID::gen());
ChunkType expectedChunk0;
- expectedChunk0.setNS(ns);
+ expectedChunk0.setNS(kNamespace);
expectedChunk0.setShard(shard0.getName());
expectedChunk0.setMin(keyPattern.getKeyPattern().globalMin());
expectedChunk0.setMax(splitPoint0);
@@ -262,7 +257,7 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
expectedVersion.incMinor();
ChunkType expectedChunk1;
- expectedChunk1.setNS(ns);
+ expectedChunk1.setNS(kNamespace);
expectedChunk1.setShard(shard1.getName());
expectedChunk1.setMin(splitPoint0);
expectedChunk1.setMax(splitPoint1);
@@ -270,7 +265,7 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
expectedVersion.incMinor();
ChunkType expectedChunk2;
- expectedChunk2.setNS(ns);
+ expectedChunk2.setNS(kNamespace);
expectedChunk2.setShard(shard2.getName());
expectedChunk2.setMin(splitPoint1);
expectedChunk2.setMax(splitPoint2);
@@ -278,7 +273,7 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
expectedVersion.incMinor();
ChunkType expectedChunk3;
- expectedChunk3.setNS(ns);
+ expectedChunk3.setNS(kNamespace);
expectedChunk3.setShard(shard0.getName());
expectedChunk3.setMin(splitPoint2);
expectedChunk3.setMax(splitPoint3);
@@ -286,7 +281,7 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
expectedVersion.incMinor();
ChunkType expectedChunk4;
- expectedChunk4.setNS(ns);
+ expectedChunk4.setNS(kNamespace);
expectedChunk4.setShard(shard1.getName());
expectedChunk4.setMin(splitPoint3);
expectedChunk4.setMax(keyPattern.getKeyPattern().globalMax());
@@ -307,7 +302,7 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
auto opCtx = cc().makeOperationContext();
ShardingCatalogManager::get(operationContext())
->shardCollection(opCtx.get(),
- ns,
+ kNamespace,
boost::none, // UUID
keyPattern,
defaultCollation,
@@ -321,8 +316,7 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
// We do not check for a specific ChunkVersion, because we cannot easily know the OID that was
// generated by shardCollection for the first chunk.
// TODO SERVER-29451: add hooks to the mock storage engine to expect reads and writes.
- expectSetShardVersion(
- shard0Host, shard0, NamespaceString(ns), boost::none /* expected ChunkVersion */);
+ expectSetShardVersion(shard0Host, shard0, kNamespace, boost::none /* expected ChunkVersion */);
future.timed_get(kFutureTimeout);
}
@@ -342,10 +336,7 @@ TEST_F(ShardCollectionTest, withInitialData) {
ASSERT_OK(setupShards(vector<ShardType>{shard}));
- const auto nss = NamespaceString("db1.foo");
- string ns = "db1.foo";
-
- setupDatabase(nss.db().toString(), shard.getName(), true);
+ setupDatabase(kNamespace.db().toString(), shard.getName(), true);
ShardKeyPattern keyPattern(BSON("_id" << 1));
@@ -357,7 +348,7 @@ TEST_F(ShardCollectionTest, withInitialData) {
ChunkVersion expectedVersion(1, 0, OID::gen());
ChunkType expectedChunk0;
- expectedChunk0.setNS(ns);
+ expectedChunk0.setNS(kNamespace);
expectedChunk0.setShard(shard.getName());
expectedChunk0.setMin(keyPattern.getKeyPattern().globalMin());
expectedChunk0.setMax(splitPoint0);
@@ -365,7 +356,7 @@ TEST_F(ShardCollectionTest, withInitialData) {
expectedVersion.incMinor();
ChunkType expectedChunk1;
- expectedChunk1.setNS(ns);
+ expectedChunk1.setNS(kNamespace);
expectedChunk1.setShard(shard.getName());
expectedChunk1.setMin(splitPoint0);
expectedChunk1.setMax(splitPoint1);
@@ -373,7 +364,7 @@ TEST_F(ShardCollectionTest, withInitialData) {
expectedVersion.incMinor();
ChunkType expectedChunk2;
- expectedChunk2.setNS(ns);
+ expectedChunk2.setNS(kNamespace);
expectedChunk2.setShard(shard.getName());
expectedChunk2.setMin(splitPoint1);
expectedChunk2.setMax(splitPoint2);
@@ -381,7 +372,7 @@ TEST_F(ShardCollectionTest, withInitialData) {
expectedVersion.incMinor();
ChunkType expectedChunk3;
- expectedChunk3.setNS(ns);
+ expectedChunk3.setNS(kNamespace);
expectedChunk3.setShard(shard.getName());
expectedChunk3.setMin(splitPoint2);
expectedChunk3.setMax(splitPoint3);
@@ -389,7 +380,7 @@ TEST_F(ShardCollectionTest, withInitialData) {
expectedVersion.incMinor();
ChunkType expectedChunk4;
- expectedChunk4.setNS(ns);
+ expectedChunk4.setNS(kNamespace);
expectedChunk4.setShard(shard.getName());
expectedChunk4.setMin(splitPoint3);
expectedChunk4.setMax(keyPattern.getKeyPattern().globalMax());
@@ -407,7 +398,7 @@ TEST_F(ShardCollectionTest, withInitialData) {
auto opCtx = cc().makeOperationContext();
ShardingCatalogManager::get(operationContext())
->shardCollection(opCtx.get(),
- ns,
+ kNamespace,
boost::none, // UUID
keyPattern,
defaultCollation,
@@ -419,14 +410,15 @@ TEST_F(ShardCollectionTest, withInitialData) {
// Report that documents exist for the given collection on the primary shard, so that calling
// splitVector is required for calculating the initial split points.
- expectCount(shardHost, NamespaceString(ns), BSONObj(), 1000);
+ expectCount(shardHost, kNamespace, BSONObj(), 1000);
// Respond to the splitVector command sent to the shard to figure out initial split points
onCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(shardHost, request.target);
string cmdName = request.cmdObj.firstElement().fieldName();
ASSERT_EQUALS("splitVector", cmdName);
- ASSERT_EQUALS(ns, request.cmdObj["splitVector"].String()); // splitVector uses full ns
+ ASSERT_EQUALS(kNamespace.ns(),
+ request.cmdObj["splitVector"].String()); // splitVector uses full ns
ASSERT_BSONOBJ_EQ(keyPattern.toBSON(), request.cmdObj["keyPattern"].Obj());
ASSERT_BSONOBJ_EQ(keyPattern.getKeyPattern().globalMin(), request.cmdObj["min"].Obj());
@@ -448,7 +440,7 @@ TEST_F(ShardCollectionTest, withInitialData) {
// We do not check for a specific ChunkVersion, because we cannot easily know the OID that was
// generated by shardCollection for the first chunk.
// TODO SERVER-29451: add hooks to the mock storage engine to expect reads and writes.
- expectSetShardVersion(shardHost, shard, NamespaceString(ns), boost::none);
+ expectSetShardVersion(shardHost, shard, kNamespace, boost::none);
future.timed_get(kFutureTimeout);
}
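
[Editor's note] The withInitialData test above still asserts against kNamespace.ns() for the splitVector command, because splitVector carries the full "db.coll" string on the wire: the typed NamespaceString is serialized exactly once, when the command is built. A toy illustration of that boundary follows; buildSplitVectorCommand is hypothetical, and plain string concatenation stands in for BSON construction.

#include <iostream>
#include <string>

struct NamespaceString {
    std::string db;
    std::string coll;
    std::string ns() const { return db + "." + coll; }  // wire format: "db.coll"
};

// Hypothetical command builder: internal code stays typed, and .ns() appears
// only here, at the serialization boundary.
std::string buildSplitVectorCommand(const NamespaceString& nss) {
    return "{ splitVector: \"" + nss.ns() + "\" }";
}

int main() {
    const NamespaceString kNamespace{"db1", "foo"};
    std::cout << buildSplitVectorCommand(kNamespace) << '\n';
    // prints: { splitVector: "db1.foo" }
    return 0;
}
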
diff --git a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
index 1a9d8eb857c..07c2301d624 100644
--- a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
@@ -39,9 +39,11 @@ namespace {
using SplitChunkTest = ConfigServerTestFixture;
+const NamespaceString kNamespace("TestDB", "TestColl");
+
TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
@@ -90,7 +92,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
@@ -151,8 +153,8 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
ChunkType chunk, chunk2;
- chunk.setNS("TestDB.TestColl");
- chunk2.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
+ chunk2.setNS(kNamespace);
auto collEpoch = OID::gen();
// set up first chunk
@@ -211,7 +213,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
TEST_F(SplitChunkTest, PreConditionFailErrors) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
@@ -240,7 +242,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
@@ -267,7 +269,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
@@ -294,7 +296,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
@@ -321,7 +323,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
@@ -348,7 +350,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
auto origVersion = ChunkVersion(1, 0, OID::gen());
chunk.setVersion(origVersion);
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index c460b66c93a..fbeb94d178d 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -75,6 +75,7 @@ using unittest::assertGet;
using ShardingCatalogClientTest = ShardingCatalogTestFixture;
const int kMaxCommandRetry = 3;
+const NamespaceString kNamespace("TestDB", "TestColl");
BSONObj getReplSecondaryOkMetadata() {
BSONObjBuilder o;
@@ -95,8 +96,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
const OpTime newOpTime(Timestamp(7, 6), 5);
auto future = launchAsync([this, &expectedColl] {
- return assertGet(
- catalogClient()->getCollection(operationContext(), expectedColl.getNs().ns()));
+ return assertGet(catalogClient()->getCollection(operationContext(), expectedColl.getNs()));
});
onFindWithMetadataCommand(
@@ -106,12 +106,12 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
rpc::TrackingMetadata::removeTrackingData(request.metadata));
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
+ ASSERT_EQ(nss, CollectionType::ConfigNS);
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
// Ensure the query is correct
- ASSERT_EQ(query->nss().ns(), CollectionType::ConfigNS);
+ ASSERT_EQ(query->nss(), CollectionType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(),
BSON(CollectionType::fullNs(expectedColl.getNs().ns())));
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
@@ -136,7 +136,8 @@ TEST_F(ShardingCatalogClientTest, GetCollectionNotExisting) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
- auto status = catalogClient()->getCollection(operationContext(), "NonExistent");
+ auto status =
+ catalogClient()->getCollection(operationContext(), NamespaceString("NonExistent"));
ASSERT_EQUALS(status.getStatus(), ErrorCodes::NamespaceNotFound);
});
@@ -175,14 +176,14 @@ TEST_F(ShardingCatalogClientTest, GetDatabaseExisting) {
onFindWithMetadataCommand([this, &expectedDb, newOpTime](const RemoteCommandRequest& request) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), DatabaseType::ConfigNS);
+ ASSERT_EQ(nss, DatabaseType::ConfigNS);
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- ASSERT_EQ(query->nss().ns(), DatabaseType::ConfigNS);
+ ASSERT_EQ(query->nss(), DatabaseType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSON(DatabaseType::name(expectedDb.getName())));
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
ASSERT(!query->getLimit());
@@ -304,11 +305,11 @@ TEST_F(ShardingCatalogClientTest, GetAllShardsValid) {
rpc::TrackingMetadata::removeTrackingData(request.metadata));
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), ShardType::ConfigNS);
+ ASSERT_EQ(nss, ShardType::ConfigNS);
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- ASSERT_EQ(query->nss().ns(), ShardType::ConfigNS);
+ ASSERT_EQ(query->nss(), ShardType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
ASSERT_FALSE(query->getLimit().is_initialized());
@@ -357,14 +358,14 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
OID oid = OID::gen();
ChunkType chunkA;
- chunkA.setNS("TestDB.TestColl");
+ chunkA.setNS(kNamespace);
chunkA.setMin(BSON("a" << 1));
chunkA.setMax(BSON("a" << 100));
chunkA.setVersion({1, 2, oid});
chunkA.setShard(ShardId("shard0000"));
ChunkType chunkB;
- chunkB.setNS("TestDB.TestColl");
+ chunkB.setNS(kNamespace);
chunkB.setMin(BSON("a" << 100));
chunkB.setMax(BSON("a" << 200));
chunkB.setVersion({3, 4, oid});
@@ -401,11 +402,11 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
rpc::TrackingMetadata::removeTrackingData(request.metadata));
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), ChunkType::ConfigNS);
+ ASSERT_EQ(nss, ChunkType::ConfigNS);
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- ASSERT_EQ(query->nss().ns(), ChunkType::ConfigNS);
+ ASSERT_EQ(query->nss(), ChunkType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), chunksQuery);
ASSERT_BSONOBJ_EQ(query->getSort(), BSON(ChunkType::lastmod() << -1));
ASSERT_EQ(query->getLimit().get(), 1);
@@ -453,11 +454,11 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSNoSortNoLimit) {
rpc::TrackingMetadata::removeTrackingData(request.metadata));
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), ChunkType::ConfigNS);
+ ASSERT_EQ(nss, ChunkType::ConfigNS);
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- ASSERT_EQ(query->nss().ns(), ChunkType::ConfigNS);
+ ASSERT_EQ(query->nss(), ChunkType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), chunksQuery);
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
ASSERT_FALSE(query->getLimit().is_initialized());
@@ -494,14 +495,14 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSInvalidChunk) {
onFindCommand([&chunksQuery](const RemoteCommandRequest& request) {
ChunkType chunkA;
- chunkA.setNS("TestDB.TestColl");
+ chunkA.setNS(kNamespace);
chunkA.setMin(BSON("a" << 1));
chunkA.setMax(BSON("a" << 100));
chunkA.setVersion({1, 2, OID::gen()});
chunkA.setShard(ShardId("shard0000"));
ChunkType chunkB;
- chunkB.setNS("TestDB.TestColl");
+ chunkB.setNS(kNamespace);
chunkB.setMin(BSON("a" << 100));
chunkB.setMax(BSON("a" << 200));
chunkB.setVersion({3, 4, OID::gen()});
@@ -812,11 +813,11 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
rpc::TrackingMetadata::removeTrackingData(request.metadata));
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
+ ASSERT_EQ(nss, CollectionType::ConfigNS);
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- ASSERT_EQ(query->nss().ns(), CollectionType::ConfigNS);
+ ASSERT_EQ(query->nss(), CollectionType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
@@ -865,11 +866,11 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsWithDb) {
rpc::TrackingMetadata::removeTrackingData(request.metadata));
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
+ ASSERT_EQ(nss, CollectionType::ConfigNS);
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- ASSERT_EQ(query->nss().ns(), CollectionType::ConfigNS);
+ ASSERT_EQ(query->nss(), CollectionType::ConfigNS);
{
BSONObjBuilder b;
b.appendRegex(CollectionType::fullNs(), "^test\\.");
@@ -909,14 +910,14 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsInvalidCollectionType) {
onFindCommand([this, validColl](const RemoteCommandRequest& request) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
+ ASSERT_EQ(nss, CollectionType::ConfigNS);
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- ASSERT_EQ(query->nss().ns(), CollectionType::ConfigNS);
+ ASSERT_EQ(query->nss(), CollectionType::ConfigNS);
{
BSONObjBuilder b;
b.appendRegex(CollectionType::fullNs(), "^test\\.");
@@ -955,11 +956,11 @@ TEST_F(ShardingCatalogClientTest, GetDatabasesForShardValid) {
rpc::TrackingMetadata::removeTrackingData(request.metadata));
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), DatabaseType::ConfigNS);
+ ASSERT_EQ(nss, DatabaseType::ConfigNS);
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- ASSERT_EQ(query->nss().ns(), DatabaseType::ConfigNS);
+ ASSERT_EQ(query->nss(), DatabaseType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(),
BSON(DatabaseType::primary(dbt1.getPrimary().toString())));
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
@@ -1003,20 +1004,20 @@ TEST_F(ShardingCatalogClientTest, GetTagsForCollection) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
TagsType tagA;
- tagA.setNS("TestDB.TestColl");
+ tagA.setNS(NamespaceString("TestDB.TestColl"));
tagA.setTag("TagA");
tagA.setMinKey(BSON("a" << 100));
tagA.setMaxKey(BSON("a" << 200));
TagsType tagB;
- tagB.setNS("TestDB.TestColl");
+ tagB.setNS(NamespaceString("TestDB.TestColl"));
tagB.setTag("TagB");
tagB.setMinKey(BSON("a" << 200));
tagB.setMaxKey(BSON("a" << 300));
auto future = launchAsync([this] {
- const auto& tags =
- assertGet(catalogClient()->getTagsForCollection(operationContext(), "TestDB.TestColl"));
+ const auto& tags = assertGet(catalogClient()->getTagsForCollection(
+ operationContext(), NamespaceString("TestDB.TestColl")));
ASSERT_EQ(2U, tags.size());
@@ -1028,11 +1029,11 @@ TEST_F(ShardingCatalogClientTest, GetTagsForCollection) {
rpc::TrackingMetadata::removeTrackingData(request.metadata));
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), TagsType::ConfigNS);
+ ASSERT_EQ(nss, TagsType::ConfigNS);
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- ASSERT_EQ(query->nss().ns(), TagsType::ConfigNS);
+ ASSERT_EQ(query->nss(), TagsType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSON(TagsType::ns("TestDB.TestColl")));
ASSERT_BSONOBJ_EQ(query->getSort(), BSON(TagsType::min() << 1));
@@ -1050,8 +1051,8 @@ TEST_F(ShardingCatalogClientTest, GetTagsForCollectionNoTags) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
- const auto& tags =
- assertGet(catalogClient()->getTagsForCollection(operationContext(), "TestDB.TestColl"));
+ const auto& tags = assertGet(catalogClient()->getTagsForCollection(
+ operationContext(), NamespaceString("TestDB.TestColl")));
ASSERT_EQ(0U, tags.size());
@@ -1067,21 +1068,21 @@ TEST_F(ShardingCatalogClientTest, GetTagsForCollectionInvalidTag) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
- const auto swTags =
- catalogClient()->getTagsForCollection(operationContext(), "TestDB.TestColl");
+ const auto swTags = catalogClient()->getTagsForCollection(
+ operationContext(), NamespaceString("TestDB.TestColl"));
ASSERT_EQUALS(ErrorCodes::NoSuchKey, swTags.getStatus());
});
onFindCommand([](const RemoteCommandRequest& request) {
TagsType tagA;
- tagA.setNS("TestDB.TestColl");
+ tagA.setNS(NamespaceString("TestDB.TestColl"));
tagA.setTag("TagA");
tagA.setMinKey(BSON("a" << 100));
tagA.setMaxKey(BSON("a" << 200));
TagsType tagB;
- tagB.setNS("TestDB.TestColl");
+ tagB.setNS(NamespaceString("TestDB.TestColl"));
tagB.setTag("TagB");
tagB.setMinKey(BSON("a" << 200));
// Missing maxKey
@@ -1114,7 +1115,7 @@ TEST_F(ShardingCatalogClientTest, UpdateDatabase) {
const auto opMsgRequest = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto updateOp = UpdateOp::parse(opMsgRequest);
- ASSERT_EQUALS(DatabaseType::ConfigNS, updateOp.getNamespace().ns());
+ ASSERT_EQUALS(DatabaseType::ConfigNS, updateOp.getNamespace());
const auto& updates = updateOp.getUpdates();
ASSERT_EQUALS(1U, updates.size());
@@ -1174,7 +1175,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessful) {
<< "first precondition")
<< BSON("precondition2"
<< "second precondition"));
- std::string nss = "config.chunks";
+ const NamespaceString nss("config.chunks");
ChunkVersion lastChunkVersion(0, 0, OID());
auto future = launchAsync([this, updateOps, preCondition, nss, lastChunkVersion] {
@@ -1219,7 +1220,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessfulWithCheck) {
<< "first precondition")
<< BSON("precondition2"
<< "second precondition"));
- std::string nss = "config.chunks";
+ const NamespaceString nss("config.chunks");
ChunkVersion lastChunkVersion(0, 0, OID());
auto future = launchAsync([this, updateOps, preCondition, nss, lastChunkVersion] {
@@ -1244,7 +1245,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessfulWithCheck) {
onFindCommand([this](const RemoteCommandRequest& request) {
OID oid = OID::gen();
ChunkType chunk;
- chunk.setNS("TestDB.TestColl");
+ chunk.setNS(kNamespace);
chunk.setMin(BSON("a" << 1));
chunk.setMax(BSON("a" << 100));
chunk.setVersion({1, 2, oid});
@@ -1267,7 +1268,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedFailedWithCheck) {
<< "first precondition")
<< BSON("precondition2"
<< "second precondition"));
- std::string nss = "config.chunks";
+ const NamespaceString nss("config.chunks");
ChunkVersion lastChunkVersion(0, 0, OID());
auto future = launchAsync([this, updateOps, preCondition, nss, lastChunkVersion] {
@@ -1660,7 +1661,7 @@ TEST_F(ShardingCatalogClientTest, GetNewKeys) {
ASSERT_EQ("admin", request.dbname);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(KeysCollectionDocument::ConfigNS, nss.ns());
+ ASSERT_EQ(KeysCollectionDocument::ConfigNS, nss);
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
@@ -1668,7 +1669,7 @@ TEST_F(ShardingCatalogClientTest, GetNewKeys) {
fromjson("{purpose: 'none',"
"expiresAt: {$gt: {$timestamp: {t: 1234, i: 5678}}}}"));
- ASSERT_EQ(KeysCollectionDocument::ConfigNS, query->nss().ns());
+ ASSERT_EQ(KeysCollectionDocument::ConfigNS, query->nss());
ASSERT_BSONOBJ_EQ(expectedQuery, query->getFilter());
ASSERT_BSONOBJ_EQ(BSON("expiresAt" << 1), query->getSort());
ASSERT_FALSE(query->getLimit().is_initialized());
@@ -1713,7 +1714,7 @@ TEST_F(ShardingCatalogClientTest, GetNewKeysWithEmptyCollection) {
ASSERT_EQ("admin", request.dbname);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(KeysCollectionDocument::ConfigNS, nss.ns());
+ ASSERT_EQ(KeysCollectionDocument::ConfigNS, nss);
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
@@ -1721,7 +1722,7 @@ TEST_F(ShardingCatalogClientTest, GetNewKeysWithEmptyCollection) {
fromjson("{purpose: 'none',"
"expiresAt: {$gt: {$timestamp: {t: 1234, i: 5678}}}}"));
- ASSERT_EQ(KeysCollectionDocument::ConfigNS, query->nss().ns());
+ ASSERT_EQ(KeysCollectionDocument::ConfigNS, query->nss());
ASSERT_BSONOBJ_EQ(expectedQuery, query->getFilter());
ASSERT_BSONOBJ_EQ(BSON("expiresAt" << 1), query->getSort());
ASSERT_FALSE(query->getLimit().is_initialized());
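
[Editor's note] The assertions above now compare NamespaceString values directly (ASSERT_EQ(nss, CollectionType::ConfigNS)) rather than their .ns() strings, which presumes the type is equality-comparable and printable when an assertion fails. A toy version of the two operators that make such assertions work, assuming the real class provides equivalents:

#include <iostream>
#include <string>

struct NamespaceString {
    std::string db;
    std::string coll;
};

// Component-wise equality lets test macros compare typed namespaces directly.
bool operator==(const NamespaceString& lhs, const NamespaceString& rhs) {
    return lhs.db == rhs.db && lhs.coll == rhs.coll;
}

// Stream output is what a failed assertion would use to print both operands.
std::ostream& operator<<(std::ostream& os, const NamespaceString& nss) {
    return os << nss.db << '.' << nss.coll;
}

int main() {
    const NamespaceString a{"config", "collections"};
    const NamespaceString b{"config", "collections"};
    std::cout << (a == b) << ' ' << a << '\n';  // prints: 1 config.collections
    return 0;
}
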
diff --git a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
index 94b913ec232..d32de61a18e 100644
--- a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
@@ -85,7 +85,7 @@ TEST_F(InsertRetryTest, RetryOnInterruptedAndNetworkErrorSuccess) {
auto future = launchAsync([&] {
Status status =
catalogClient()->insertConfigDocument(operationContext(),
- kTestNamespace.ns(),
+ kTestNamespace,
objToInsert,
ShardingCatalogClient::kMajorityWriteConcern);
ASSERT_OK(status);
@@ -117,7 +117,7 @@ TEST_F(InsertRetryTest, RetryOnNetworkErrorFails) {
auto future = launchAsync([&] {
Status status =
catalogClient()->insertConfigDocument(operationContext(),
- kTestNamespace.ns(),
+ kTestNamespace,
objToInsert,
ShardingCatalogClient::kMajorityWriteConcern);
ASSERT_EQ(ErrorCodes::NetworkTimeout, status.code());
@@ -152,7 +152,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorMatch) {
auto future = launchAsync([&] {
Status status =
catalogClient()->insertConfigDocument(operationContext(),
- kTestNamespace.ns(),
+ kTestNamespace,
objToInsert,
ShardingCatalogClient::kMajorityWriteConcern);
ASSERT_OK(status);
@@ -190,7 +190,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorNotFound) {
auto future = launchAsync([&] {
Status status =
catalogClient()->insertConfigDocument(operationContext(),
- kTestNamespace.ns(),
+ kTestNamespace,
objToInsert,
ShardingCatalogClient::kMajorityWriteConcern);
ASSERT_EQ(ErrorCodes::DuplicateKey, status.code());
@@ -228,7 +228,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorMismatch) {
auto future = launchAsync([&] {
Status status =
catalogClient()->insertConfigDocument(operationContext(),
- kTestNamespace.ns(),
+ kTestNamespace,
objToInsert,
ShardingCatalogClient::kMajorityWriteConcern);
ASSERT_EQ(ErrorCodes::DuplicateKey, status.code());
@@ -267,7 +267,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterWriteConcernFailureMatch) {
auto future = launchAsync([&] {
Status status =
catalogClient()->insertConfigDocument(operationContext(),
- kTestNamespace.ns(),
+ kTestNamespace,
objToInsert,
ShardingCatalogClient::kMajorityWriteConcern);
ASSERT_OK(status);
@@ -276,7 +276,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterWriteConcernFailureMatch) {
onCommand([&](const RemoteCommandRequest& request) {
const auto opMsgRequest = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto insertOp = InsertOp::parse(opMsgRequest);
- ASSERT_EQUALS(kTestNamespace.ns(), insertOp.getNamespace().ns());
+ ASSERT_EQUALS(kTestNamespace, insertOp.getNamespace());
BatchedCommandResponse response;
response.setStatus(Status::OK());
@@ -324,7 +324,7 @@ TEST_F(UpdateRetryTest, Success) {
auto future = launchAsync([&] {
auto status =
catalogClient()->updateConfigDocument(operationContext(),
- kTestNamespace.ns(),
+ kTestNamespace,
objToUpdate,
updateExpr,
false,
@@ -335,7 +335,7 @@ TEST_F(UpdateRetryTest, Success) {
onCommand([&](const RemoteCommandRequest& request) {
const auto opMsgRequest = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto updateOp = UpdateOp::parse(opMsgRequest);
- ASSERT_EQUALS(kTestNamespace.ns(), updateOp.getNamespace().ns());
+ ASSERT_EQUALS(kTestNamespace, updateOp.getNamespace());
BatchedCommandResponse response;
response.setStatus(Status::OK());
@@ -358,7 +358,7 @@ TEST_F(UpdateRetryTest, NotMasterErrorReturnedPersistently) {
auto future = launchAsync([&] {
auto status =
catalogClient()->updateConfigDocument(operationContext(),
- kTestNamespace.ns(),
+ kTestNamespace,
objToUpdate,
updateExpr,
false,
@@ -389,7 +389,7 @@ TEST_F(UpdateRetryTest, NotMasterReturnedFromTargeter) {
auto future = launchAsync([&] {
auto status =
catalogClient()->updateConfigDocument(operationContext(),
- kTestNamespace.ns(),
+ kTestNamespace,
objToUpdate,
updateExpr,
false,
@@ -420,7 +420,7 @@ TEST_F(UpdateRetryTest, NotMasterOnceSuccessAfterRetry) {
auto future = launchAsync([&] {
ASSERT_OK(
catalogClient()->updateConfigDocument(operationContext(),
- kTestNamespace.ns(),
+ kTestNamespace,
objToUpdate,
updateExpr,
false,
@@ -442,7 +442,7 @@ TEST_F(UpdateRetryTest, NotMasterOnceSuccessAfterRetry) {
onCommand([&](const RemoteCommandRequest& request) {
const auto opMsgRequest = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto updateOp = UpdateOp::parse(opMsgRequest);
- ASSERT_EQUALS(kTestNamespace.ns(), updateOp.getNamespace().ns());
+ ASSERT_EQUALS(kTestNamespace, updateOp.getNamespace());
BatchedCommandResponse response;
response.setStatus(Status::OK());
@@ -465,7 +465,7 @@ TEST_F(UpdateRetryTest, OperationInterruptedDueToPrimaryStepDown) {
auto future = launchAsync([&] {
auto status =
catalogClient()->updateConfigDocument(operationContext(),
- kTestNamespace.ns(),
+ kTestNamespace,
objToUpdate,
updateExpr,
false,
@@ -476,7 +476,7 @@ TEST_F(UpdateRetryTest, OperationInterruptedDueToPrimaryStepDown) {
onCommand([&](const RemoteCommandRequest& request) {
const auto opMsgRequest = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto updateOp = UpdateOp::parse(opMsgRequest);
- ASSERT_EQUALS(kTestNamespace.ns(), updateOp.getNamespace().ns());
+ ASSERT_EQUALS(kTestNamespace, updateOp.getNamespace());
BatchedCommandResponse response;
response.setStatus(Status::OK());
@@ -493,7 +493,7 @@ TEST_F(UpdateRetryTest, OperationInterruptedDueToPrimaryStepDown) {
onCommand([&](const RemoteCommandRequest& request) {
const auto opMsgRequest = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto updateOp = UpdateOp::parse(opMsgRequest);
- ASSERT_EQUALS(kTestNamespace.ns(), updateOp.getNamespace().ns());
+ ASSERT_EQUALS(kTestNamespace, updateOp.getNamespace());
BatchedCommandResponse response;
response.setStatus(Status::OK());
@@ -516,7 +516,7 @@ TEST_F(UpdateRetryTest, WriteConcernFailure) {
auto future = launchAsync([&] {
auto status =
catalogClient()->updateConfigDocument(operationContext(),
- kTestNamespace.ns(),
+ kTestNamespace,
objToUpdate,
updateExpr,
false,
@@ -527,7 +527,7 @@ TEST_F(UpdateRetryTest, WriteConcernFailure) {
onCommand([&](const RemoteCommandRequest& request) {
const auto opMsgRequest = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto updateOp = UpdateOp::parse(opMsgRequest);
- ASSERT_EQUALS(kTestNamespace.ns(), updateOp.getNamespace().ns());
+ ASSERT_EQUALS(kTestNamespace, updateOp.getNamespace());
BatchedCommandResponse response;
response.setStatus(Status::OK());
@@ -550,7 +550,7 @@ TEST_F(UpdateRetryTest, WriteConcernFailure) {
onCommand([&](const RemoteCommandRequest& request) {
const auto opMsgRequest = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto updateOp = UpdateOp::parse(opMsgRequest);
- ASSERT_EQUALS(kTestNamespace.ns(), updateOp.getNamespace().ns());
+ ASSERT_EQUALS(kTestNamespace, updateOp.getNamespace());
BatchedCommandResponse response;
response.setStatus(Status::OK());
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 456771f4aff..abec9536642 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -42,7 +42,7 @@
namespace mongo {
-const std::string ChunkType::ConfigNS = "config.chunks";
+const NamespaceString ChunkType::ConfigNS("config.chunks");
const std::string ChunkType::ShardNSPrefix = "config.cache.chunks.";
const BSONField<std::string> ChunkType::name("_id");
@@ -157,7 +157,7 @@ ChunkRange ChunkRange::unionWith(ChunkRange const& other) const {
ChunkType::ChunkType() = default;
ChunkType::ChunkType(NamespaceString nss, ChunkRange range, ChunkVersion version, ShardId shardId)
- : _ns(nss.ns()),
+ : _nss(nss),
_min(range.getMin()),
_max(range.getMax()),
_version(version),
@@ -171,7 +171,7 @@ StatusWith<ChunkType> ChunkType::fromConfigBSON(const BSONObj& source) {
Status status = bsonExtractStringField(source, ns.name(), &chunkNS);
if (!status.isOK())
return status;
- chunk._ns = chunkNS;
+ chunk._nss = NamespaceString(chunkNS);
}
{
@@ -217,10 +217,10 @@ StatusWith<ChunkType> ChunkType::fromConfigBSON(const BSONObj& source) {
BSONObj ChunkType::toConfigBSON() const {
BSONObjBuilder builder;
- if (_ns && _min)
+ if (_nss && _min)
builder.append(name.name(), getName());
- if (_ns)
- builder.append(ns.name(), getNS());
+ if (_nss)
+ builder.append(ns.name(), getNS().ns());
if (_min)
builder.append(min.name(), getMin());
if (_max)
@@ -295,14 +295,14 @@ BSONObj ChunkType::toShardBSON() const {
}
std::string ChunkType::getName() const {
- invariant(_ns);
+ invariant(_nss);
invariant(_min);
- return genID(*_ns, *_min);
+ return genID(*_nss, *_min);
}
-void ChunkType::setNS(const std::string& ns) {
- invariant(!ns.empty());
- _ns = ns;
+void ChunkType::setNS(const NamespaceString& nss) {
+ invariant(nss.isValid());
+ _nss = nss;
}
void ChunkType::setMin(const BSONObj& min) {
@@ -329,9 +329,9 @@ void ChunkType::setJumbo(bool jumbo) {
_jumbo = jumbo;
}
-std::string ChunkType::genID(StringData ns, const BSONObj& o) {
+std::string ChunkType::genID(const NamespaceString& nss, const BSONObj& o) {
StringBuilder buf;
- buf << ns << "-";
+ buf << nss.ns() << "-";
BSONObjIterator i(o);
while (i.more()) {
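[Editorial sketch of the retyped ChunkType API in use; the namespace and key values here are hypothetical, not taken from the patch.]

    ChunkType chunk;
    chunk.setNS(NamespaceString("TestDB.TestColl"));  // setNS now invariants on nss.isValid()
    chunk.setMin(BSON("a" << 1));
    // genID takes the typed namespace and stringifies it only when building
    // the _id, via nss.ns().
    const std::string id = ChunkType::genID(chunk.getNS(), chunk.getMin());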
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index 2852955deb6..b4998af3c9a 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -147,7 +147,7 @@ private:
class ChunkType {
public:
// Name of the chunks collection in the config server.
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
// The shard chunks collections' common namespace prefix.
static const std::string ShardNSPrefix;
@@ -199,10 +199,10 @@ public:
/**
* Getters and setters.
*/
- const std::string& getNS() const {
- return _ns.get();
+ const NamespaceString& getNS() const {
+ return _nss.get();
}
- void setNS(const std::string& name);
+ void setNS(const NamespaceString& nss);
const BSONObj& getMin() const {
return _min.get();
@@ -239,7 +239,7 @@ public:
/**
* Generates chunk id based on the namespace name and the lower bound of the chunk.
*/
- static std::string genID(StringData ns, const BSONObj& min);
+ static std::string genID(const NamespaceString& nss, const BSONObj& min);
/**
* Returns OK if all the mandatory fields have been set. Otherwise returns NoSuchKey and
@@ -256,7 +256,7 @@ private:
// Convention: (M)andatory, (O)ptional, (S)pecial; (C)onfig, (S)hard.
// (O)(C) collection this chunk is in
- boost::optional<std::string> _ns;
+ boost::optional<NamespaceString> _nss;
// (M)(C)(S) first key of the range, inclusive
boost::optional<BSONObj> _min;
// (M)(C)(S) last key of the range, non-inclusive
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index eabce7ed879..8aa5cd1b20f 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -197,7 +197,7 @@ TEST(ChunkType, ToFromConfigBSON) {
ASSERT_BSONOBJ_EQ(chunk.toConfigBSON(), obj);
- ASSERT_EQUALS(chunk.getNS(), "test.mycol");
+ ASSERT_EQUALS(chunk.getNS().ns(), "test.mycol");
ASSERT_BSONOBJ_EQ(chunk.getMin(), BSON("a" << 10));
ASSERT_BSONOBJ_EQ(chunk.getMax(), BSON("a" << 20));
ASSERT_EQUALS(chunk.getVersion().toLong(), chunkVersion.toLong());
@@ -221,7 +221,7 @@ TEST(ChunkType, Pre22Format) {
<< "shard0001")));
ASSERT_OK(chunk.validate());
- ASSERT_EQUALS(chunk.getNS(), "test.mycol");
+ ASSERT_EQUALS(chunk.getNS().ns(), "test.mycol");
ASSERT_BSONOBJ_EQ(chunk.getMin(), BSON("a" << 10));
ASSERT_BSONOBJ_EQ(chunk.getMax(), BSON("a" << 20));
ASSERT_EQUALS(chunk.getVersion().toLong(), 1ULL);
diff --git a/src/mongo/s/catalog/type_collection.cpp b/src/mongo/s/catalog/type_collection.cpp
index 6c9a0644327..79490a4494f 100644
--- a/src/mongo/s/catalog/type_collection.cpp
+++ b/src/mongo/s/catalog/type_collection.cpp
@@ -46,7 +46,7 @@ const auto kIsAssignedShardKey = "isAssignedShardKey"_sd;
} // namespace
-const std::string CollectionType::ConfigNS = "config.collections";
+const NamespaceString CollectionType::ConfigNS("config.collections");
const BSONField<std::string> CollectionType::fullNs("_id");
const BSONField<OID> CollectionType::epoch("lastmodEpoch");
diff --git a/src/mongo/s/catalog/type_collection.h b/src/mongo/s/catalog/type_collection.h
index 796b4f7707a..db7c7490a08 100644
--- a/src/mongo/s/catalog/type_collection.h
+++ b/src/mongo/s/catalog/type_collection.h
@@ -70,7 +70,7 @@ class StatusWith;
class CollectionType {
public:
// Name of the collections collection in the config server.
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
static const BSONField<std::string> fullNs;
static const BSONField<OID> epoch;
diff --git a/src/mongo/s/catalog/type_config_version.cpp b/src/mongo/s/catalog/type_config_version.cpp
index 68fa14829ad..7e9ce649527 100644
--- a/src/mongo/s/catalog/type_config_version.cpp
+++ b/src/mongo/s/catalog/type_config_version.cpp
@@ -38,7 +38,7 @@
namespace mongo {
-const std::string VersionType::ConfigNS = "config.version";
+const NamespaceString VersionType::ConfigNS("config.version");
const BSONField<int> VersionType::minCompatibleVersion("minCompatibleVersion");
const BSONField<int> VersionType::currentVersion("currentVersion");
diff --git a/src/mongo/s/catalog/type_config_version.h b/src/mongo/s/catalog/type_config_version.h
index e469ac57b7b..a412eaba571 100644
--- a/src/mongo/s/catalog/type_config_version.h
+++ b/src/mongo/s/catalog/type_config_version.h
@@ -33,6 +33,7 @@
#include <vector>
#include "mongo/db/jsobj.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/s/catalog/mongo_version_range.h"
namespace mongo {
@@ -45,7 +46,7 @@ namespace mongo {
class VersionType {
public:
// Name of the version collection in the config server.
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
// Field names and types in the version collection type.
static const BSONField<int> minCompatibleVersion;
diff --git a/src/mongo/s/catalog/type_database.cpp b/src/mongo/s/catalog/type_database.cpp
index 492077a9337..d3d807c89a1 100644
--- a/src/mongo/s/catalog/type_database.cpp
+++ b/src/mongo/s/catalog/type_database.cpp
@@ -41,7 +41,7 @@ namespace mongo {
using std::string;
-const std::string DatabaseType::ConfigNS = "config.databases";
+const NamespaceString DatabaseType::ConfigNS("config.databases");
const BSONField<std::string> DatabaseType::name("_id");
const BSONField<std::string> DatabaseType::primary("primary");
diff --git a/src/mongo/s/catalog/type_database.h b/src/mongo/s/catalog/type_database.h
index 9525263a151..8b2fcf85c7b 100644
--- a/src/mongo/s/catalog/type_database.h
+++ b/src/mongo/s/catalog/type_database.h
@@ -32,6 +32,7 @@
#include <string>
#include "mongo/db/jsobj.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/s/database_version_gen.h"
#include "mongo/s/shard_id.h"
@@ -51,7 +52,7 @@ class StatusWith;
class DatabaseType {
public:
// Name of the databases collection in the config server.
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
static const BSONField<std::string> name;
static const BSONField<std::string> primary;
diff --git a/src/mongo/s/catalog/type_lockpings.cpp b/src/mongo/s/catalog/type_lockpings.cpp
index 67fab8a20e2..0d2b041df79 100644
--- a/src/mongo/s/catalog/type_lockpings.cpp
+++ b/src/mongo/s/catalog/type_lockpings.cpp
@@ -35,7 +35,7 @@
#include "mongo/util/mongoutils/str.h"
namespace mongo {
-const std::string LockpingsType::ConfigNS = "config.lockpings";
+const NamespaceString LockpingsType::ConfigNS("config.lockpings");
const BSONField<std::string> LockpingsType::process("_id");
const BSONField<Date_t> LockpingsType::ping("ping");
diff --git a/src/mongo/s/catalog/type_lockpings.h b/src/mongo/s/catalog/type_lockpings.h
index 66556963705..52814f8d6c6 100644
--- a/src/mongo/s/catalog/type_lockpings.h
+++ b/src/mongo/s/catalog/type_lockpings.h
@@ -32,6 +32,7 @@
#include <string>
#include "mongo/db/jsobj.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -44,7 +45,7 @@ namespace mongo {
class LockpingsType {
public:
// Name of the lockpings collection in the config server.
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
// Field names and types in the lockpings collection type.
static const BSONField<std::string> process;
diff --git a/src/mongo/s/catalog/type_locks.cpp b/src/mongo/s/catalog/type_locks.cpp
index 9724ac92519..59e4aa02881 100644
--- a/src/mongo/s/catalog/type_locks.cpp
+++ b/src/mongo/s/catalog/type_locks.cpp
@@ -38,7 +38,7 @@
#include "mongo/util/mongoutils/str.h"
namespace mongo {
-const std::string LocksType::ConfigNS = "config.locks";
+const NamespaceString LocksType::ConfigNS("config.locks");
const BSONField<std::string> LocksType::name("_id");
const BSONField<LocksType::State> LocksType::state("state");
diff --git a/src/mongo/s/catalog/type_locks.h b/src/mongo/s/catalog/type_locks.h
index 9df7998d35d..f2d30afd349 100644
--- a/src/mongo/s/catalog/type_locks.h
+++ b/src/mongo/s/catalog/type_locks.h
@@ -32,6 +32,7 @@
#include <string>
#include "mongo/db/jsobj.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -51,7 +52,7 @@ public:
};
// Name of the locks collection in the config server.
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
// Field names and types in the locks collection type.
static const BSONField<std::string> name;
diff --git a/src/mongo/s/catalog/type_mongos.cpp b/src/mongo/s/catalog/type_mongos.cpp
index c656f59b4ee..3cbee67603f 100644
--- a/src/mongo/s/catalog/type_mongos.cpp
+++ b/src/mongo/s/catalog/type_mongos.cpp
@@ -35,7 +35,7 @@
#include "mongo/util/mongoutils/str.h"
namespace mongo {
-const std::string MongosType::ConfigNS = "config.mongos";
+const NamespaceString MongosType::ConfigNS("config.mongos");
const BSONField<std::string> MongosType::name("_id");
const BSONField<Date_t> MongosType::ping("ping");
diff --git a/src/mongo/s/catalog/type_mongos.h b/src/mongo/s/catalog/type_mongos.h
index 02e7fea601b..d48141ac156 100644
--- a/src/mongo/s/catalog/type_mongos.h
+++ b/src/mongo/s/catalog/type_mongos.h
@@ -32,6 +32,7 @@
#include <string>
#include "mongo/db/jsobj.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -44,7 +45,7 @@ namespace mongo {
class MongosType {
public:
// Name of the mongos collection in the config server.
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
// Field names and types in the mongos collection type.
static const BSONField<std::string> name;
diff --git a/src/mongo/s/catalog/type_shard.cpp b/src/mongo/s/catalog/type_shard.cpp
index 0b97a3d3cd6..56d432cf43e 100644
--- a/src/mongo/s/catalog/type_shard.cpp
+++ b/src/mongo/s/catalog/type_shard.cpp
@@ -40,7 +40,7 @@
namespace mongo {
-const std::string ShardType::ConfigNS = "config.shards";
+const NamespaceString ShardType::ConfigNS("config.shards");
const BSONField<std::string> ShardType::name("_id");
const BSONField<std::string> ShardType::host("host");
diff --git a/src/mongo/s/catalog/type_shard.h b/src/mongo/s/catalog/type_shard.h
index 5e3ca3d8989..29fe7922043 100644
--- a/src/mongo/s/catalog/type_shard.h
+++ b/src/mongo/s/catalog/type_shard.h
@@ -33,6 +33,7 @@
#include <vector>
#include "mongo/db/jsobj.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/s/shard_id.h"
namespace mongo {
@@ -56,7 +57,7 @@ public:
};
// Name of the shards collection in the config server.
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
// Field names and types in the shards collection type.
static const BSONField<std::string> name;
diff --git a/src/mongo/s/catalog/type_shard_collection.cpp b/src/mongo/s/catalog/type_shard_collection.cpp
index 195606b0878..7343238a1b1 100644
--- a/src/mongo/s/catalog/type_shard_collection.cpp
+++ b/src/mongo/s/catalog/type_shard_collection.cpp
@@ -39,8 +39,8 @@
namespace mongo {
-const std::string ShardCollectionType::ConfigNS =
- NamespaceString::kShardConfigCollectionsCollectionName.toString();
+const NamespaceString ShardCollectionType::ConfigNS(
+ NamespaceString::kShardConfigCollectionsCollectionName);
const BSONField<std::string> ShardCollectionType::ns("_id");
const BSONField<UUID> ShardCollectionType::uuid("uuid");
diff --git a/src/mongo/s/catalog/type_shard_collection.h b/src/mongo/s/catalog/type_shard_collection.h
index 3f1da52eb9c..0df48ffe59b 100644
--- a/src/mongo/s/catalog/type_shard_collection.h
+++ b/src/mongo/s/catalog/type_shard_collection.h
@@ -71,7 +71,7 @@ class StatusWith;
class ShardCollectionType {
public:
// Name of the collections collection on the shard server.
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
static const BSONField<std::string> ns; // "_id"
static const BSONField<UUID> uuid;
diff --git a/src/mongo/s/catalog/type_tags.cpp b/src/mongo/s/catalog/type_tags.cpp
index b0c72053cca..5701633a273 100644
--- a/src/mongo/s/catalog/type_tags.cpp
+++ b/src/mongo/s/catalog/type_tags.cpp
@@ -42,7 +42,7 @@ namespace mongo {
using std::string;
-const std::string TagsType::ConfigNS = "config.tags";
+const NamespaceString TagsType::ConfigNS("config.tags");
const BSONField<std::string> TagsType::ns("ns");
const BSONField<std::string> TagsType::tag("tag");
@@ -60,7 +60,7 @@ StatusWith<TagsType> TagsType::fromBSON(const BSONObj& source) {
return status;
}
- tags._ns = tagsNs;
+ tags._ns = NamespaceString(tagsNs);
}
{
@@ -87,7 +87,7 @@ StatusWith<TagsType> TagsType::fromBSON(const BSONObj& source) {
}
Status TagsType::validate() const {
- if (!_ns.is_initialized() || _ns->empty()) {
+ if (!_ns.is_initialized() || !_ns->isValid()) {
return Status(ErrorCodes::NoSuchKey, str::stream() << "missing " << ns.name() << " field");
}
@@ -130,7 +130,7 @@ BSONObj TagsType::toBSON() const {
BSONObjBuilder builder;
if (_ns)
- builder.append(ns.name(), getNS());
+ builder.append(ns.name(), getNS().ns());
if (_tag)
builder.append(tag.name(), getTag());
if (_minKey)
@@ -145,8 +145,8 @@ std::string TagsType::toString() const {
return toBSON().toString();
}
-void TagsType::setNS(const std::string& ns) {
- invariant(!ns.empty());
+void TagsType::setNS(const NamespaceString& ns) {
+ invariant(ns.isValid());
_ns = ns;
}
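[Editorial sketch: the stricter check this hunk introduces. The old code only rejected empty strings; isValid() also rejects malformed names, e.g. one with no collection part (assumption: isValid() requires a non-empty coll()).]

    TagsType tag;
    tag.setNS(NamespaceString("test.mycol"));  // ok: db and collection both present
    // tag.setNS(NamespaceString("test"));     // non-empty, but trips invariant(ns.isValid())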
diff --git a/src/mongo/s/catalog/type_tags.h b/src/mongo/s/catalog/type_tags.h
index 8549af8fadd..b205e2d5646 100644
--- a/src/mongo/s/catalog/type_tags.h
+++ b/src/mongo/s/catalog/type_tags.h
@@ -32,6 +32,7 @@
#include <string>
#include "mongo/db/jsobj.h"
+#include "mongo/db/namespace_string.h"
namespace mongo {
@@ -49,7 +50,7 @@ class StatusWith;
class TagsType {
public:
// Name of the tags collection in the config server.
- static const std::string ConfigNS;
+ static const NamespaceString ConfigNS;
// Field names and types in the tags collection type.
static const BSONField<std::string> ns;
@@ -80,10 +81,10 @@ public:
*/
std::string toString() const;
- const std::string& getNS() const {
+ const NamespaceString& getNS() const {
return _ns.get();
}
- void setNS(const std::string& ns);
+ void setNS(const NamespaceString& ns);
const std::string& getTag() const {
return _tag.get();
@@ -102,7 +103,7 @@ public:
private:
// Required namespace to which this tag belongs
- boost::optional<std::string> _ns;
+ boost::optional<NamespaceString> _ns;
// Required tag name
boost::optional<std::string> _tag;
diff --git a/src/mongo/s/catalog/type_tags_test.cpp b/src/mongo/s/catalog/type_tags_test.cpp
index 16a7c6dc2ee..e3529097730 100644
--- a/src/mongo/s/catalog/type_tags_test.cpp
+++ b/src/mongo/s/catalog/type_tags_test.cpp
@@ -50,7 +50,7 @@ TEST(TagsType, Valid) {
TagsType tag = status.getValue();
- ASSERT_EQUALS(tag.getNS(), "test.mycol");
+ ASSERT_EQUALS(tag.getNS().ns(), "test.mycol");
ASSERT_EQUALS(tag.getTag(), "tag");
ASSERT_BSONOBJ_EQ(tag.getMinKey(), BSON("a" << 10));
ASSERT_BSONOBJ_EQ(tag.getMaxKey(), BSON("a" << 20));
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 1f970c5dca6..bf072e10df4 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -241,7 +241,7 @@ void CatalogCache::onStaleConfigError(CachedCollectionRoutingInfo&& ccriToInvali
// Here we received a stale config error for a collection which we previously thought was sharded
stdx::lock_guard<stdx::mutex> lg(_mutex);
- auto it = _databases.find(NamespaceString(ccri._cm->getns()).db());
+ auto it = _databases.find(ccri._cm->getns().db());
if (it == _databases.end()) {
// If the database does not exist, the collection must have been dropped so there is
// nothing to invalidate. The getCollectionRoutingInfo will handle the reload of the
@@ -251,7 +251,7 @@ void CatalogCache::onStaleConfigError(CachedCollectionRoutingInfo&& ccriToInvali
auto& collections = it->second->collections;
- auto itColl = collections.find(ccri._cm->getns());
+ auto itColl = collections.find(ccri._cm->getns().ns());
if (itColl == collections.end()) {
// If the collection does not exist, this means it must have been dropped since the last
// time we retrieved a cache entry for it. Doing nothing in this case will cause the
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index ed7a34d5468..dc04c1b8548 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -139,8 +139,8 @@ public:
return _sequenceNumber;
}
- const std::string& getns() const {
- return _nss.ns();
+ const NamespaceString& getns() const {
+ return _nss;
}
const ShardKeyPattern& getShardKeyPattern() const {
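[Editorial sketch, not from the patch: the call-site pattern this signature change forces. Callers keep the typed value and stringify only at output boundaries.]

    const NamespaceString& nss = manager->getns();  // reference; no copy, no re-parse
    const std::string label = str::stream() << "[" << nss.ns() << "]";  // explicit .ns() where text is needed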
diff --git a/src/mongo/s/client/parallel.cpp b/src/mongo/s/client/parallel.cpp
index 7c56b69c053..0c35d6f5c68 100644
--- a/src/mongo/s/client/parallel.cpp
+++ b/src/mongo/s/client/parallel.cpp
@@ -445,7 +445,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* opCtx) {
if (manager) {
if (MONGO_unlikely(shouldLog(pc))) {
- vinfo = str::stream() << "[" << manager->getns() << " @ "
+ vinfo = str::stream() << "[" << manager->getns().ns() << " @ "
<< manager->getVersion().toString() << "]";
}
@@ -1303,7 +1303,7 @@ BSONObj ParallelConnectionState::toBSON() const {
BSONObj stateObj =
BSON("conn" << (conn ? (conn->ok() ? conn->conn().toString() : "(done)") : "") << "vinfo"
- << (manager ? (str::stream() << manager->getns() << " @ "
+ << (manager ? (str::stream() << manager->getns().ns() << " @ "
<< manager->getVersion().toString())
: primary->toString()));
diff --git a/src/mongo/s/client/shard_connection.cpp b/src/mongo/s/client/shard_connection.cpp
index fcb033b1025..c977467dbb0 100644
--- a/src/mongo/s/client/shard_connection.cpp
+++ b/src/mongo/s/client/shard_connection.cpp
@@ -406,7 +406,7 @@ ShardConnection::ShardConnection(const ConnectionString& connectionString,
// Make sure we specified a manager for the correct namespace
if (_ns.size() && _manager) {
- invariant(_manager->getns() == _ns);
+ invariant(_manager->getns().ns() == _ns);
}
auto csString = _cs.toString();
diff --git a/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
index 419ca7e2a0e..ca9f56b0ab7 100644
--- a/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
@@ -89,8 +89,7 @@ public:
const std::string& dbname,
const BSONObj& cmdObj) override {
if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(ShardType::ConfigNS)),
- ActionType::update)) {
+ ResourcePattern::forExactNamespace(ShardType::ConfigNS), ActionType::update)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
return Status::OK();
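[Editorial sketch of the auth-check simplification repeated across these command files: ShardType::ConfigNS is itself a NamespaceString after this commit, so the wrapping constructor disappears.]

    // Before: ResourcePattern::forExactNamespace(NamespaceString(ShardType::ConfigNS))
    // After: the constant is already the right type.
    auto pattern = ResourcePattern::forExactNamespace(ShardType::ConfigNS);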
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 5e58c1c0e68..9eed7db5fa2 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -476,7 +476,7 @@ public:
} else {
// Collection is already sharded; read the collection's UUID from the config server.
const auto coll =
- uassertStatusOK(catalogClient->getCollection(opCtx, outputCollNss.ns())).value;
+ uassertStatusOK(catalogClient->getCollection(opCtx, outputCollNss)).value;
shardedOutputCollUUID = coll.getUUID();
}
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index e251376c920..88549b9f70a 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -182,7 +182,7 @@ public:
uassertStatusOK(MigrationSecondaryThrottleOptions::createFromCommand(cmdObj));
ChunkType chunkType;
- chunkType.setNS(nss.ns());
+ chunkType.setNS(nss);
chunkType.setMin(chunk->getMin());
chunkType.setMax(chunk->getMax());
chunkType.setShard(chunk->getShardId());
diff --git a/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
index bc35d3c573c..6ba4dc36278 100644
--- a/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
@@ -89,14 +89,12 @@ public:
const std::string& dbname,
const BSONObj& cmdObj) override {
if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(ShardType::ConfigNS)),
- ActionType::update)) {
+ ResourcePattern::forExactNamespace(ShardType::ConfigNS), ActionType::update)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(TagsType::ConfigNS)),
- ActionType::find)) {
+ ResourcePattern::forExactNamespace(TagsType::ConfigNS), ActionType::find)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
diff --git a/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp b/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
index f286abc6a6b..b573d937ddd 100644
--- a/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
+++ b/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
@@ -92,26 +92,22 @@ public:
const std::string& dbname,
const BSONObj& cmdObj) override {
if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(ShardType::ConfigNS)),
- ActionType::find)) {
+ ResourcePattern::forExactNamespace(ShardType::ConfigNS), ActionType::find)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(TagsType::ConfigNS)),
- ActionType::find)) {
+ ResourcePattern::forExactNamespace(TagsType::ConfigNS), ActionType::find)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(TagsType::ConfigNS)),
- ActionType::update)) {
+ ResourcePattern::forExactNamespace(TagsType::ConfigNS), ActionType::update)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(TagsType::ConfigNS)),
- ActionType::remove)) {
+ ResourcePattern::forExactNamespace(TagsType::ConfigNS), ActionType::remove)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
diff --git a/src/mongo/s/commands/cluster_write.cpp b/src/mongo/s/commands/cluster_write.cpp
index 14a0482a268..012ec0b17d8 100644
--- a/src/mongo/s/commands/cluster_write.cpp
+++ b/src/mongo/s/commands/cluster_write.cpp
@@ -258,7 +258,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* opCtx,
return;
}
- const NamespaceString nss(manager->getns());
+ const NamespaceString& nss = manager->getns();
if (!manager->_autoSplitThrottle._splitTickets.tryAcquire()) {
LOG(1) << "won't auto split because not enough tickets: " << nss;
@@ -390,7 +390,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* opCtx,
suggestedMigrateChunk->getMin());
ChunkType chunkToMove;
- chunkToMove.setNS(nss.ns());
+ chunkToMove.setNS(nss);
chunkToMove.setShard(suggestedChunk->getShardId());
chunkToMove.setMin(suggestedChunk->getMin());
chunkToMove.setMax(suggestedChunk->getMax());
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index c5e0ecb358d..1ce1611ff25 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -458,7 +458,7 @@ public:
Strategy::commandOp(opCtx,
dbName,
CommandHelpers::filterCommandRequestForPassthrough(cmdObj),
- cm->getns(),
+ cm->getns().ns(),
query,
CollationSpec::kSimpleSpec,
&results);
diff --git a/src/mongo/s/config_server_catalog_cache_loader.cpp b/src/mongo/s/config_server_catalog_cache_loader.cpp
index 5f46e72fb20..49bbe011939 100644
--- a/src/mongo/s/config_server_catalog_cache_loader.cpp
+++ b/src/mongo/s/config_server_catalog_cache_loader.cpp
@@ -93,7 +93,7 @@ CollectionAndChangedChunks getChangedChunks(OperationContext* opCtx,
const auto catalogClient = Grid::get(opCtx)->catalogClient();
// Decide whether to do a full or partial load based on the state of the collection
- const auto coll = uassertStatusOK(catalogClient->getCollection(opCtx, nss.ns())).value;
+ const auto coll = uassertStatusOK(catalogClient->getCollection(opCtx, nss)).value;
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Collection " << nss.ns() << " is dropped.",
!coll.getDropped());
diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp
index d6260b1984d..0ab892018bb 100644
--- a/src/mongo/s/config_server_test_fixture.cpp
+++ b/src/mongo/s/config_server_test_fixture.cpp
@@ -300,8 +300,8 @@ Status ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards
StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opCtx,
const std::string& shardId) {
- auto doc = findOneOnConfigCollection(
- opCtx, NamespaceString(ShardType::ConfigNS), BSON(ShardType::name(shardId)));
+ auto doc =
+ findOneOnConfigCollection(opCtx, ShardType::ConfigNS, BSON(ShardType::name(shardId)));
if (!doc.isOK()) {
if (doc.getStatus() == ErrorCodes::NoMatchingDocument) {
return {ErrorCodes::ShardNotFound,
@@ -327,8 +327,8 @@ Status ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks
StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* opCtx,
const BSONObj& minKey) {
- auto doc = findOneOnConfigCollection(
- opCtx, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::min() << minKey));
+ auto doc =
+ findOneOnConfigCollection(opCtx, ChunkType::ConfigNS, BSON(ChunkType::min() << minKey));
if (!doc.isOK())
return doc.getStatus();
@@ -374,14 +374,13 @@ StatusWith<std::vector<BSONObj>> ConfigServerTestFixture::getIndexes(OperationCo
std::vector<KeysCollectionDocument> ConfigServerTestFixture::getKeys(OperationContext* opCtx) {
auto config = getConfigShard();
- auto findStatus =
- config->exhaustiveFindOnConfig(opCtx,
- kReadPref,
- repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(KeysCollectionDocument::ConfigNS),
- BSONObj(),
- BSON("expiresAt" << 1),
- boost::none);
+ auto findStatus = config->exhaustiveFindOnConfig(opCtx,
+ kReadPref,
+ repl::ReadConcernLevel::kMajorityReadConcern,
+ KeysCollectionDocument::ConfigNS,
+ BSONObj(),
+ BSON("expiresAt" << 1),
+ boost::none);
ASSERT_OK(findStatus.getStatus());
std::vector<KeysCollectionDocument> keys;
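[Editorial sketch: the fixture helpers now take the typed constants directly, with no NamespaceString(...) wrapping at each call site; the filter value here is hypothetical.]

    auto doc = findOneOnConfigCollection(
        opCtx, ChunkType::ConfigNS, BSON(ChunkType::min() << BSON("a" << 1)));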
diff --git a/src/mongo/s/request_types/balance_chunk_request_test.cpp b/src/mongo/s/request_types/balance_chunk_request_test.cpp
index 97af5a292bf..4697853109f 100644
--- a/src/mongo/s/request_types/balance_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/balance_chunk_request_test.cpp
@@ -57,7 +57,7 @@ TEST(BalanceChunkRequest, ParseFromConfigCommandNoSecondaryThrottle) {
<< "lastmodEpoch"
<< version.epoch())));
const auto& chunk = request.getChunk();
- ASSERT_EQ("TestDB.TestColl", chunk.getNS());
+ ASSERT_EQ("TestDB.TestColl", chunk.getNS().ns());
ASSERT_BSONOBJ_EQ(BSON("a" << -100LL), chunk.getMin());
ASSERT_BSONOBJ_EQ(BSON("a" << 100LL), chunk.getMax());
ASSERT_EQ(ShardId("TestShard0000"), chunk.getShard());
@@ -87,7 +87,7 @@ TEST(BalanceChunkRequest, ParseFromConfigCommandWithSecondaryThrottle) {
<< BSON("_secondaryThrottle" << true << "writeConcern"
<< BSON("w" << 2)))));
const auto& chunk = request.getChunk();
- ASSERT_EQ("TestDB.TestColl", chunk.getNS());
+ ASSERT_EQ("TestDB.TestColl", chunk.getNS().ns());
ASSERT_BSONOBJ_EQ(BSON("a" << -100LL), chunk.getMin());
ASSERT_BSONOBJ_EQ(BSON("a" << 100LL), chunk.getMax());
ASSERT_EQ(ShardId("TestShard0000"), chunk.getShard());
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_test_fixture.cpp
index 8b053a7426f..235054b36c1 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_test_fixture.cpp
@@ -302,13 +302,13 @@ void ShardingTestFixture::setupShards(const std::vector<ShardType>& shards) {
void ShardingTestFixture::expectGetShards(const std::vector<ShardType>& shards) {
onFindCommand([this, &shards](const RemoteCommandRequest& request) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.toString(), ShardType::ConfigNS);
+ ASSERT_EQ(nss, ShardType::ConfigNS);
auto queryResult = QueryRequest::makeFromFindCommand(nss, request.cmdObj, false);
ASSERT_OK(queryResult.getStatus());
const auto& query = queryResult.getValue();
- ASSERT_EQ(query->nss().ns(), ShardType::ConfigNS);
+ ASSERT_EQ(query->nss(), ShardType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
@@ -334,7 +334,7 @@ void ShardingTestFixture::expectInserts(const NamespaceString& nss,
const auto opMsgRequest = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto insertOp = InsertOp::parse(opMsgRequest);
- ASSERT_EQUALS(nss.ns(), insertOp.getNamespace().ns());
+ ASSERT_EQUALS(nss, insertOp.getNamespace());
const auto& inserted = insertOp.getDocuments();
ASSERT_EQUALS(expected.size(), inserted.size());
@@ -451,7 +451,7 @@ void ShardingTestFixture::expectUpdateCollection(const HostAndPort& expectedHost
const auto opMsgRequest = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto updateOp = UpdateOp::parse(opMsgRequest);
- ASSERT_EQUALS(CollectionType::ConfigNS, updateOp.getNamespace().ns());
+ ASSERT_EQUALS(CollectionType::ConfigNS, updateOp.getNamespace());
const auto& updates = updateOp.getUpdates();
ASSERT_EQUALS(1U, updates.size());
@@ -485,7 +485,7 @@ void ShardingTestFixture::expectSetShardVersion(const HostAndPort& expectedHost,
ASSERT(!ssv.isInit());
ASSERT(ssv.isAuthoritative());
ASSERT_EQ(expectedShard.getHost(), ssv.getShardConnectionString().toString());
- ASSERT_EQ(expectedNs.toString(), ssv.getNS().ns());
+ ASSERT_EQ(expectedNs, ssv.getNS());
ASSERT_EQ(expectedChunkVersion.toString(), ssv.getNSVersion().toString());
return BSON("ok" << true);