author      Silvia Surroca <silvia.surroca@mongodb.com>        2022-06-03 11:58:55 +0000
committer   Evergreen Agent <no-reply@evergreen.mongodb.com>   2022-06-03 12:36:42 +0000
commit      cc541819baf3d798f8f0da1edf6f9beda10961a7 (patch)
tree        95ec3ce26711e0749796795d0dfd78cf279fd108 /src/mongo/db/s
parent      9aafbc13112e03e9aa6889b0fb754adaac8d1e84 (diff)
download    mongo-cc541819baf3d798f8f0da1edf6f9beda10961a7.tar.gz
SERVER-66431 Replace all usages of ShardType::ConfigNS by kConfigsvrShardsNamespace
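Editor's note: the refactor is mechanical. Both symbols appear to name the config.shards collection, so each call site below changes only which constant it references. A minimal self-contained sketch of the pattern, assuming both constants resolve to "config.shards" — the struct definitions here are simplified stand-ins, not the real MongoDB declarations:

    #include <cassert>
    #include <string>

    // Simplified stand-ins for the real MongoDB types (assumption: both
    // constants name the "config.shards" collection, as the diff implies).
    struct ShardType {
        static const std::string ConfigNS;  // the symbol this commit retires
    };
    const std::string ShardType::ConfigNS = "config.shards";

    struct NamespaceString {
        static const std::string kConfigsvrShardsNamespace;  // its replacement
    };
    const std::string NamespaceString::kConfigsvrShardsNamespace = "config.shards";

    int main() {
        // Behavior-preserving: both symbols name the same collection, so only
        // the spelling at each call site changes.
        assert(ShardType::ConfigNS == NamespaceString::kConfigsvrShardsNamespace);
        return 0;
    }

One plausible motivation for centralizing the constant on NamespaceString is that call sites which only need the collection's name no longer depend on the ShardType serialization type to obtain it.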
Diffstat (limited to 'src/mongo/db/s')
14 files changed, 69 insertions, 52 deletions
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
index 8e19a6087e6..bf22d67619e 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
@@ -122,10 +122,14 @@ protected:
 
 TEST_F(BalancerChunkSelectionTest, TagRangesOverlap) {
     // Set up two shards in the metadata.
-    ASSERT_OK(catalogClient()->insertConfigDocument(
-        operationContext(), ShardType::ConfigNS, kShard0, kMajorityWriteConcern));
-    ASSERT_OK(catalogClient()->insertConfigDocument(
-        operationContext(), ShardType::ConfigNS, kShard1, kMajorityWriteConcern));
+    ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+                                                    NamespaceString::kConfigsvrShardsNamespace,
+                                                    kShard0,
+                                                    kMajorityWriteConcern));
+    ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+                                                    NamespaceString::kConfigsvrShardsNamespace,
+                                                    kShard1,
+                                                    kMajorityWriteConcern));
 
     // Set up a database and a sharded collection in the metadata.
     const auto collUUID = UUID::gen();
@@ -178,11 +182,11 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
         "featureFlagBalanceAccordingToDataSize", false};
     // Set up two shards in the metadata.
     ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
-                                                    ShardType::ConfigNS,
+                                                    NamespaceString::kConfigsvrShardsNamespace,
                                                     appendTags(kShard0, {"A"}),
                                                     kMajorityWriteConcern));
     ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
-                                                    ShardType::ConfigNS,
+                                                    NamespaceString::kConfigsvrShardsNamespace,
                                                     appendTags(kShard1, {"A"}),
                                                     kMajorityWriteConcern));
 
@@ -237,11 +241,11 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
 TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeAutoSplitted) {
     // Set up two shards in the metadata, each one with its own tag
     ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
-                                                    ShardType::ConfigNS,
+                                                    NamespaceString::kConfigsvrShardsNamespace,
                                                     appendTags(kShard0, {"A"}),
                                                     kMajorityWriteConcern));
     ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
-                                                    ShardType::ConfigNS,
+                                                    NamespaceString::kConfigsvrShardsNamespace,
                                                     appendTags(kShard1, {"B"}),
                                                     kMajorityWriteConcern));
 
@@ -287,10 +291,14 @@ TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeBalanced) {
     RAIIServerParameterControllerForTest featureFlagBalanceAccordingToDataSize{
         "featureFlagBalanceAccordingToDataSize", false};
     // Set up two shards in the metadata.
-    ASSERT_OK(catalogClient()->insertConfigDocument(
-        operationContext(), ShardType::ConfigNS, kShard0, kMajorityWriteConcern));
-    ASSERT_OK(catalogClient()->insertConfigDocument(
-        operationContext(), ShardType::ConfigNS, kShard1, kMajorityWriteConcern));
+    ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+                                                    NamespaceString::kConfigsvrShardsNamespace,
+                                                    kShard0,
+                                                    kMajorityWriteConcern));
+    ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+                                                    NamespaceString::kConfigsvrShardsNamespace,
+                                                    kShard1,
+                                                    kMajorityWriteConcern));
 
     // Set up a database and a sharded collection in the metadata.
     const auto collUUID = UUID::gen();
diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp
index 05cb045f5ae..198371cf17a 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.cpp
+++ b/src/mongo/db/s/config/config_server_test_fixture.cpp
@@ -292,7 +292,7 @@ StatusWith<BSONObj> ConfigServerTestFixture::findOneOnConfigCollection(Operation
 }
 
 void ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards) {
-    const NamespaceString shardNS(ShardType::ConfigNS);
+    const NamespaceString shardNS(NamespaceString::kConfigsvrShardsNamespace);
 
     for (const auto& shard : shards) {
         ASSERT_OK(insertToConfigCollection(operationContext(), shardNS, shard.toBSON()));
     }
@@ -300,8 +300,8 @@ void ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards)
 
 StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opCtx,
                                                            const std::string& shardId) {
-    auto doc =
-        findOneOnConfigCollection(opCtx, ShardType::ConfigNS, BSON(ShardType::name(shardId)));
+    auto doc = findOneOnConfigCollection(
+        opCtx, NamespaceString::kConfigsvrShardsNamespace, BSON(ShardType::name(shardId)));
     if (!doc.isOK()) {
         if (doc.getStatus() == ErrorCodes::NoMatchingDocument) {
             return {ErrorCodes::ShardNotFound,
@@ -323,8 +323,8 @@ void ConfigServerTestFixture::setupCollection(const NamespaceString& nss,
     if (!dbDoc.isOK()) {
         // If the database is not setup, choose the first available shard as primary to implicitly
         // create the db
-        auto swShardDoc =
-            findOneOnConfigCollection(operationContext(), ShardType::ConfigNS, BSONObj());
+        auto swShardDoc = findOneOnConfigCollection(
+            operationContext(), NamespaceString::kConfigsvrShardsNamespace, BSONObj());
         invariant(swShardDoc.isOK(),
                   "At least one shard should be setup when initializing a collection");
         auto shard = uassertStatusOK(ShardType::fromBSON(swShardDoc.getValue()));
diff --git a/src/mongo/db/s/config/index_on_config_test.cpp b/src/mongo/db/s/config/index_on_config_test.cpp
index d6e3b5936b1..38f36d98fc2 100644
--- a/src/mongo/db/s/config/index_on_config_test.cpp
+++ b/src/mongo/db/s/config/index_on_config_test.cpp
@@ -63,8 +63,10 @@ protected:
 };
 
 TEST_F(ConfigIndexTest, CompatibleIndexAlreadyExists) {
-    createIndexOnConfigCollection(
-        operationContext(), ShardType::ConfigNS, BSON("host" << 1), /*unique*/ true)
+    createIndexOnConfigCollection(operationContext(),
+                                  NamespaceString::kConfigsvrShardsNamespace,
+                                  BSON("host" << 1),
+                                  /*unique*/ true)
         .transitional_ignore();
 
     ASSERT_OK(ShardingCatalogManager::get(operationContext())
@@ -77,15 +79,18 @@ TEST_F(ConfigIndexTest, CompatibleIndexAlreadyExists) {
                                            << "host_1")};
 
-    auto foundShardsIndexes = assertGet(getIndexes(operationContext(), ShardType::ConfigNS));
+    auto foundShardsIndexes =
+        assertGet(getIndexes(operationContext(), NamespaceString::kConfigsvrShardsNamespace));
     assertBSONObjsSame(expectedShardsIndexes, foundShardsIndexes);
 }
 
 TEST_F(ConfigIndexTest, IncompatibleIndexAlreadyExists) {
     // Make the index non-unique even though its supposed to be unique, make sure initialization
     // fails
-    createIndexOnConfigCollection(
-        operationContext(), ShardType::ConfigNS, BSON("host" << 1), /*unique*/ false)
+    createIndexOnConfigCollection(operationContext(),
+                                  NamespaceString::kConfigsvrShardsNamespace,
+                                  BSON("host" << 1),
+                                  /*unique*/ false)
         .transitional_ignore();
 
     ASSERT_EQUALS(ErrorCodes::IndexKeySpecsConflict,
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 6847ade89ef..1be2dd486fb 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -138,7 +138,7 @@ StringMap<std::vector<ShardId>> buildTagsToShardIdsMap(OperationContext* opCtx,
         configServer->exhaustiveFindOnConfig(opCtx,
                                              ReadPreferenceSetting(ReadPreference::Nearest),
                                              repl::ReadConcernLevel::kMajorityReadConcern,
-                                             ShardType::ConfigNS,
+                                             NamespaceString::kConfigsvrShardsNamespace,
                                              BSONObj(),
                                              BSONObj(),
                                              boost::none));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index baeda1f643f..86b55b03739 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -436,7 +436,7 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
     }
 
     result = createIndexOnConfigCollection(
-        opCtx, ShardType::ConfigNS, BSON(ShardType::host() << 1), unique);
+        opCtx, NamespaceString::kConfigsvrShardsNamespace, BSON(ShardType::host() << 1), unique);
     if (!result.isOK()) {
         return result.withContext("couldn't create host_1 index on config db");
     }
@@ -548,7 +548,7 @@ StatusWith<bool> ShardingCatalogManager::_isShardRequiredByZoneStillInUse(
         configShard->exhaustiveFindOnConfig(opCtx,
                                             readPref,
                                             repl::ReadConcernLevel::kLocalReadConcern,
-                                            ShardType::ConfigNS,
+                                            NamespaceString::kConfigsvrShardsNamespace,
                                             BSON(ShardType::tags() << zoneName),
                                             BSONObj(),
                                             2);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index 8d187d683f5..0a45a9d3a6d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -190,7 +190,7 @@ protected:
             operationContext(),
             ReadPreferenceSetting(ReadPreference::PrimaryOnly),
             repl::ReadConcernLevel::kLocalReadConcern,
-            ShardType::ConfigNS,
+            NamespaceString::kConfigsvrShardsNamespace,
             BSONObj(),
             BSONObj(),
             boost::none));
@@ -543,7 +543,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
     // Add a pre-existing shard so when generating a name for the new shard it will have to go
     // higher than the existing one.
     ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
-                                                    ShardType::ConfigNS,
+                                                    NamespaceString::kConfigsvrShardsNamespace,
                                                     existingShard.toBSON(),
                                                     ShardingCatalogClient::kMajorityWriteConcern));
     assertShardExists(existingShard);
@@ -1209,7 +1209,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
 
     // Make sure the shard already exists.
     ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
-                                                    ShardType::ConfigNS,
+                                                    NamespaceString::kConfigsvrShardsNamespace,
                                                     existingShard.toBSON(),
                                                     ShardingCatalogClient::kMajorityWriteConcern));
     assertShardExists(existingShard);
@@ -1318,7 +1318,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
 
     // Make sure the shard already exists.
     ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
-                                                    ShardType::ConfigNS,
+                                                    NamespaceString::kConfigsvrShardsNamespace,
                                                     existingShard.toBSON(),
                                                     ShardingCatalogClient::kMajorityWriteConcern));
     assertShardExists(existingShard);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp
index 4093d840d87..297a4aa04d8 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp
@@ -596,7 +596,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture,
     shard.setHost("b:1234");
     shard.setTags({"y"});
 
-    ASSERT_OK(insertToConfigCollection(operationContext(), ShardType::ConfigNS, shard.toBSON()));
+    ASSERT_OK(insertToConfigCollection(
+        operationContext(), NamespaceString::kConfigsvrShardsNamespace, shard.toBSON()));
 
     ASSERT_THROWS_CODE(
         ShardingCatalogManager::get(operationContext())
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 067ce787d2a..0cf64cc5288 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -995,7 +995,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
         configShard->exhaustiveFindOnConfig(opCtx,
                                             ReadPreferenceSetting(ReadPreference::PrimaryOnly),
                                             repl::ReadConcernLevel::kLocalReadConcern,
-                                            ShardType::ConfigNS,
+                                            NamespaceString::kConfigsvrShardsNamespace,
                                             BSON(ShardType::name(toShard.toString())),
                                             {},
                                             boost::none));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp
index a480f42dec5..c7c844aa33b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp
@@ -317,7 +317,8 @@ TEST_F(ConfigInitializationTest, BuildsNecessaryIndexes) {
     auto foundLocksIndexes = assertGet(getIndexes(operationContext(), LocksType::ConfigNS));
     assertBSONObjsSame(expectedLocksIndexes, foundLocksIndexes);
 
-    auto foundShardsIndexes = assertGet(getIndexes(operationContext(), ShardType::ConfigNS));
+    auto foundShardsIndexes =
+        assertGet(getIndexes(operationContext(), NamespaceString::kConfigsvrShardsNamespace));
     assertBSONObjsSame(expectedShardsIndexes, foundShardsIndexes);
 
     auto foundTagsIndexes = assertGet(getIndexes(operationContext(), TagsType::ConfigNS));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp
index 40641dfa4d8..60327f62be5 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp
@@ -249,7 +249,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneFromShardShouldErrorIfShardDocIsMalfor
                                     << "z"));
 
     insertToConfigCollection(
-        operationContext(), ShardType::ConfigNS, invalidShardDoc);
+        operationContext(), NamespaceString::kConfigsvrShardsNamespace, invalidShardDoc);
 
     auto status =
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
index 5e2c7b656df..32544cacc7b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
@@ -118,7 +118,7 @@ protected:
             operationContext(),
             ReadPreferenceSetting{ReadPreference::PrimaryOnly},
             repl::ReadConcernLevel::kLocalReadConcern,
-            ShardType::ConfigNS,
+            NamespaceString::kConfigsvrShardsNamespace,
             BSON(ShardType::name() << shardName),
             BSONObj(),
             1));
@@ -367,7 +367,7 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
            operationContext(),
            ReadPreferenceSetting{ReadPreference::PrimaryOnly},
            repl::ReadConcernLevel::kLocalReadConcern,
-           ShardType::ConfigNS,
+           NamespaceString::kConfigsvrShardsNamespace,
            BSON(ShardType::name() << shard1.getName()),
            BSONObj(),
            1));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index e2bd69e8cea..7de1d4c3efe 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -120,7 +120,7 @@ StatusWith<std::string> generateNewShardName(OperationContext* opCtx) {
         opCtx,
         kConfigReadSelector,
         repl::ReadConcernLevel::kLocalReadConcern,
-        ShardType::ConfigNS,
+        NamespaceString::kConfigsvrShardsNamespace,
         shardNameRegex.obj(),
         BSON(ShardType::name() << -1),
         1);
@@ -748,7 +748,7 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
 
     Status result = Grid::get(opCtx)->catalogClient()->insertConfigDocument(
         opCtx,
-        ShardType::ConfigNS,
+        NamespaceString::kConfigsvrShardsNamespace,
        shardType.toBSON(),
        ShardingCatalogClient::kLocalWriteConcern);
     if (!result.isOK()) {
@@ -821,7 +821,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
         configShard->exhaustiveFindOnConfig(opCtx,
                                             kConfigReadSelector,
                                             repl::ReadConcernLevel::kLocalReadConcern,
-                                            ShardType::ConfigNS,
+                                            NamespaceString::kConfigsvrShardsNamespace,
                                             BSON(ShardType::name() << name),
                                             BSONObj(),
                                             1));
@@ -833,7 +833,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
     // Find how many *other* shards exist, which are *not* currently draining
     const auto countOtherNotDrainingShards = uassertStatusOK(_runCountCommandOnConfig(
         opCtx,
-        ShardType::ConfigNS,
+        NamespaceString::kConfigsvrShardsNamespace,
         BSON(ShardType::name() << NE << name << ShardType::draining.ne(true))));
     uassert(ErrorCodes::IllegalOperation,
             "Operation not allowed because it would remove the last shard",
@@ -854,7 +854,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
 
     const bool isShardCurrentlyDraining = uassertStatusOK(_runCountCommandOnConfig(
         opCtx,
-        ShardType::ConfigNS,
+        NamespaceString::kConfigsvrShardsNamespace,
         BSON(ShardType::name() << name << ShardType::draining(true)))) > 0;
 
     auto* const catalogClient = Grid::get(opCtx)->catalogClient();
@@ -875,7 +875,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
 
         uassertStatusOKWithContext(
             catalogClient->updateConfigDocument(opCtx,
-                                                ShardType::ConfigNS,
+                                                NamespaceString::kConfigsvrShardsNamespace,
                                                 BSON(ShardType::name() << name),
                                                 BSON("$set" << BSON(ShardType::draining(true))),
                                                 false,
@@ -930,7 +930,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
         configShard->exhaustiveFindOnConfig(opCtx,
                                             ReadPreferenceSetting{ReadPreference::PrimaryOnly},
                                             repl::ReadConcernLevel::kLocalReadConcern,
-                                            ShardType::ConfigNS,
+                                            NamespaceString::kConfigsvrShardsNamespace,
                                             BSON(ShardType::name.ne(name)),
                                             {},
                                             1);
@@ -1219,7 +1219,7 @@ void ShardingCatalogManager::_standardizeClusterParameters(OperationContext* opC
             opCtx,
             ReadPreferenceSetting(ReadPreference::PrimaryOnly),
             repl::ReadConcernLevel::kLocalReadConcern,
-            ShardType::ConfigNS,
+            NamespaceString::kConfigsvrShardsNamespace,
             BSONObj(),
             BSONObj(),
             boost::none));
@@ -1240,7 +1240,7 @@ void ShardingCatalogManager::_removeShardInTransaction(OperationContext* opCtx,
                                                        const Timestamp& newTopologyTime) {
     auto removeShardFn = [removedShardName, controlShardName, newTopologyTime](
                              const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) {
-        write_ops::DeleteCommandRequest deleteOp(ShardType::ConfigNS);
+        write_ops::DeleteCommandRequest deleteOp(NamespaceString::kConfigsvrShardsNamespace);
         deleteOp.setDeletes({[&]() {
             write_ops::DeleteOpEntry entry;
             entry.setMulti(false);
@@ -1253,7 +1253,8 @@ void ShardingCatalogManager::_removeShardInTransaction(OperationContext* opCtx,
                            auto deleteResponse) {
                 uassertStatusOK(deleteResponse.toStatus());
 
-                write_ops::UpdateCommandRequest updateOp(ShardType::ConfigNS);
+                write_ops::UpdateCommandRequest updateOp(
+                    NamespaceString::kConfigsvrShardsNamespace);
                 updateOp.setUpdates({[&]() {
                     write_ops::UpdateOpEntry entry;
                     entry.setUpsert(false);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index 6bdfb5d7655..e8739c4ec62 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -217,7 +217,7 @@ Status ShardingCatalogManager::addShardToZone(OperationContext* opCtx,
 
     auto updateStatus = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
         opCtx,
-        ShardType::ConfigNS,
+        NamespaceString::kConfigsvrShardsNamespace,
         BSON(ShardType::name(shardName)),
         BSON("$addToSet" << BSON(ShardType::tags() << zoneName)),
         false,
@@ -241,7 +241,7 @@ Status ShardingCatalogManager::removeShardFromZone(OperationContext* opCtx,
     Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kZoneOpLock);
 
     auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
-    const NamespaceString shardNS(ShardType::ConfigNS);
+    const NamespaceString shardNS(NamespaceString::kConfigsvrShardsNamespace);
 
     //
     // Check whether the shard even exist in the first place.
@@ -287,7 +287,7 @@ Status ShardingCatalogManager::removeShardFromZone(OperationContext* opCtx,
 
     auto updateStatus = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
         opCtx,
-        ShardType::ConfigNS,
+        NamespaceString::kConfigsvrShardsNamespace,
         BSON(ShardType::name(shardName)),
         BSON("$pull" << BSON(ShardType::tags() << zoneName)),
         false,
@@ -322,7 +322,7 @@ void ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
             opCtx,
             kConfigPrimarySelector,
             repl::ReadConcernLevel::kLocalReadConcern,
-            ShardType::ConfigNS,
+            NamespaceString::kConfigsvrShardsNamespace,
             BSON(ShardType::tags() << zoneName),
             BSONObj(),
             1))
diff --git a/src/mongo/db/s/config_server_op_observer.cpp b/src/mongo/db/s/config_server_op_observer.cpp
index dff7d0d6022..c0081d68a41 100644
--- a/src/mongo/db/s/config_server_op_observer.cpp
+++ b/src/mongo/db/s/config_server_op_observer.cpp
@@ -96,7 +96,8 @@ void ConfigServerOpObserver::_onReplicationRollback(OperationContext* opCtx,
         ClusterIdentityLoader::get(opCtx)->discardCachedClusterId();
     }
 
-    if (rbInfo.rollbackNamespaces.find(ShardType::ConfigNS) != rbInfo.rollbackNamespaces.end()) {
+    if (rbInfo.rollbackNamespaces.find(NamespaceString::kConfigsvrShardsNamespace) !=
+        rbInfo.rollbackNamespaces.end()) {
         // If some entries were rollbacked from config.shards we might need to discard some tick
         // points from the TopologyTimeTicker
         const auto lastApplied = repl::ReplicationCoordinator::get(opCtx)->getMyLastAppliedOpTime();
@@ -110,7 +111,7 @@ void ConfigServerOpObserver::onInserts(OperationContext* opCtx,
                                        std::vector<InsertStatement>::const_iterator begin,
                                        std::vector<InsertStatement>::const_iterator end,
                                        bool fromMigrate) {
-    if (nss != ShardType::ConfigNS) {
+    if (nss != NamespaceString::kConfigsvrShardsNamespace) {
         return;
     }
@@ -137,7 +138,7 @@ void ConfigServerOpObserver::onInserts(OperationContext* opCtx,
 }
 
 void ConfigServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) {
-    if (args.nss != ShardType::ConfigNS) {
+    if (args.nss != NamespaceString::kConfigsvrShardsNamespace) {
         return;
     }
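Editor's note: the config_server_op_observer.cpp hunks above all apply the same guard. Observer hooks fire for writes to every collection, so each handler returns early unless the namespace is config.shards. A stand-alone sketch of that pattern, with hypothetical names throughout — only the "config.shards" value mirrors what the real constant refers to:

    #include <iostream>
    #include <string>

    // Hypothetical stand-in; in the real code the constant lives on
    // NamespaceString and the handler is ConfigServerOpObserver::onInserts.
    const std::string kConfigsvrShardsNamespace = "config.shards";

    // Models the namespace guard: ignore every collection except
    // config.shards, then react to the shard-topology change.
    void onInserts(const std::string& nss) {
        if (nss != kConfigsvrShardsNamespace) {
            return;  // not a write to config.shards; nothing to do
        }
        std::cout << "shard topology change observed in " << nss << "\n";
    }

    int main() {
        onInserts("config.chunks");  // filtered out by the namespace guard
        onInserts("config.shards");  // handled
        return 0;
    }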