129 files changed, 316 insertions, 323 deletions
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index 96429793986..bf3987a39c4 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -153,7 +153,7 @@ Status validateChangeStreamPreAndPostImagesOptionIsPermitted(const NamespaceStri
         return validationStatus;
     }
 
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
         !gFeatureFlagCatalogShard.isEnabled(serverGlobalParams.featureCompatibility)) {
         return {
             ErrorCodes::InvalidOptions,
diff --git a/src/mongo/db/change_stream_options_manager.cpp b/src/mongo/db/change_stream_options_manager.cpp
index 3aa92f73e4d..37e82659886 100644
--- a/src/mongo/db/change_stream_options_manager.cpp
+++ b/src/mongo/db/change_stream_options_manager.cpp
@@ -101,7 +101,7 @@ Status ChangeStreamOptionsParameter::validate(const BSONElement& newValueElement
     auto* repl = repl::ReplicationCoordinator::get(getGlobalServiceContext());
     bool isStandalone = repl &&
         repl->getReplicationMode() == repl::ReplicationCoordinator::modeNone &&
-        serverGlobalParams.clusterRole == ClusterRole::None;
+        serverGlobalParams.clusterRole.has(ClusterRole::None);
     if (isStandalone) {
         return {ErrorCodes::IllegalOperation,
                 "The 'changeStreamOptions' parameter is unsupported in standalone."};
diff --git a/src/mongo/db/change_stream_serverless_helpers.cpp b/src/mongo/db/change_stream_serverless_helpers.cpp
index 9fcfbb4e91f..4e082c005b0 100644
--- a/src/mongo/db/change_stream_serverless_helpers.cpp
+++ b/src/mongo/db/change_stream_serverless_helpers.cpp
@@ -71,7 +71,7 @@ bool isChangeStreamEnabled(OperationContext* opCtx, const TenantId& tenantId) {
 
 bool canInitializeServices() {
     // A change collection must not be enabled on the config server.
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         return false;
     }
diff --git a/src/mongo/db/change_streams_cluster_parameter.cpp b/src/mongo/db/change_streams_cluster_parameter.cpp
index 1b4a148adf7..f3636b6e41c 100644
--- a/src/mongo/db/change_streams_cluster_parameter.cpp
+++ b/src/mongo/db/change_streams_cluster_parameter.cpp
@@ -44,7 +44,7 @@ Status validateChangeStreamsClusterParameter(
     auto* repl = repl::ReplicationCoordinator::get(getGlobalServiceContext());
     bool isStandalone = repl &&
         repl->getReplicationMode() == repl::ReplicationCoordinator::modeNone &&
-        serverGlobalParams.clusterRole == ClusterRole::None;
+        serverGlobalParams.clusterRole.has(ClusterRole::None);
     if (isStandalone) {
         return {ErrorCodes::IllegalOperation,
                 "The 'changeStreams' parameter is unsupported in standalone."};
diff --git a/src/mongo/db/cluster_role.cpp b/src/mongo/db/cluster_role.cpp
index 370207f7325..468b5380d0a 100644
--- a/src/mongo/db/cluster_role.cpp
+++ b/src/mongo/db/cluster_role.cpp
@@ -33,7 +33,7 @@
 
 namespace mongo {
 
-bool ClusterRole::operator==(const ClusterRole& other) const {
+bool ClusterRole::has(const ClusterRole& other) const {
     if (gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV() && _value == ClusterRole::ConfigServer) {
         return other._value == ClusterRole::ConfigServer ||
             other._value == ClusterRole::ShardServer;
@@ -42,16 +42,11 @@ bool ClusterRole::operator==(const ClusterRole& other) const {
     return _value == other._value;
 }
 
-bool ClusterRole::isShardRole() {
-    return _value == ClusterRole::ShardServer ||
-        (gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV() && _value == ClusterRole::ConfigServer);
-}
-
-bool ClusterRole::isExclusivelyShardRole() {
+bool ClusterRole::exclusivelyHasShardRole() {
     return _value == ClusterRole::ShardServer;
 }
 
-bool ClusterRole::isExclusivelyConfigSvrRole() {
+bool ClusterRole::exclusivelyHasConfigRole() {
     return _value == ClusterRole::ConfigServer && !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV();
 }
 }  // namespace mongo
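The cluster_role.cpp hunk above is the heart of this change: equality became a containment test. A minimal standalone sketch of the intended semantics, with the feature flag modeled as a plain bool (the `ClusterRoleModel`/`Role` names are illustrative, not MongoDB's):

```cpp
#include <cassert>

// Editor's sketch, not the real class: models ClusterRole::has() as shown
// in cluster_role.cpp above.
enum class Role { None, ShardServer, ConfigServer };

struct ClusterRoleModel {
    Role value;
    bool catalogShardEnabled;  // stands in for gFeatureFlagCatalogShard

    // A config server with the catalog-shard flag enabled also "has" the
    // shard role; any other value matches only itself.
    bool has(Role other) const {
        if (catalogShardEnabled && value == Role::ConfigServer) {
            return other == Role::ConfigServer || other == Role::ShardServer;
        }
        return value == other;
    }
};

int main() {
    ClusterRoleModel dedicated{Role::ConfigServer, /*catalogShardEnabled=*/false};
    assert(dedicated.has(Role::ConfigServer));
    assert(!dedicated.has(Role::ShardServer));

    ClusterRoleModel catalogShard{Role::ConfigServer, /*catalogShardEnabled=*/true};
    assert(catalogShard.has(Role::ConfigServer));
    assert(catalogShard.has(Role::ShardServer));  // both true at once
    return 0;
}
```

Because the two role checks can now both be true on one node, keeping the check spelled as `operator==` (and the derived `operator!=`) would have been misleading; renaming it to `has()` is what forces every call site in the rest of this diff to be revisited.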
diff --git a/src/mongo/db/cluster_role.h b/src/mongo/db/cluster_role.h
index c5fde6472f5..343d8c1d48d 100644
--- a/src/mongo/db/cluster_role.h
+++ b/src/mongo/db/cluster_role.h
@@ -51,20 +51,15 @@ public:
         return *this;
     }
 
-    bool operator==(const ClusterRole& other) const;
+    bool has(const ClusterRole& other) const;
 
-    bool operator!=(const ClusterRole& other) const {
-        return !ClusterRole::operator==(other);
-    }
-
-    // Returns true if this mongod was started with --shardsvr or --configsvr in a catalog shard
-    // topology, false otherwise.
-    bool isShardRole();
     // Returns true if this mongod was started with --shardsvr, false otherwise.
-    bool isExclusivelyShardRole();
-    // Returns true if this mongod was started with --configServer in a non-catalog shard topology,
+    bool exclusivelyHasShardRole();
+
+    // Returns true if this mongod was started with --configsvr in a non-catalog shard topology,
     // false otherwise.
-    bool isExclusivelyConfigSvrRole();
+    // TODO SERVER-75391: Remove.
+    bool exclusivelyHasConfigRole();
 
 private:
     Value _value;
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 853ec6e013a..07e1ee446fc 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -140,7 +140,7 @@ public:
     Reply typedRun(OperationContext* opCtx) final {
         auto dbName = request().getDbName();
         // disallow dropping the config database
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
             (dbName == DatabaseName::kConfig)) {
             uasserted(ErrorCodes::IllegalOperation,
                       "Cannot drop 'config' database if mongod started "
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index 3cd876c19e3..b262dafaee1 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -431,7 +431,7 @@ void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* opCtx,
     // featureCompatibilityVersion is the downgrade version, so that it can be safely added to a
     // downgrade version cluster. The config server will run setFeatureCompatibilityVersion as
     // part of addShard.
-    const bool storeUpgradeVersion = !serverGlobalParams.clusterRole.isExclusivelyShardRole();
+    const bool storeUpgradeVersion = !serverGlobalParams.clusterRole.exclusivelyHasShardRole();
 
     UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx);
     NamespaceString nss(NamespaceString::kServerConfigurationNamespace);
diff --git a/src/mongo/db/commands/get_cluster_parameter_invocation.cpp b/src/mongo/db/commands/get_cluster_parameter_invocation.cpp
index 5285b84e3ff..7d827971f00 100644
--- a/src/mongo/db/commands/get_cluster_parameter_invocation.cpp
+++ b/src/mongo/db/commands/get_cluster_parameter_invocation.cpp
@@ -120,7 +120,7 @@ GetClusterParameterInvocation::Reply GetClusterParameterInvocation::getCachedPar
     auto* repl = repl::ReplicationCoordinator::get(opCtx);
     bool isStandalone = repl &&
         repl->getReplicationMode() == repl::ReplicationCoordinator::modeNone &&
-        serverGlobalParams.clusterRole == ClusterRole::None;
+        serverGlobalParams.clusterRole.has(ClusterRole::None);
 
     auto [parameterNames, parameterValues] =
         retrieveRequestedParameters(opCtx, cmdBody, request.getDbName().tenantId(), isStandalone);
diff --git a/src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp b/src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp
index fa09b571c2d..4a12c44304f 100644
--- a/src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp
+++ b/src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp
@@ -70,8 +70,8 @@ public:
             std::list<BSONObj>(originalIndexes.begin(), originalIndexes.end());
         const auto& collectionOptions = thisRequest.getCollectionOptions();
 
-        if (serverGlobalParams.clusterRole == ClusterRole::None ||
-            serverGlobalParams.clusterRole.isExclusivelyConfigSvrRole()) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::None) ||
+            serverGlobalParams.clusterRole.exclusivelyHasConfigRole()) {
            // No need to acquire additional locks in a non-sharded environment
             _internalRun(opCtx, fromNss, toNss, indexList, collectionOptions);
             return;
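The standalone checks rewritten above (change_stream_options_manager.cpp, change_streams_cluster_parameter.cpp, get_cluster_parameter_invocation.cpp) all repeat one idiom: the replication mode is `modeNone` and the node holds no cluster role. A sketch of that test with the MongoDB types stubbed out; `looksLikeStandalone` is a hypothetical name, the real code keeps the expression inline:

```cpp
#include <cassert>

enum class ReplMode { kNone, kReplSet };
enum class Role { None, ShardServer, ConfigServer };

struct Node {
    bool hasReplCoord;  // ReplicationCoordinator::get(...) returned non-null
    ReplMode mode;
    Role role;
};

// Mirrors: repl && repl->getReplicationMode() == modeNone &&
//          serverGlobalParams.clusterRole.has(ClusterRole::None)
bool looksLikeStandalone(const Node& n) {
    return n.hasReplCoord && n.mode == ReplMode::kNone && n.role == Role::None;
}

int main() {
    Node standalone{true, ReplMode::kNone, Role::None};
    Node replSetMember{true, ReplMode::kReplSet, Role::None};
    Node shard{true, ReplMode::kNone, Role::ShardServer};
    assert(looksLikeStandalone(standalone));
    assert(!looksLikeStandalone(replSetMember));
    assert(!looksLikeStandalone(shard));
    return 0;
}
```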
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index b906f79b1fd..321cd9ed91c 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -389,7 +389,7 @@ public:
                 str::stream() << "Cannot set parameter requireApiVersion=true on a shard or config server",
                 parameterName != "requireApiVersion" || !parameter.trueValue() ||
-                    (serverGlobalParams.clusterRole == ClusterRole::None));
+                    (serverGlobalParams.clusterRole.has(ClusterRole::None)));
 
         auto oldValueObj = ([&] {
             BSONObjBuilder bb;
diff --git a/src/mongo/db/commands/read_write_concern_defaults_server_status.cpp b/src/mongo/db/commands/read_write_concern_defaults_server_status.cpp
index 132efb77391..8fbe94aab1b 100644
--- a/src/mongo/db/commands/read_write_concern_defaults_server_status.cpp
+++ b/src/mongo/db/commands/read_write_concern_defaults_server_status.cpp
@@ -42,12 +42,12 @@ public:
     ReadWriteConcernDefaultsServerStatus() : ServerStatusSection("defaultRWConcern") {}
 
     bool includeByDefault() const override {
-        return !serverGlobalParams.clusterRole.isExclusivelyShardRole();
+        return !serverGlobalParams.clusterRole.exclusivelyHasShardRole();
     }
 
     BSONObj generateSection(OperationContext* opCtx,
                             const BSONElement& configElement) const override {
-        if (serverGlobalParams.clusterRole.isExclusivelyShardRole() ||
+        if (serverGlobalParams.clusterRole.exclusivelyHasShardRole() ||
             !repl::ReplicationCoordinator::get(opCtx)->isReplEnabled()) {
             return {};
         }
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index cb6d9503b13..6f503f0f850 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -812,10 +812,11 @@ Status runAggregate(OperationContext* opCtx,
         }
 
         // Assert that a change stream on the config server is always opened on the oplog.
-        tassert(6763400,
-                str::stream() << "Change stream was unexpectedly opened on the namespace: "
-                              << nss << " in the config server",
-                serverGlobalParams.clusterRole != ClusterRole::ConfigServer || nss.isOplog());
+        tassert(
+            6763400,
+            str::stream() << "Change stream was unexpectedly opened on the namespace: " << nss
+                          << " in the config server",
+            !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) || nss.isOplog());
 
         // Upgrade and wait for read concern if necessary.
         _adjustChangeStreamReadConcern(opCtx);
@@ -925,7 +926,7 @@ Status runAggregate(OperationContext* opCtx,
             // Set this operation's shard version for the underlying collection to unsharded.
             // This is prerequisite for future shard versioning checks.
             boost::optional<ScopedSetShardRole> scopeSetShardRole;
-            if (serverGlobalParams.clusterRole != ClusterRole::None) {
+            if (!serverGlobalParams.clusterRole.has(ClusterRole::None)) {
                 scopeSetShardRole.emplace(opCtx,
                                           resolvedView.getNamespace(),
                                           ShardVersion::UNSHARDED() /* shardVersion */,
diff --git a/src/mongo/db/commands/rwc_defaults_commands.cpp b/src/mongo/db/commands/rwc_defaults_commands.cpp
index 1a39456b35c..d29ffbc5190 100644
--- a/src/mongo/db/commands/rwc_defaults_commands.cpp
+++ b/src/mongo/db/commands/rwc_defaults_commands.cpp
@@ -83,8 +83,8 @@ void assertNotStandaloneOrShardServer(OperationContext* opCtx, StringData cmdNam
 
     uassert(51301,
             str::stream() << "'" << cmdName << "' is not supported on shard nodes.",
-            serverGlobalParams.clusterRole == ClusterRole::None ||
-            serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+            serverGlobalParams.clusterRole.has(ClusterRole::None) ||
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
 }
 
 auto makeResponse(const ReadWriteConcernDefaults::RWConcernDefaultAndTime& rwcDefault,
@@ -136,7 +136,7 @@ public:
                 opCtx, request().getDefaultReadConcern(), request().getDefaultWriteConcern());
             // We don't want to check if the custom write concern exists on the config servers
             // because it only has to exist on the actual shards in order to be valid.
-            if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
+            if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
                 if (auto optWC = newDefaults.getDefaultWriteConcern()) {
                     uassertStatusOK(replCoord->validateWriteConcern(*optWC));
                 }
diff --git a/src/mongo/db/commands/set_cluster_parameter_command.cpp b/src/mongo/db/commands/set_cluster_parameter_command.cpp
index 025d7f3a0a6..f44f546d143 100644
--- a/src/mongo/db/commands/set_cluster_parameter_command.cpp
+++ b/src/mongo/db/commands/set_cluster_parameter_command.cpp
@@ -78,7 +78,7 @@ public:
         void typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::ErrorCodes::NotImplemented,
                     "setClusterParameter can only run on mongos in sharded clusters",
-                    (serverGlobalParams.clusterRole == ClusterRole::None));
+                    (serverGlobalParams.clusterRole.has(ClusterRole::None)));
 
             if (!feature_flags::gFeatureFlagAuditConfigClusterParameter.isEnabled(
                     serverGlobalParams.featureCompatibility)) {
diff --git a/src/mongo/db/commands/set_cluster_parameter_invocation.cpp b/src/mongo/db/commands/set_cluster_parameter_invocation.cpp
index f5df04e0a20..ddc6a21fccc 100644
--- a/src/mongo/db/commands/set_cluster_parameter_invocation.cpp
+++ b/src/mongo/db/commands/set_cluster_parameter_invocation.cpp
@@ -62,7 +62,7 @@ bool SetClusterParameterInvocation::invoke(OperationContext* opCtx,
                                           serverParameter,
                                           parameterName,
                                           tenantId,
-                                          serverGlobalParams.clusterRole.isExclusivelyShardRole());
+                                          serverGlobalParams.clusterRole.exclusivelyHasShardRole());
 
     BSONObjBuilder oldValueBob;
     serverParameter->append(opCtx, &oldValueBob, parameterName.toString(), tenantId);
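In the run_aggregate.cpp hunk above, the `ScopedSetShardRole` guard lives in a `boost::optional` so that it is constructed only when the node actually has a cluster role. A sketch of that construct-on-demand RAII idiom using `std::optional` and a toy guard (names illustrative):

```cpp
#include <iostream>
#include <optional>

// Toy stand-in for ScopedSetShardRole: does work on construction, undoes it
// on destruction.
struct ScopedGuard {
    explicit ScopedGuard(const char* what) { std::cout << "enter " << what << '\n'; }
    ~ScopedGuard() { std::cout << "exit\n"; }
};

void runWithOptionalGuard(bool nodeHasClusterRole) {
    std::optional<ScopedGuard> guard;  // nothing constructed yet
    if (nodeHasClusterRole) {
        guard.emplace("shard role scope");  // construct only when needed
    }
    // ... do the work; if emplaced, the guard's destructor runs at scope exit
}

int main() {
    runWithOptionalGuard(true);   // prints enter/exit
    runWithOptionalGuard(false);  // prints nothing
    return 0;
}
```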
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index a85adeaab70..ee80c075b6f 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -322,14 +322,14 @@ public:
         repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
 
         // TODO SERVER-72796: Remove once gGlobalIndexesShardingCatalog is enabled.
-        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) &&
             feature_flags::gGlobalIndexesShardingCatalog.isEnabledOnVersion(requestedVersion)) {
             ShardingDDLCoordinatorService::getService(opCtx)
                 ->waitForCoordinatorsOfGivenTypeToComplete(
                     opCtx, DDLCoordinatorTypeEnum::kRenameCollectionPre63Compatible);
         }
         // TODO SERVER-73627: Remove once 7.0 becomes last LTS.
-        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) &&
             feature_flags::gDropCollectionHoldingCriticalSection.isEnabledOnVersion(
                 requestedVersion)) {
             ShardingDDLCoordinatorService::getService(opCtx)
@@ -345,7 +345,7 @@ public:
         }
 
         const auto upgradeOrDowngrade = requestedVersion > actualVersion ? "upgrade" : "downgrade";
-        const auto server_type = serverGlobalParams.clusterRole == ClusterRole::ConfigServer
+        const auto server_type = serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)
             ? "config server"
             : (request.getPhase() ? "shard server" : "replica set/standalone");
 
@@ -365,7 +365,8 @@ public:
 
         uassert(5563600,
                 "'phase' field is only valid to be specified on shards",
-                !request.getPhase() || serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+                !request.getPhase() ||
+                    serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
 
         if (!request.getPhase() || request.getPhase() == SetFCVPhaseEnum::kStart) {
             {
@@ -376,8 +377,9 @@ public:
 
                 // If catalogShard is enabled and there is an entry in config.shards with _id:
                 // ShardId::kConfigServerId then the config server is a catalog shard.
-                auto isCatalogShard = serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
-                    serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
+                auto isCatalogShard =
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
+                    serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) &&
                     !ShardingCatalogManager::get(opCtx)
                          ->findOneConfigDocument(opCtx,
                                                  NamespaceString::kConfigsvrShardsNamespace,
@@ -422,7 +424,7 @@ public:
         }
 
         if (request.getPhase() == SetFCVPhaseEnum::kStart) {
-            invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+            invariant(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
 
             // This helper function is only for any actions that should be done specifically on
             // shard servers during phase 1 of the 2-phase setFCV protocol for sharded clusters.
@@ -439,7 +441,7 @@ public:
         invariant(serverGlobalParams.featureCompatibility.isUpgradingOrDowngrading());
 
         if (!request.getPhase() || request.getPhase() == SetFCVPhaseEnum::kPrepare) {
-            if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+            if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
                 if (gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV()) {
                     // The config server may also be a shard, so have it run any shard server tasks.
                     _shardServerPhase1Tasks(opCtx, requestedVersion);
@@ -463,7 +465,7 @@ public:
             _prepareToDowngrade(opCtx, request, changeTimestamp);
         }
 
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             // Tell the shards to enter the 'prepare' phase of setFCV (check that they will be
             // able to upgrade or downgrade).
             _sendSetFCVRequestToShards(
@@ -471,7 +473,7 @@
         }
 
         if (request.getPhase() == SetFCVPhaseEnum::kPrepare) {
-            invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+            invariant(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
 
             // If we are only running the 'prepare' phase, then we are done
             return true;
         }
@@ -512,7 +514,7 @@
         }
 
         // TODO SERVER-72796: Remove once gGlobalIndexesShardingCatalog is enabled.
-        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) &&
            requestedVersion > actualVersion &&
             feature_flags::gGlobalIndexesShardingCatalog
                 .isEnabledOnTargetFCVButDisabledOnOriginalFCV(requestedVersion, actualVersion)) {
@@ -522,7 +524,7 @@
         }
 
         // TODO SERVER-73627: Remove once 7.0 becomes last LTS.
-        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) &&
            requestedVersion > actualVersion &&
             feature_flags::gDropCollectionHoldingCriticalSection
                 .isEnabledOnTargetFCVButDisabledOnOriginalFCV(requestedVersion, actualVersion)) {
@@ -621,18 +623,18 @@ private:
     // and could be done after _runDowngrade even if it failed at any point in the middle of
     // _userCollectionsUassertsForDowngrade or _internalServerCleanupForDowngrade.
     void _prepareToUpgradeActions(OperationContext* opCtx) {
-        if (serverGlobalParams.clusterRole == ClusterRole::None) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::None)) {
             _cancelServerlessMigrations(opCtx);
             return;
         }
 
         // Note the config server is also considered a shard, so the ConfigServer and ShardServer
         // roles aren't mutually exclusive.
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             // Config server role actions.
         }
 
-        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
             // Shard server role actions.
         }
     }
@@ -652,7 +654,7 @@ private:
     // _internalServerCleanupForDowngrade.
     void _completeUpgrade(OperationContext* opCtx,
                           const multiversion::FeatureCompatibilityVersion requestedVersion) {
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             const auto actualVersion = serverGlobalParams.featureCompatibility.getVersion();
             _cleanupConfigVersionOnUpgrade(opCtx, requestedVersion, actualVersion);
             _createSchemaOnConfigSettings(opCtx, requestedVersion, actualVersion);
@@ -755,7 +757,7 @@ private:
         if (feature_flags::gGlobalIndexesShardingCatalog
                 .isEnabledOnTargetFCVButDisabledOnOriginalFCV(requestedVersion, actualVersion)) {
             uassertStatusOK(sharding_util::createShardingIndexCatalogIndexes(opCtx));
-            if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+            if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
                 uassertStatusOK(sharding_util::createShardCollectionCatalogIndexes(opCtx));
             }
         }
@@ -860,8 +862,8 @@ private:
         // need to happen during the upgrade. It is required that the code in this helper function
         // is idempotent and could be done after _runDowngrade even if it failed at any point in the
         // middle of _userCollectionsUassertsForDowngrade or _internalServerCleanupForDowngrade.
-        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer ||
-            serverGlobalParams.clusterRole == ClusterRole::None) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) ||
+            serverGlobalParams.clusterRole.has(ClusterRole::None)) {
             _userCollectionsWorkForUpgrade();
         }
 
@@ -881,7 +883,7 @@
                             boost::optional<Timestamp> changeTimestamp) {
         const auto requestedVersion = request.getCommandParameter();
 
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
 
             // Always abort the reshardCollection regardless of version to ensure that it will run
             // on a consistent version from start to finish. This will ensure that it will be able
@@ -910,18 +912,18 @@ private:
     // This helper function is for any actions that should be done before taking the FCV full
     // transition lock in S mode.
     void _prepareToDowngradeActions(OperationContext* opCtx) {
-        if (serverGlobalParams.clusterRole == ClusterRole::None) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::None)) {
            _cancelServerlessMigrations(opCtx);
             return;
         }
 
         // Note the config server is also considered a shard, so the ConfigServer and ShardServer
         // roles aren't mutually exclusive.
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             // Config server role actions.
         }
 
-        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
             // Shard server role actions.
         }
     }
@@ -958,7 +960,7 @@ private:
 
         // Note the config server is also considered a shard, so the ConfigServer and ShardServer
         // roles aren't mutually exclusive.
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             if (gFeatureFlagCatalogShard.isDisabledOnTargetFCVButEnabledOnOriginalFCV(
                     requestedVersion, originalVersion)) {
                 _assertNoCollectionsHaveChangeStreamsPrePostImages(opCtx);
@@ -998,8 +1000,8 @@ private:
             }
         }
 
-        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer ||
-            serverGlobalParams.clusterRole == ClusterRole::None) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) ||
+            serverGlobalParams.clusterRole.has(ClusterRole::None)) {
             if (feature_flags::gTimeseriesScalabilityImprovements
                     .isDisabledOnTargetFCVButEnabledOnOriginalFCV(requestedVersion,
                                                                   originalVersion)) {
@@ -1161,7 +1163,7 @@ private:
         _cleanUpClusterParameters(opCtx, requestedVersion);
         // Note the config server is also considered a shard, so the ConfigServer and ShardServer
         // roles aren't mutually exclusive.
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             _dropInternalShardingIndexCatalogCollection(opCtx, requestedVersion, originalVersion);
             _removeSchemaOnConfigSettings(opCtx, requestedVersion, originalVersion);
             // Always abort the reshardCollection regardless of version to ensure that it will
@@ -1171,7 +1173,7 @@ private:
             _updateConfigVersionOnDowngrade(opCtx, requestedVersion, originalVersion);
         }
 
-        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
             // If we are downgrading to a version that doesn't support implicit translation of
             // Timeseries collection in sharding DDL Coordinators we need to drain all ongoing
             // coordinators
@@ -1197,7 +1199,7 @@ private:
                 .isDisabledOnTargetFCVButEnabledOnOriginalFCV(requestedVersion, originalVersion)) {
             // Note the config server is also considered a shard, so the ConfigServer and
             // ShardServer roles aren't mutually exclusive.
-            if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+            if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
                 // There cannot be any global indexes at this point, but calling
                 // clearCollectionShardingIndexCatalog removes the index version from
                 // config.shard.collections and the csr transactionally.
@@ -1230,7 +1232,7 @@ private:
                       dropStatus.isOK() || dropStatus.code() == ErrorCodes::NamespaceNotFound);
         }
 
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             LOGV2(6711906,
                   "Unset index version field in config.collections",
                   "nss"_attr = CollectionType::ConfigNS);
@@ -1252,7 +1254,7 @@ private:
 
         // TODO SERVER-75274: Drop both collections on a catalog shard enabled config server.
         NamespaceString indexCatalogNss;
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             indexCatalogNss = NamespaceString::kConfigsvrIndexCatalogNamespace;
         } else {
             indexCatalogNss = NamespaceString::kShardIndexCatalogNamespace;
@@ -1398,7 +1400,7 @@ private:
         // they would turn into a Support case.
         _internalServerCleanupForDowngrade(opCtx, requestedVersion);
 
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             // Tell the shards to complete setFCV (transition to fully downgraded).
             _sendSetFCVRequestToShards(opCtx, request, changeTimestamp, SetFCVPhaseEnum::kComplete);
         }
@@ -1412,7 +1414,7 @@
      */
     void _cancelServerlessMigrations(OperationContext* opCtx) {
         invariant(serverGlobalParams.featureCompatibility.isUpgradingOrDowngrading());
-        if (serverGlobalParams.clusterRole == ClusterRole::None) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::None)) {
             auto donorService = checked_cast<TenantMigrationDonorService*>(
                 repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext())
                     ->lookupServiceByName(TenantMigrationDonorService::kServiceName));
@@ -1447,12 +1449,12 @@
     boost::optional<Timestamp> getChangeTimestamp(mongo::OperationContext* opCtx,
                                                   mongo::SetFeatureCompatibilityVersion request) {
         boost::optional<Timestamp> changeTimestamp;
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             // The Config Server always creates a new ID (i.e., timestamp) when it receives an
             // upgrade or downgrade request.
             const auto now = VectorClock::get(opCtx)->getTime();
             changeTimestamp = now.clusterTime().asTimestamp();
-        } else if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
+        } else if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) &&
                    request.getPhase()) {
             // Shards receive the timestamp from the Config Server's request.
             changeTimestamp = request.getChangeTimestamp();
@@ -1467,7 +1469,7 @@ private:
     }
 
     void _assertNoCollectionsHaveChangeStreamsPrePostImages(OperationContext* opCtx) {
-        invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+        invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
 
         // Config servers only started allowing collections with changeStreamPreAndPostImages
         // in 7.0, so don't allow downgrading with such a collection.
diff --git a/src/mongo/db/commands/set_user_write_block_mode_command.cpp b/src/mongo/db/commands/set_user_write_block_mode_command.cpp
index bd8c89dcaf8..b852cf00d24 100644
--- a/src/mongo/db/commands/set_user_write_block_mode_command.cpp
+++ b/src/mongo/db/commands/set_user_write_block_mode_command.cpp
@@ -69,7 +69,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     str::stream() << Request::kCommandName
                                   << " cannot be run on shardsvrs nor configsvrs",
-                    serverGlobalParams.clusterRole == ClusterRole::None);
+                    serverGlobalParams.clusterRole.has(ClusterRole::None));
 
             uassert(ErrorCodes::IllegalOperation,
                     str::stream() << Request::kCommandName << " cannot be run on standalones",
diff --git a/src/mongo/db/commands/tenant_migration_donor_cmds.cpp b/src/mongo/db/commands/tenant_migration_donor_cmds.cpp
index ddd1dc50afa..b0e952a9a63 100644
--- a/src/mongo/db/commands/tenant_migration_donor_cmds.cpp
+++ b/src/mongo/db/commands/tenant_migration_donor_cmds.cpp
@@ -64,7 +64,7 @@ public:
         Response typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     "tenant migrations are not available on config servers",
-                    serverGlobalParams.clusterRole != ClusterRole::ConfigServer);
+                    !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
 
             // (Generic FCV reference): This FCV reference should exist across LTS binary versions.
             uassert(
@@ -180,7 +180,7 @@ public:
         void typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     "tenant migrations are not available on config servers",
-                    serverGlobalParams.clusterRole != ClusterRole::ConfigServer);
+                    !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
 
             const auto& cmd = request();
 
@@ -254,7 +254,7 @@ public:
         void typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     "tenant migrations are not available on config servers",
-                    serverGlobalParams.clusterRole != ClusterRole::ConfigServer);
+                    !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
 
             const RequestType& cmd = request();
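set_feature_compatibility_version_command.cpp above dispatches on an optional `phase` field: a config server drives its shards through kStart/kPrepare/kComplete, and a request that carries a phase is asserted to be running on a node with the shard role. A much-simplified sketch of that dispatch shape (the enum values mirror the diff; the bodies and the `isShard` flag are placeholders, not the real logic):

```cpp
#include <cassert>
#include <optional>

enum class Phase { kStart, kPrepare, kComplete };

bool isShard = true;  // stands in for clusterRole.has(ClusterRole::ShardServer)

bool runSetFCV(std::optional<Phase> phase) {
    if (!phase || *phase == Phase::kStart) {
        // ... transition to the upgrading/downgrading FCV state ...
    }
    if (phase == Phase::kStart) {
        assert(isShard);  // mirrors invariant(clusterRole.has(ShardServer))
        return true;      // a shard is done after phase 1
    }
    if (!phase || *phase == Phase::kPrepare) {
        // ... prepare; a config server forwards kPrepare to every shard ...
    }
    if (phase == Phase::kPrepare) {
        assert(isShard);
        return true;      // only running the 'prepare' phase, so we are done
    }
    // ... complete the upgrade/downgrade ...
    return true;
}

int main() {
    runSetFCV(std::nullopt);   // replica set, or the config server driving
    runSetFCV(Phase::kStart);  // shard executing phase 1
    return 0;
}
```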
diff --git a/src/mongo/db/commands/tenant_migration_recipient_cmds.cpp b/src/mongo/db/commands/tenant_migration_recipient_cmds.cpp
index 684d9c35624..bb6b1a0a2f8 100644
--- a/src/mongo/db/commands/tenant_migration_recipient_cmds.cpp
+++ b/src/mongo/db/commands/tenant_migration_recipient_cmds.cpp
@@ -64,7 +64,7 @@ public:
         Response typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     "tenant migrations are not available on config servers",
-                    serverGlobalParams.clusterRole != ClusterRole::ConfigServer);
+                    !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
 
             // (Generic FCV reference): This FCV reference should exist across LTS binary versions.
             uassert(
@@ -287,7 +287,7 @@ public:
         void typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     "tenant migrations are not available on config servers",
-                    serverGlobalParams.clusterRole != ClusterRole::ConfigServer);
+                    !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
 
             const auto& cmd = request();
             const auto migrationProtocol = cmd.getProtocol().value_or(kDefaultMigrationProtocol);
diff --git a/src/mongo/db/commands/txn_cmds.cpp b/src/mongo/db/commands/txn_cmds.cpp
index 9240c572a54..b846e0bd1fe 100644
--- a/src/mongo/db/commands/txn_cmds.cpp
+++ b/src/mongo/db/commands/txn_cmds.cpp
@@ -150,7 +150,7 @@ public:
                 opCtx, optionalCommitTimestamp.value(), {});
         } else {
             if (ShardingState::get(opCtx)->canAcceptShardedCommands().isOK() ||
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
                 TransactionCoordinatorService::get(opCtx)->cancelIfCommitNotYetStarted(
                     opCtx, *opCtx->getLogicalSessionId(), txnNumberAndRetryCounter);
             }
@@ -258,7 +258,7 @@ public:
 
         if (!MONGO_unlikely(dontRemoveTxnCoordinatorOnAbort.shouldFail()) &&
             (ShardingState::get(opCtx)->canAcceptShardedCommands().isOK() ||
-             serverGlobalParams.clusterRole == ClusterRole::ConfigServer)) {
+             serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer))) {
             TransactionCoordinatorService::get(opCtx)->cancelIfCommitNotYetStarted(
                 opCtx, *opCtx->getLogicalSessionId(), txnNumberAndRetryCounter);
         }
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 404e6ace225..4384a88ebbf 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -2094,7 +2094,7 @@ CmdUMCTyped<GetUserCacheGenerationCommand, UMCGetUserCacheGenParams>::Invocation
     OperationContext* opCtx) {
     uassert(ErrorCodes::IllegalOperation,
             "_getUserCacheGeneration can only be run on config servers",
-            serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+            serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
     cmdGetUserCacheGeneration.skipApiVersionCheck();
     GetUserCacheGenerationReply reply;
diff --git a/src/mongo/db/exec/write_stage_common.cpp b/src/mongo/db/exec/write_stage_common.cpp
index ca893ba6774..11670a420d6 100644
--- a/src/mongo/db/exec/write_stage_common.cpp
+++ b/src/mongo/db/exec/write_stage_common.cpp
@@ -53,7 +53,7 @@ namespace write_stage_common {
 
 PreWriteFilter::PreWriteFilter(OperationContext* opCtx, NamespaceString nss)
     : _opCtx(opCtx), _nss(std::move(nss)), _skipFiltering([&] {
           // Always allow writes on replica sets.
-          if (serverGlobalParams.clusterRole == ClusterRole::None) {
+          if (serverGlobalParams.clusterRole.has(ClusterRole::None)) {
               return true;
           }
diff --git a/src/mongo/db/ftdc/ftdc_mongod.cpp b/src/mongo/db/ftdc/ftdc_mongod.cpp
index 7b4b4468d25..7632f8bb5dd 100644
--- a/src/mongo/db/ftdc/ftdc_mongod.cpp
+++ b/src/mongo/db/ftdc/ftdc_mongod.cpp
@@ -124,7 +124,7 @@ void registerMongoDCollectors(FTDCController* controller) {
                              << BSON_ARRAY(BSON("$collStats" << BSON(
                                                     "storageStats" << BSON(
                                                         "waitForLock" << false << "numericOnly" << true)))))));
-    if (!serverGlobalParams.clusterRole.isExclusivelyShardRole()) {
+    if (!serverGlobalParams.clusterRole.exclusivelyHasShardRole()) {
         // GetDefaultRWConcern
         controller->addOnRotateCollector(std::make_unique<FTDCSimpleInternalCommandCollector>(
             "getDefaultRWConcern",
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 836fd59ed91..ad6d15979ca 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -368,12 +368,12 @@ void registerPrimaryOnlyServices(ServiceContext* serviceContext) {
 
     std::vector<std::unique_ptr<repl::PrimaryOnlyService>> services;
 
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         services.push_back(std::make_unique<ReshardingCoordinatorService>(serviceContext));
         services.push_back(std::make_unique<ConfigsvrCoordinatorService>(serviceContext));
     }
 
-    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
         services.push_back(std::make_unique<RenameCollectionParticipantService>(serviceContext));
         services.push_back(std::make_unique<ShardingDDLCoordinatorService>(serviceContext));
         services.push_back(std::make_unique<ReshardingDonorService>(serviceContext));
@@ -386,7 +386,7 @@ void registerPrimaryOnlyServices(ServiceContext* serviceContext) {
         }
     }
 
-    if (serverGlobalParams.clusterRole == ClusterRole::None) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::None)) {
         services.push_back(std::make_unique<TenantMigrationDonorService>(serviceContext));
         services.push_back(std::make_unique<repl::TenantMigrationRecipientService>(serviceContext));
         if (getGlobalReplSettings().isServerless()) {
@@ -697,7 +697,7 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
 
     WaitForMajorityService::get(serviceContext).startup(serviceContext);
 
-    if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
+    if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         // A catalog shard initializes sharding awareness after setting up its config server state.
         // This function may take the global lock.
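registerPrimaryOnlyServices above builds its service list branch by branch, and with `has()` a catalog-shard node now takes both the config-server and the shard-server branches. A condensed, stubbed sketch of that shape (`Service`/`RoleSet` are illustrative types, not MongoDB's):

```cpp
#include <iostream>
#include <memory>
#include <vector>

struct Service {
    const char* name;
};

enum class Role { None, ShardServer, ConfigServer };

// Toy role holder: a catalog shard holds both sharded roles at once.
struct RoleSet {
    bool config, shard, none;
    bool has(Role r) const {
        return (r == Role::ConfigServer && config) || (r == Role::ShardServer && shard) ||
            (r == Role::None && none);
    }
};

std::vector<std::unique_ptr<Service>> registerServices(const RoleSet& role) {
    std::vector<std::unique_ptr<Service>> services;
    if (role.has(Role::ConfigServer)) {
        services.push_back(std::make_unique<Service>(Service{"ReshardingCoordinator"}));
    }
    if (role.has(Role::ShardServer)) {  // also taken on a catalog shard
        services.push_back(std::make_unique<Service>(Service{"ShardingDDLCoordinator"}));
    }
    if (role.has(Role::None)) {
        services.push_back(std::make_unique<Service>(Service{"TenantMigrationDonor"}));
    }
    return services;
}

int main() {
    auto svcs = registerServices({/*config=*/true, /*shard=*/true, /*none=*/false});
    std::cout << svcs.size() << " services registered\n";  // 2 on a catalog shard
    return 0;
}
```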
@@ -705,8 +705,8 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
     }
 
     try {
-        if ((serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
-             serverGlobalParams.clusterRole == ClusterRole::None) &&
+        if ((serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) ||
+             serverGlobalParams.clusterRole.has(ClusterRole::None)) &&
             replSettings.usingReplSets()) {
             ReadWriteConcernDefaults::get(startupOpCtx.get()->getServiceContext())
                 .refreshIfNecessary(startupOpCtx.get());
@@ -750,7 +750,7 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
 
     startFreeMonitoring(serviceContext);
 
-    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
         // Note: For replica sets, ShardingStateRecovery happens on transition to primary.
         if (!replCoord->isReplEnabled()) {
             if (ShardingState::get(startupOpCtx.get())->enabled()) {
@@ -759,14 +759,14 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
         }
     }
 
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         initializeGlobalShardingStateForConfigServerIfNeeded(startupOpCtx.get());
 
         // This function may take the global lock.
         initializeShardingAwarenessIfNeededAndLoadGlobalSettings(startupOpCtx.get());
     }
 
-    if (serverGlobalParams.clusterRole == ClusterRole::None &&
+    if (serverGlobalParams.clusterRole.has(ClusterRole::None) &&
         replSettings.usingReplSets()) {  // standalone replica set
         // The keys client must use local read concern if the storage engine can't support
         // majority read concern.
@@ -869,7 +869,7 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
     }
 
     if (computeModeEnabled) {
-        if (!isStandalone || serverGlobalParams.clusterRole != ClusterRole::None) {
+        if (!isStandalone || !serverGlobalParams.clusterRole.has(ClusterRole::None)) {
            LOGV2_ERROR(6968200, "'enableComputeMode' can be used only in standalone server");
             exitCleanly(ExitCode::badOptions);
         }
@@ -895,9 +895,9 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
 
     // Set up the logical session cache
     LogicalSessionCacheServer kind = LogicalSessionCacheServer::kStandalone;
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         kind = LogicalSessionCacheServer::kConfigServer;
-    } else if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+    } else if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
         kind = LogicalSessionCacheServer::kSharded;
     } else if (replSettings.usingReplSets()) {
         kind = LogicalSessionCacheServer::kReplicaSet;
@@ -906,7 +906,7 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
     LogicalSessionCache::set(serviceContext, makeLogicalSessionCacheD(kind));
 
     if (analyze_shard_key::supportsSamplingQueries(serviceContext, true /* ignoreFCV */) &&
-        serverGlobalParams.clusterRole == ClusterRole::None) {
+        serverGlobalParams.clusterRole.has(ClusterRole::None)) {
         analyze_shard_key::QueryAnalysisSampler::get(serviceContext).onStartup();
     }
 
@@ -1131,7 +1131,7 @@ void startupConfigActions(const std::vector<std::string>& args) {
 }
 
 void setUpCollectionShardingState(ServiceContext* serviceContext) {
-    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
         CollectionShardingStateFactory::set(
             serviceContext,
             std::make_unique<CollectionShardingStateFactoryShard>(serviceContext));
     } else {
@@ -1211,7 +1211,7 @@ void setUpReplication(ServiceContext* serviceContext) {
                                            SecureRandom().nextInt64());
     // Only create a ReplicaSetNodeExecutor if sharding is disabled and replication is enabled.
     // Note that sharding sets up its own executors for scheduling work to remote nodes.
-    if (serverGlobalParams.clusterRole == ClusterRole::None && replCoord->isReplEnabled())
+    if (serverGlobalParams.clusterRole.has(ClusterRole::None) && replCoord->isReplEnabled())
         ReplicaSetNodeProcessInterface::setReplicaSetNodeExecutor(
             serviceContext, makeReplicaSetNodeExecutor(serviceContext));
 
@@ -1231,7 +1231,7 @@ void setUpObservers(ServiceContext* serviceContext) {
     auto opObserverRegistry = std::make_unique<OpObserverRegistry>();
-    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
         DurableHistoryRegistry::get(serviceContext)
             ->registerPin(std::make_unique<ReshardingHistoryHook>());
         opObserverRegistry->addObserver(std::make_unique<OpObserverShardingImpl>(
@@ -1249,7 +1249,7 @@ void setUpObservers(ServiceContext* serviceContext) {
         }
     }
 
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         if (!gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV()) {
             opObserverRegistry->addObserver(
                 std::make_unique<OpObserverImpl>(std::make_unique<OplogWriterImpl>()));
@@ -1259,7 +1259,7 @@
         opObserverRegistry->addObserver(std::make_unique<ReshardingOpObserver>());
     }
 
-    if (serverGlobalParams.clusterRole == ClusterRole::None) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::None)) {
         opObserverRegistry->addObserver(
             std::make_unique<OpObserverImpl>(std::make_unique<OplogWriterImpl>()));
         opObserverRegistry->addObserver(std::make_unique<repl::TenantMigrationDonorOpObserver>());
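Note the catalog-shard subtlety in setUpObservers above: when gFeatureFlagCatalogShard is enabled, the config-server branch skips the plain OpObserverImpl, presumably because the shard-server branch on the same node has already registered a sharding-aware observer and adding both would duplicate oplog observation. A toy sketch of that guard (stubbed types; the rationale is the editor's reading of the diff):

```cpp
#include <cassert>
#include <string>
#include <vector>

// Stubbed observer registry illustrating the double-registration guard.
struct Registry {
    std::vector<std::string> observers;
    void add(std::string name) {
        observers.push_back(std::move(name));
    }
};

void setUpObservers(Registry& r, bool isShard, bool isConfig, bool catalogShardFlag) {
    if (isShard) {
        r.add("OpObserverShardingImpl");  // sharding-aware oplog observer
    }
    if (isConfig) {
        // A dedicated config server still needs a plain oplog observer; on a
        // catalog shard the branch above already supplied one.
        if (!catalogShardFlag) {
            r.add("OpObserverImpl");
        }
        r.add("ReshardingOpObserver");
    }
}

int main() {
    Registry catalogShard;
    setUpObservers(catalogShard, /*isShard=*/true, /*isConfig=*/true, /*catalogShardFlag=*/true);
    assert(catalogShard.observers.size() == 2);  // no duplicated oplog observer
    return 0;
}
```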
@@ -1445,7 +1445,7 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) {
         repl::ReplicationCoordinator::get(serviceContext)->shutdown(opCtx);
 
     // Terminate the index consistency check.
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         LOGV2_OPTIONS(4784904,
                       {LogComponent::kSharding},
                       "Shutting down the PeriodicShardedIndexConsistencyChecker");
diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp
index c7f18b2ab5f..3caaf351668 100644
--- a/src/mongo/db/ops/insert.cpp
+++ b/src/mongo/db/ops/insert.cpp
@@ -206,7 +206,7 @@ Status userAllowedCreateNS(OperationContext* opCtx, const NamespaceString& ns) {
                       str::stream() << "Invalid collection name: " << ns.coll());
     }
 
-    if (serverGlobalParams.clusterRole.isExclusivelyConfigSvrRole() && !ns.isOnInternalDb()) {
+    if (serverGlobalParams.clusterRole.exclusivelyHasConfigRole() && !ns.isOnInternalDb()) {
         return Status(ErrorCodes::InvalidNamespace,
                       str::stream()
                           << "Can't create user databases on a dedicated --configsvr instance "
diff --git a/src/mongo/db/pipeline/document_source_index_stats.cpp b/src/mongo/db/pipeline/document_source_index_stats.cpp
index fdf4003a26b..d8e07355efa 100644
--- a/src/mongo/db/pipeline/document_source_index_stats.cpp
+++ b/src/mongo/db/pipeline/document_source_index_stats.cpp
@@ -54,7 +54,7 @@ DocumentSource::GetNextResult DocumentSourceIndexStats::doGetNext() {
             pExpCtx->opCtx,
             pExpCtx->ns,
             _processName,
-            serverGlobalParams.clusterRole != ClusterRole::None);
+            !serverGlobalParams.clusterRole.has(ClusterRole::None));
         _indexStatsIter = _indexStats.cbegin();
     }
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index 2f72457e2b8..c72ed111884 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -1640,7 +1640,7 @@ std::unique_ptr<Pipeline, PipelineDeleter> attachCursorToPipeline(
                 if (!cm.isSharded() &&
                     // TODO SERVER-75391: Remove this condition.
-                    (serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
+                    (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) ||
                      expCtx->ns != NamespaceString::kConfigsvrCollectionsNamespace)) {
                     // If the collection is unsharded and we are on the primary, we should be able to
                     // do a local read. The primary may be moved right after the primary shard check,
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index cba89dd67e5..381f6c7963e 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -147,7 +147,7 @@ Status makeNoopWriteIfNeeded(OperationContext* opCtx,
     // Standalone replica set, so there is no need to advance the OpLog on the primary. The only
     // exception is after a tenant migration because the target time may be from the other
     // replica set and is not guaranteed to be in the oplog of this node's set.
-    if (serverGlobalParams.clusterRole == ClusterRole::None &&
+    if (serverGlobalParams.clusterRole.has(ClusterRole::None) &&
         !tenant_migration_access_blocker::hasActiveTenantMigration(opCtx, dbName)) {
         return Status::OK();
     }
@@ -428,7 +428,8 @@ Status waitForReadConcernImpl(OperationContext* opCtx,
         return Status::OK();
     }
 
-    const int debugLevel = serverGlobalParams.clusterRole == ClusterRole::ConfigServer ? 1 : 2;
+    const int debugLevel =
+        serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) ? 1 : 2;
 
     LOGV2_DEBUG(
         20991,
diff --git a/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp b/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp
index ff337f50921..daadb0c1444 100644
--- a/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp
+++ b/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp
@@ -67,7 +67,7 @@ boost::optional<RWConcernDefault> readWriteConcernDefaultsCacheLookupMongoD(
 }
 
 void readWriteConcernDefaultsMongodStartupChecks(OperationContext* opCtx) {
-    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
         DBDirectClient client(opCtx);
         const auto numPersistedDocuments =
             client.count(NamespaceString::kConfigSettingsNamespace,
diff --git a/src/mongo/db/repl/repl_set_config.cpp b/src/mongo/db/repl/repl_set_config.cpp
index 46f96bbbb59..8f0f8a2a083 100644
--- a/src/mongo/db/repl/repl_set_config.cpp
+++ b/src/mongo/db/repl/repl_set_config.cpp
@@ -171,7 +171,7 @@ Status ReplSetConfig::_initialize(bool forInitiate,
     //
     // Initialize configServer
     //
-    if (forInitiate && serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
+    if (forInitiate && serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
         !getConfigServer().has_value()) {
         setConfigServer(true);
     }
@@ -402,7 +402,7 @@ Status ReplSetConfig::_validate(bool allowSplitHorizonIP) const {
                               "servers cannot have a non-zero secondaryDelaySecs");
             }
         }
-        if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer &&
+        if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
            !skipShardingConfigurationChecks) {
             return Status(ErrorCodes::BadValue,
                           "Nodes being used for config servers must be started with the "
@@ -414,7 +414,7 @@ Status ReplSetConfig::_validate(bool allowSplitHorizonIP) const {
                               << " must be true in replica set configurations being "
                                  "used for config servers");
         }
-    } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    } else if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         return Status(ErrorCodes::BadValue,
                       "Nodes started with the --configsvr flag must have configsvr:true in "
                       "their config");
diff --git a/src/mongo/db/repl/replica_set_aware_service.h b/src/mongo/db/repl/replica_set_aware_service.h
index f1af24148a4..5192bb79482 100644
--- a/src/mongo/db/repl/replica_set_aware_service.h
+++ b/src/mongo/db/repl/replica_set_aware_service.h
@@ -291,7 +291,7 @@ template <class ActualService>
 class ReplicaSetAwareServiceConfigSvr : public ReplicaSetAwareService<ActualService> {
 private:
     virtual bool shouldRegisterReplicaSetAwareService() const final {
-        return serverGlobalParams.clusterRole == ClusterRole::ConfigServer;
+        return serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer);
     }
 };
 
@@ -303,7 +303,7 @@ template <class ActualService>
 class ReplicaSetAwareServiceShardSvr : public ReplicaSetAwareService<ActualService> {
 private:
     virtual bool shouldRegisterReplicaSetAwareService() const final {
-        return serverGlobalParams.clusterRole == ClusterRole::ShardServer;
+        return serverGlobalParams.clusterRole.has(ClusterRole::ShardServer);
     }
 };
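replica_set_aware_service.h above scopes a service to one role by overriding shouldRegisterReplicaSetAwareService(); rewritten in terms of `has()`, a ConfigSvr-scoped service now also registers on a catalog shard, where the node reports both roles. A compressed, stubbed sketch of that pattern (the service names and `nodeHas` are illustrative):

```cpp
#include <cassert>

enum class Role { None, ShardServer, ConfigServer };

// Pretend this node is a catalog-shard config server: it holds both roles.
bool nodeHas(Role r) {
    return r == Role::ConfigServer || r == Role::ShardServer;
}

template <class ActualService>
struct ReplicaSetAwareServiceConfigSvr {
    // Mirrors shouldRegisterReplicaSetAwareService() in the diff above.
    bool shouldRegister() const {
        return nodeHas(Role::ConfigServer);
    }
};

template <class ActualService>
struct ReplicaSetAwareServiceShardSvr {
    bool shouldRegister() const {
        return nodeHas(Role::ShardServer);
    }
};

struct ConfigOnlyService : ReplicaSetAwareServiceConfigSvr<ConfigOnlyService> {};
struct ShardOnlyService : ReplicaSetAwareServiceShardSvr<ShardOnlyService> {};

int main() {
    // On a catalog shard, both kinds of service register on the same node.
    assert(ConfigOnlyService{}.shouldRegister());
    assert(ShardOnlyService{}.shouldRegister());
    return 0;
}
```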
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index fabe5d1f02e..f7d2bc14eac 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -825,14 +825,14 @@ void ReplicationCoordinatorExternalStateImpl::onStepDownHook() {
 }
 
 void ReplicationCoordinatorExternalStateImpl::_shardingOnStepDownHook() {
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         PeriodicShardedIndexConsistencyChecker::get(_service).onStepDown();
         TransactionCoordinatorService::get(_service)->onStepDown();
     }
 
     if (ShardingState::get(_service)->enabled()) {
         CatalogCacheLoader::get(_service).onStepDown();
 
-        if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
+        if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             // Called earlier for config servers.
             TransactionCoordinatorService::get(_service)->onStepDown();
         }
@@ -895,7 +895,7 @@ void ReplicationCoordinatorExternalStateImpl::_stopAsyncUpdatesOfAndClearOplogTr
 
 void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook(
     OperationContext* opCtx) {
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         Status status = ShardingCatalogManager::get(opCtx)->initializeConfigDatabaseIfNeeded(opCtx);
         if (!status.isOK() && status != ErrorCodes::AlreadyInitialized) {
             // If the node is shutting down or it lost quorum just as it was becoming primary,
@@ -943,7 +943,7 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
             CatalogCacheLoader::get(_service).onStepUp();
         }
     }
-    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
         if (ShardingState::get(opCtx)->enabled()) {
             VectorClockMutable::get(opCtx)->recoverDirect(opCtx);
             Status status = ShardingStateRecovery_DEPRECATED::recover(opCtx);
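The step-down and step-up hooks in replication_coordinator_external_state_impl.cpp (above and just below) guard the shared TransactionCoordinatorService notification with `!has(ConfigServer)` ("called earlier for config servers"), so that on a catalog shard, where both the config branch and the sharding-enabled branch run, the service is only notified once. A toy model of that de-duplication:

```cpp
#include <cassert>

// Toy model of the "called earlier for config servers" guard.
struct Hooks {
    int stepDownNotifications = 0;

    void shardingOnStepDown(bool hasConfigRole, bool shardingEnabled) {
        if (hasConfigRole) {
            stepDownNotifications++;  // config-server branch notifies first
        }
        if (shardingEnabled) {
            if (!hasConfigRole) {
                // Called earlier for config servers.
                stepDownNotifications++;
            }
        }
    }
};

int main() {
    Hooks catalogShard;
    catalogShard.shardingOnStepDown(/*hasConfigRole=*/true, /*shardingEnabled=*/true);
    assert(catalogShard.stepDownNotifications == 1);  // not double-notified

    Hooks plainShard;
    plainShard.shardingOnStepDown(/*hasConfigRole=*/false, /*shardingEnabled=*/true);
    assert(plainShard.stepDownNotifications == 1);
    return 0;
}
```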
@@ -959,7 +959,7 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
 
         CatalogCacheLoader::get(_service).onStepUp();
 
-        if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
+        if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             // Called earlier for config servers.
             TransactionCoordinatorService::get(_service)->onStepUp(opCtx);
         }
@@ -1047,14 +1047,14 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
             }
         }
     }
-    if (serverGlobalParams.clusterRole == ClusterRole::None) {  // unsharded
+    if (serverGlobalParams.clusterRole.has(ClusterRole::None)) {  // unsharded
         if (auto validator = LogicalTimeValidator::get(_service)) {
             validator->enableKeyGenerator(opCtx, true);
         }
     }
 
     if (gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV() &&
-        serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
+        serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
        !ShardingState::get(opCtx)->enabled()) {
         // Note this must be called after the config server has created the cluster ID and also
         // after the onStepUp logic for the shard role because this triggers sharding state
@@ -1273,7 +1273,7 @@ void ReplicationCoordinatorExternalStateImpl::setupNoopWriter(Seconds waitTime)
 
 bool ReplicationCoordinatorExternalStateImpl::isShardPartOfShardedCluster(
     OperationContext* opCtx) const {
-    return serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
+    return serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) &&
         ShardingState::get(opCtx)->enabled();
 }
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 83be62085c9..8f03d542612 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -358,7 +358,7 @@ ReplicationCoordinatorImpl::ReplicationCoordinatorImpl(
 
     // If this is a config server, then we set the periodic no-op interval to 1 second. This is to
     // ensure that the config server will not unduly hold up change streams running on the cluster.
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         periodicNoopIntervalSecs.store(1);
     }
 
@@ -3801,7 +3801,7 @@ Status ReplicationCoordinatorImpl::_doReplSetReconfig(OperationContext* opCtx,
     // If the new config changes the replica set's implicit default write concern, we fail the
     // reconfig command. This includes force reconfigs.
     // The user should set a cluster-wide write concern and attempt the reconfig command again.
-    if (serverGlobalParams.clusterRole != ClusterRole::ShardServer) {
+    if (!serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
         if (!repl::enableDefaultWriteConcernUpdatesForInitiate.load() && currIDWC != newIDWC &&
             !ReadWriteConcernDefaults::get(opCtx).isCWWCSet(opCtx)) {
             return Status(
@@ -3845,7 +3845,7 @@ Status ReplicationCoordinatorImpl::_doReplSetReconfig(OperationContext* opCtx,
 
     // If we are currently using a custom write concern as the default, check that the
     // corresponding definition still exists in the new config.
- if (serverGlobalParams.clusterRole == ClusterRole::None) { + if (serverGlobalParams.clusterRole.has(ClusterRole::None)) { try { const auto rwcDefaults = ReadWriteConcernDefaults::get(opCtx->getServiceContext()).getDefault(opCtx); @@ -5246,7 +5246,7 @@ WriteConcernOptions ReplicationCoordinatorImpl::getGetLastErrorDefault() { Status ReplicationCoordinatorImpl::checkReplEnabledForCommand(BSONObjBuilder* result) { if (!_settings.usingReplSets()) { - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { result->append("info", "configsvr"); // for shell prompt } return Status(ErrorCodes::NoReplicationEnabled, "not running with --replSet"); @@ -6342,7 +6342,7 @@ void ReplicationCoordinatorImpl::recordIfCWWCIsSetOnConfigServerOnStartup(Operat } void ReplicationCoordinatorImpl::_validateDefaultWriteConcernOnShardStartup(WithLock lk) const { - if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { // Checking whether the shard is part of a sharded cluster or not by checking if CWWC // flag is set as we record it during sharding initialization phase, as on restarting a // shard node for upgrading or any other reason, sharding initialization happens before diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp index 8debebb6ece..183244c15a7 100644 --- a/src/mongo/db/repl/replication_info.cpp +++ b/src/mongo/db/repl/replication_info.cpp @@ -129,7 +129,7 @@ TopologyVersion appendReplicationInfo(OperationContext* opCtx, invariant(helloResponse->getTopologyVersion()); // Only shard servers will respond with the isImplicitDefaultMajorityWC field. - if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { result->append(HelloCommandReply::kIsImplicitDefaultMajorityWCFieldName, replCoord->getConfig().isImplicitDefaultWriteConcernMajority()); @@ -480,7 +480,7 @@ public: timerGuard.reset(); // Resume curOp timer. - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { constexpr int kConfigServerModeNumber = 2; result.append(HelloCommandReply::kConfigsvrFieldName, kConfigServerModeNumber); } diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp index a7c6435cfb2..d10a1673061 100644 --- a/src/mongo/db/repl/rollback_impl.cpp +++ b/src/mongo/db/repl/rollback_impl.cpp @@ -958,7 +958,7 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr if (opType == OpTypeEnum::kInsert || opType == OpTypeEnum::kInsertGlobalIndexKey) { auto idVal = oplogEntry.getObject().getStringField("_id"); - if (serverGlobalParams.clusterRole == ClusterRole::ShardServer && + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) && opNss == NamespaceString::kServerConfigurationNamespace && idVal == ShardIdentityType::IdName) { // Check if the creation of the shard identity document is being rolled back. @@ -967,7 +967,7 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr "Shard identity document rollback detected. 
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index a7c6435cfb2..d10a1673061 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -958,7 +958,7 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr
     if (opType == OpTypeEnum::kInsert || opType == OpTypeEnum::kInsertGlobalIndexKey) {
         auto idVal = oplogEntry.getObject().getStringField("_id");
-        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) &&
             opNss == NamespaceString::kServerConfigurationNamespace &&
             idVal == ShardIdentityType::IdName) {
             // Check if the creation of the shard identity document is being rolled back.
@@ -967,7 +967,7 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr
                   "Shard identity document rollback detected. oplog op: {oplogEntry}",
                   "Shard identity document rollback detected",
                   "oplogEntry"_attr = redact(oplogEntry.toBSONForLogging()));
-        } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
+        } else if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
                    opNss == VersionType::ConfigNS) {
             // Check if the creation of the config server config version document is being rolled
             // back.
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index 31050e60c7a..3c48aa8d382 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -2596,7 +2596,8 @@ MemberState TopologyCoordinator::getMemberState() const {
     }
     if (_rsConfig.getConfigServer()) {
-        if (_options.clusterRole != ClusterRole::ConfigServer && !skipShardingConfigurationChecks) {
+        if (!_options.clusterRole.has(ClusterRole::ConfigServer) &&
+            !skipShardingConfigurationChecks) {
             return MemberState::RS_REMOVED;
         } else {
             invariant(_storageEngineSupportsReadCommitted != ReadCommittedSupport::kUnknown);
@@ -2605,7 +2606,8 @@ MemberState TopologyCoordinator::getMemberState() const {
             }
         }
     } else {
-        if (_options.clusterRole == ClusterRole::ConfigServer && !skipShardingConfigurationChecks) {
+        if (_options.clusterRole.has(ClusterRole::ConfigServer) &&
+            !skipShardingConfigurationChecks) {
             return MemberState::RS_REMOVED;
         }
     }
diff --git a/src/mongo/db/s/add_shard_cmd.cpp b/src/mongo/db/s/add_shard_cmd.cpp
index b5505c4371c..89ee5e5533c 100644
--- a/src/mongo/db/s/add_shard_cmd.cpp
+++ b/src/mongo/db/s/add_shard_cmd.cpp
@@ -62,7 +62,7 @@ public:
         void typedRun(OperationContext* opCtx) {
             uassert(50876,
                     "Cannot run addShard on a node started without --shardsvr",
-                    serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
             tassert(5624104,
                     "Cannot run addShard on a node that contains customized getLastErrorDefaults, "
                    "which has been deprecated and is now ignored. Use setDefaultRWConcern instead "
diff --git a/src/mongo/db/s/analyze_shard_key_cmd.cpp b/src/mongo/db/s/analyze_shard_key_cmd.cpp
index a8a10345abd..52ea94b9f3b 100644
--- a/src/mongo/db/s/analyze_shard_key_cmd.cpp
+++ b/src/mongo/db/s/analyze_shard_key_cmd.cpp
@@ -72,7 +72,7 @@ public:
                     repl::ReplicationCoordinator::get(opCtx)->isReplEnabled());
             uassert(ErrorCodes::IllegalOperation,
                     "analyzeShardKey command is not supported on a configsvr mongod",
-                    !serverGlobalParams.clusterRole.isExclusivelyConfigSvrRole());
+                    !serverGlobalParams.clusterRole.exclusivelyHasConfigRole());
             const auto& nss = ns();
             const auto& key = request().getKey();
diff --git a/src/mongo/db/s/analyze_shard_key_cmd_util.cpp b/src/mongo/db/s/analyze_shard_key_cmd_util.cpp
index 6bcec29207b..60067b75282 100644
--- a/src/mongo/db/s/analyze_shard_key_cmd_util.cpp
+++ b/src/mongo/db/s/analyze_shard_key_cmd_util.cpp
@@ -149,7 +149,7 @@ void runLocalAggregate(OperationContext* opCtx,
 void runClusterAggregate(OperationContext* opCtx,
                          AggregateCommandRequest aggRequest,
                          std::function<void(const BSONObj&)> callbackFn) {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
     auto nss = aggRequest.getNamespace();
     boost::optional<UUID> collUuid;
@@ -211,7 +211,7 @@ void runClusterAggregate(OperationContext* opCtx,
 void runAggregate(OperationContext* opCtx,
                   AggregateCommandRequest aggRequest,
                   std::function<void(const BSONObj&)> callbackFn) {
-    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
         return runClusterAggregate(opCtx, aggRequest, callbackFn);
     }
     return runLocalAggregate(opCtx, aggRequest, callbackFn);
@@ -516,7 +516,7 @@ MonotonicityMetrics calculateMonotonicity(OperationContext* opCtx,
         throw;
     }
-    uassert(serverGlobalParams.clusterRole == ClusterRole::ShardServer
+    uassert(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)
                 ? ErrorCodes::CollectionIsEmptyLocally
                 : ErrorCodes::IllegalOperation,
             "Cannot analyze the monotonicity of a shard key for an empty collection",
@@ -580,7 +580,7 @@ CollStatsMetrics calculateCollStats(OperationContext* opCtx, const NamespaceStri
     aggRequest.setReadConcern(extractReadConcern(opCtx));
     auto isShardedCollection = [&] {
-        if (serverGlobalParams.clusterRole.isShardRole()) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
            auto cm = uassertStatusOK(
                          Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss))
                          .cm;
@@ -756,7 +756,7 @@ KeyCharacteristicsMetrics calculateKeyCharacteristicsMetrics(OperationContext* o
     DBDirectClient client(opCtx);
     auto doc = client.findOne(nss, {});
-    uassert(serverGlobalParams.clusterRole == ClusterRole::ShardServer
+    uassert(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)
                ? ErrorCodes::CollectionIsEmptyLocally
                : ErrorCodes::IllegalOperation,
            "Cannot analyze the characteristics of a shard key for an empty collection",
diff --git a/src/mongo/db/s/auto_split_vector_command.cpp b/src/mongo/db/s/auto_split_vector_command.cpp
index 2fa8ad9c5c3..2c4ea47e8ad 100644
--- a/src/mongo/db/s/auto_split_vector_command.cpp
+++ b/src/mongo/db/s/auto_split_vector_command.cpp
@@ -70,7 +70,7 @@ public:
         Response typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     "The autoSplitVector command can only be invoked on shards (no CSRS).",
-                    serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
             uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands());
             opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
diff --git a/src/mongo/db/s/clone_catalog_data_command.cpp b/src/mongo/db/s/clone_catalog_data_command.cpp
index 6fefadfc3ab..fee1c7a14ce 100644
--- a/src/mongo/db/s/clone_catalog_data_command.cpp
+++ b/src/mongo/db/s/clone_catalog_data_command.cpp
@@ -96,7 +96,7 @@ public:
            uassert(ErrorCodes::IllegalOperation,
                    str::stream() << "_shardsvrCloneCatalogData can only be run on shard servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
            CommandHelpers::uassertCommandRunWithMajority(getName(), opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp b/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp
index 5471e0b7126..9388b0e735a 100644
--- a/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp
@@ -108,7 +108,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrAbortReshardCollection can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_add_shard_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
index 86093b10102..7b14f48bc73 100644
--- a/src/mongo/db/s/config/configsvr_add_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_add_shard_command.cpp
@@ -97,7 +97,7 @@ public:
                      BSONObjBuilder& result) override {
         uassert(ErrorCodes::IllegalOperation,
                 "_configsvrAddShard can only be run on config servers",
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
         CommandHelpers::uassertCommandRunWithMajority(getName(), opCtx->getWriteConcern());
         // Set the operation context read concern level to local for reads into the config database.
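From here on, dozens of _configsvr* command hunks repeat the same preamble: assert the config-server role, then require a majority write concern. Factored into one place it would look roughly like this (hypothetical helper; the real commands inline both calls exactly as the hunks show):

    // Hypothetical helper illustrating the repeated preamble; not part of the patch.
    void assertRunsOnConfigServer(OperationContext* opCtx, StringData cmdName) {
        uassert(ErrorCodes::IllegalOperation,
                str::stream() << cmdName << " can only be run on config servers",
                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
        CommandHelpers::uassertCommandRunWithMajority(cmdName, opCtx->getWriteConcern());
    }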
diff --git a/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp
index 697ba004878..0fdf8d5bc0b 100644
--- a/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp
+++ b/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp
@@ -99,7 +99,7 @@ public:
                      BSONObjBuilder& result) override {
         uassert(ErrorCodes::IllegalOperation,
                 "_configsvrAddShardToZone can only be run on config servers",
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
         // Set the operation context read concern level to local for reads into the config database.
         repl::ReadConcernArgs::get(opCtx) =
diff --git a/src/mongo/db/s/config/configsvr_balancer_collection_status_command.cpp b/src/mongo/db/s/config/configsvr_balancer_collection_status_command.cpp
index b4ef7e6b4aa..168d8960925 100644
--- a/src/mongo/db/s/config/configsvr_balancer_collection_status_command.cpp
+++ b/src/mongo/db/s/config/configsvr_balancer_collection_status_command.cpp
@@ -66,7 +66,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     str::stream() << ConfigsvrBalancerCollectionStatus::kCommandName
                                   << " can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             const NamespaceString& nss = ns();
diff --git a/src/mongo/db/s/config/configsvr_cleanup_reshard_collection_command.cpp b/src/mongo/db/s/config/configsvr_cleanup_reshard_collection_command.cpp
index b26e3428d5d..bb87c8a8aac 100644
--- a/src/mongo/db/s/config/configsvr_cleanup_reshard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_cleanup_reshard_collection_command.cpp
@@ -85,7 +85,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrCleanupReshardCollection can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             repl::ReadConcernArgs::get(opCtx) =
                 repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
diff --git a/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp b/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp
index e8b3761e21a..5946e6a3baa 100644
--- a/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp
+++ b/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp
@@ -54,7 +54,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrClearJumboFlag can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_collmod_command.cpp b/src/mongo/db/s/config/configsvr_collmod_command.cpp
index 3f6c62eccb0..c87d12874be 100644
--- a/src/mongo/db/s/config/configsvr_collmod_command.cpp
+++ b/src/mongo/db/s/config/configsvr_collmod_command.cpp
@@ -77,7 +77,7 @@ public:
         void typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrCollMod can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
index 6950d6c5c6d..fff35354888 100644
--- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
@@ -118,7 +118,7 @@ public:
         uassert(ErrorCodes::IllegalOperation,
                 "_configsvrCommitChunkMigration can only be run on config servers",
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
         // Set the operation context read concern level to local for reads into the config
         // database.
diff --git a/src/mongo/db/s/config/configsvr_commit_index_command.cpp b/src/mongo/db/s/config/configsvr_commit_index_command.cpp
index e0ea3f07812..a08db05ce10 100644
--- a/src/mongo/db/s/config/configsvr_commit_index_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_index_command.cpp
@@ -150,7 +150,7 @@ public:
             uassert(
                 ErrorCodes::IllegalOperation,
                 format(FMT_STRING("{} can only be run on config servers"), definition()->getName()),
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp
index a66e8f0e3ff..adb2873fd9c 100644
--- a/src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp
@@ -50,7 +50,7 @@ public:
         void typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     str::stream() << Request::kCommandName << " can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_commit_reshard_collection_command.cpp b/src/mongo/db/s/config/configsvr_commit_reshard_collection_command.cpp
index 0fa75ee14b3..0e7fe758552 100644
--- a/src/mongo/db/s/config/configsvr_commit_reshard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_reshard_collection_command.cpp
@@ -81,7 +81,7 @@ public:
             uassert(
                 ErrorCodes::IllegalOperation,
                 format(FMT_STRING("{} can only be run on config servers"), definition()->getName()),
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp b/src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp
index be0f0fe7a41..18eb9bb7f96 100644
--- a/src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp
+++ b/src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp
@@ -64,7 +64,7 @@ public:
             opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
             uassert(ErrorCodes::IllegalOperation,
                     str::stream() << Request::kCommandName << " can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             const NamespaceString& nss = ns();
diff --git a/src/mongo/db/s/config/configsvr_control_balancer_command.cpp b/src/mongo/db/s/config/configsvr_control_balancer_command.cpp
index 06f2e32d14d..3605499c369 100644
--- a/src/mongo/db/s/config/configsvr_control_balancer_command.cpp
+++ b/src/mongo/db/s/config/configsvr_control_balancer_command.cpp
@@ -93,7 +93,7 @@ public:
         uassert(ErrorCodes::IllegalOperation,
                 str::stream() << getName() << " can only be run on config servers",
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
         _run(opCtx, &result);
diff --git a/src/mongo/db/s/config/configsvr_create_database_command.cpp b/src/mongo/db/s/config/configsvr_create_database_command.cpp
index 78bd10b4a08..df184528e0c 100644
--- a/src/mongo/db/s/config/configsvr_create_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_create_database_command.cpp
@@ -72,7 +72,7 @@ public:
         Response typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrCreateDatabase can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
             opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
diff --git a/src/mongo/db/s/config/configsvr_drop_index_catalog_command.cpp b/src/mongo/db/s/config/configsvr_drop_index_catalog_command.cpp
index 879ab1dc50b..85c8122e79f 100644
--- a/src/mongo/db/s/config/configsvr_drop_index_catalog_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_index_catalog_command.cpp
@@ -142,7 +142,7 @@ public:
             uassert(
                 ErrorCodes::IllegalOperation,
                 format(FMT_STRING("{} can only be run on config servers"), definition()->getName()),
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp b/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp
index 7d2f659703e..959f255500a 100644
--- a/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp
+++ b/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp
@@ -51,7 +51,7 @@ public:
         void typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrEnsureChunkVersionIsGreaterThan can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
             ShardingCatalogManager::get(opCtx)->ensureChunkVersionIsGreaterThan(
diff --git a/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp b/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp
index 9fc46c92577..f6672440f6d 100644
--- a/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp
+++ b/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp
@@ -51,7 +51,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrGetHistoricalPlacement can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             // Set the operation context read concern level to majority for reads into the config
             // database.
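Many of these commands also pin the operation's read concern before touching the config database; most pin it to local, while _configsvrGetHistoricalPlacement above pins it to majority. The recurring statement, copied from the surrounding context lines:

    // As it appears in the hunks: pin the read concern before reading config.*
    // (the level, local vs. majority, varies by command).
    repl::ReadConcernArgs::get(opCtx) =
        repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);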
diff --git a/src/mongo/db/s/config/configsvr_merge_all_chunks_on_shard_command.cpp b/src/mongo/db/s/config/configsvr_merge_all_chunks_on_shard_command.cpp
index aa4b1ca9bd2..d35e28cc1e2 100644
--- a/src/mongo/db/s/config/configsvr_merge_all_chunks_on_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_merge_all_chunks_on_shard_command.cpp
@@ -75,7 +75,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     str::stream() << Request::kCommandName << " can only be run on the config server",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             // Set read concern level to local for reads into the config database
             repl::ReadConcernArgs::get(opCtx) =
diff --git a/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp b/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp
index d237cfd8b4b..bdc4a1b26e2 100644
--- a/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp
+++ b/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp
@@ -77,7 +77,7 @@ public:
     ConfigSvrMergeResponse typedRun(OperationContext* opCtx) {
         uassert(ErrorCodes::IllegalOperation,
                 "_configsvrCommitChunksMerge can only be run on config servers",
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
         uassert(ErrorCodes::InvalidNamespace,
                 "invalid namespace specified for request",
                 ns().isValid());
diff --git a/src/mongo/db/s/config/configsvr_move_range_command.cpp b/src/mongo/db/s/config/configsvr_move_range_command.cpp
index 19db7dbe595..e3c08325514 100644
--- a/src/mongo/db/s/config/configsvr_move_range_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_range_command.cpp
@@ -67,7 +67,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     str::stream() << Request::kCommandName << " can only be run on the config server",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
diff --git a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
index d3227d4a917..03a9a24a8c3 100644
--- a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
+++ b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
@@ -58,7 +58,7 @@ public:
         void typedRun(OperationContext* opCtx) {
            uassert(ErrorCodes::IllegalOperation,
                    "_configsvrRefineCollectionShardKey can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
            CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                          opCtx->getWriteConcern());
            _internalRun(opCtx);
diff --git a/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp b/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp
index bdd7413fa37..5f6709ae9e5 100644
--- a/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp
@@ -64,7 +64,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrRemoveChunks can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
index afde46baca2..4e2ad30912b 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
@@ -100,7 +100,7 @@ public:
                      BSONObjBuilder& result) override {
         uassert(ErrorCodes::IllegalOperation,
                 "_configsvrRemoveShard can only be run on config servers",
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
         CommandHelpers::uassertCommandRunWithMajority(getName(), opCtx->getWriteConcern());
         ON_BLOCK_EXIT([&opCtx] {
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp
index 69a50eae5a0..55162a97efc 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp
@@ -102,7 +102,7 @@ public:
                      BSONObjBuilder& result) override {
         uassert(ErrorCodes::IllegalOperation,
                 "_configsvrRemoveShardFromZone can only be run on config servers",
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
         // Set the operation context read concern level to local for reads into the config database.
         repl::ReadConcernArgs::get(opCtx) =
diff --git a/src/mongo/db/s/config/configsvr_remove_tags_command.cpp b/src/mongo/db/s/config/configsvr_remove_tags_command.cpp
index 2aecc09dbd6..8f5fd5f7ec3 100644
--- a/src/mongo/db/s/config/configsvr_remove_tags_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_tags_command.cpp
@@ -64,7 +64,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrRemoveTags can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp b/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp
index ac0bf36615f..6209d5992d0 100644
--- a/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp
+++ b/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp
@@ -79,7 +79,7 @@ public:
         void typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrRenameCollectionMetadata can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_repair_sharded_collection_chunks_history_command.cpp b/src/mongo/db/s/config/configsvr_repair_sharded_collection_chunks_history_command.cpp
index 1524ec36138..41809d3744c 100644
--- a/src/mongo/db/s/config/configsvr_repair_sharded_collection_chunks_history_command.cpp
+++ b/src/mongo/db/s/config/configsvr_repair_sharded_collection_chunks_history_command.cpp
@@ -90,7 +90,7 @@ public:
                      BSONObjBuilder& result) override {
         uassert(ErrorCodes::IllegalOperation,
                 "_configsvrRepairShardedCollectionChunksHistory can only be run on config servers",
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
        // Set the operation context read concern level to local for reads into the config database.
        repl::ReadConcernArgs::get(opCtx) =
diff --git a/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp b/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
index 0dbbcf2b6e0..0e9e1f82e9e 100644
--- a/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
+++ b/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
@@ -70,7 +70,7 @@ public:
         void typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrReshardCollection can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
             repl::ReadConcernArgs::get(opCtx) =
diff --git a/src/mongo/db/s/config/configsvr_set_allow_migrations_command.cpp b/src/mongo/db/s/config/configsvr_set_allow_migrations_command.cpp
index 628b2a837d3..8d6ade7eeb4 100644
--- a/src/mongo/db/s/config/configsvr_set_allow_migrations_command.cpp
+++ b/src/mongo/db/s/config/configsvr_set_allow_migrations_command.cpp
@@ -57,7 +57,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrSetAllowMigrations can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp b/src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp
index 08834bf9f67..f87cfff653c 100644
--- a/src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp
+++ b/src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp
@@ -59,7 +59,7 @@ public:
         void typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     str::stream() << Request::kCommandName << " can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             const auto coordinatorCompletionFuture = [&]() -> SharedSemiFuture<void> {
                 std::unique_ptr<ServerParameterService> sps =
diff --git a/src/mongo/db/s/config/configsvr_set_user_write_block_mode_command.cpp b/src/mongo/db/s/config/configsvr_set_user_write_block_mode_command.cpp
index 8d18a4db889..395c646bf12 100644
--- a/src/mongo/db/s/config/configsvr_set_user_write_block_mode_command.cpp
+++ b/src/mongo/db/s/config/configsvr_set_user_write_block_mode_command.cpp
@@ -56,7 +56,7 @@ public:
         void typedRun(OperationContext* opCtx) {
             uassert(ErrorCodes::IllegalOperation,
                     str::stream() << Request::kCommandName << " can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
index 48eb421a579..9244dd07a71 100644
--- a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
@@ -116,7 +116,7 @@ public:
                      BSONObjBuilder& result) override {
         uassert(ErrorCodes::IllegalOperation,
                 "_configsvrCommitChunkSplit can only be run on config servers",
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
         // Set the operation context read concern level to local for reads into the config database.
         repl::ReadConcernArgs::get(opCtx) =
diff --git a/src/mongo/db/s/config/configsvr_transition_to_catalog_shard_command.cpp b/src/mongo/db/s/config/configsvr_transition_to_catalog_shard_command.cpp
index 1ba9052a695..ec1bc451945 100644
--- a/src/mongo/db/s/config/configsvr_transition_to_catalog_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_transition_to_catalog_shard_command.cpp
@@ -78,7 +78,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrTransitionToCatalogShard can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                           opCtx->getWriteConcern());
diff --git a/src/mongo/db/s/config/configsvr_transition_to_dedicated_config_server_command.cpp b/src/mongo/db/s/config/configsvr_transition_to_dedicated_config_server_command.cpp
index 81ec751cc00..c3227aa4889 100644
--- a/src/mongo/db/s/config/configsvr_transition_to_dedicated_config_server_command.cpp
+++ b/src/mongo/db/s/config/configsvr_transition_to_dedicated_config_server_command.cpp
@@ -97,7 +97,7 @@ public:
             uassert(ErrorCodes::IllegalOperation,
                     "_configsvrTransitionToDedicatedConfigServer can only be run on config servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
             CommandHelpers::uassertCommandRunWithMajority(getName(), opCtx->getWriteConcern());
             auto shardingState = ShardingState::get(opCtx);
diff --git a/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp b/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp
index 11e8ebf7ed9..92ae02ef6b6 100644
--- a/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp
+++ b/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp
@@ -104,7 +104,7 @@ public:
                      BSONObjBuilder& result) override {
         uassert(ErrorCodes::IllegalOperation,
                 "_configsvrAssignKeyRangeToZone can only be run on config servers",
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
         // Set the operation context read concern level to local for reads into the config database.
         repl::ReadConcernArgs::get(opCtx) =
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 8d49011b717..db1b0332d95 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -777,8 +777,8 @@ SamplingBasedSplitPolicy::_makePipelineDocumentSource(OperationContext* opCtx,
         resolvedNamespaces[ns.coll()] = {ns, std::vector<BSONObj>{}};
         auto pi = [&]() -> std::shared_ptr<MongoProcessInterface> {
-            if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
-                serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+            if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) ||
+                serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
                 // For the pipeline to be dispatched to shards, the ShardServerProcessInterface must be
                 // used. However, the generic factory would only return a ShardServerProcessInterface
                 // if the mongod is a shardsvr and the connection is internal. That is, if the mongod is
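The comment truncated by the initial_split_policy.cpp hunk above explains the role check: dispatching the sampling pipeline to shards needs a ShardServerProcessInterface, which the generic factory would only return to a shardsvr on an internal connection, so the lambda picks the interface explicitly. In outline (hypothetical factory helpers standing in for the real constructors):

    // Outline of the selection in _makePipelineDocumentSource; helpers are illustrative.
    std::shared_ptr<MongoProcessInterface> pi;
    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) ||
        serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
        pi = makeShardServerProcessInterface();  // hypothetical: shard-dispatching interface
    } else {
        pi = makeStandaloneProcessInterface();   // hypothetical: local-only interface
    }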
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index c2eb7ee7f2d..aad9a5abcdd 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -489,7 +489,7 @@ void ShardingCatalogManager::create(ServiceContext* serviceContext,
                                     std::unique_ptr<executor::TaskExecutor> addShardExecutor,
                                     std::shared_ptr<Shard> localConfigShard,
                                     std::unique_ptr<ShardingCatalogClient> localCatalogClient) {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
     auto& shardingCatalogManager = getShardingCatalogManager(serviceContext);
     invariant(!shardingCatalogManager);
@@ -603,12 +603,12 @@ Status ShardingCatalogManager::upgradeConfigSettings(OperationContext* opCtx) {
 }
 ShardingCatalogClient* ShardingCatalogManager::localCatalogClient() {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
     return _localCatalogClient.get();
 }
 const std::shared_ptr<Shard>& ShardingCatalogManager::localConfigShard() {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
     return _localConfigShard;
 }
diff --git a/src/mongo/db/s/configure_query_analyzer_cmd.cpp b/src/mongo/db/s/configure_query_analyzer_cmd.cpp
index ab918556719..77538b5f1de 100644
--- a/src/mongo/db/s/configure_query_analyzer_cmd.cpp
+++ b/src/mongo/db/s/configure_query_analyzer_cmd.cpp
@@ -123,7 +123,7 @@ StatusWith<UUID> validateCollectionOptionsOnPrimaryShard(OperationContext* opCtx
 }
 StatusWith<UUID> validateCollectionOptions(OperationContext* opCtx, const NamespaceString& nss) {
-    if (serverGlobalParams.clusterRole == ClusterRole::None) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::None)) {
         return validateCollectionOptionsLocally(opCtx, nss);
     }
     return validateCollectionOptionsOnPrimaryShard(opCtx, nss);
@@ -147,7 +147,7 @@ public:
                     !gMultitenancySupport);
             uassert(ErrorCodes::IllegalOperation,
                     "configQueryAnalyzer command is not supported on a shardsvr mongod",
-                    !serverGlobalParams.clusterRole.isExclusivelyShardRole());
+                    !serverGlobalParams.clusterRole.exclusivelyHasShardRole());
             const auto& nss = ns();
             const auto mode = request().getMode();
diff --git a/src/mongo/db/s/database_sharding_state.cpp b/src/mongo/db/s/database_sharding_state.cpp
index b53c0f81bce..446ea0f93ff 100644
--- a/src/mongo/db/s/database_sharding_state.cpp
+++ b/src/mongo/db/s/database_sharding_state.cpp
@@ -198,7 +198,7 @@ void DatabaseShardingState::assertIsPrimaryShardForDb(OperationContext* opCtx,
     if (dbName == DatabaseName::kConfig || dbName == DatabaseName::kAdmin) {
         uassert(7393700,
                 "The config server is the primary shard for database: {}"_format(dbName.toString()),
-                serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+                serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
         return;
     }
diff --git a/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.cpp b/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.cpp
index ca90eef2efc..57296b406a4 100644
--- a/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.cpp
+++ b/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.cpp
@@ -74,7 +74,7 @@ void fetchSplitPoints(OperationContext* opCtx,
     auto readConcern = repl::ReadConcernArgs(LogicalTime{splitPointsAfterClusterTime},
                                              repl::ReadConcernLevel::kLocalReadConcern);
-    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
         uassert(ErrorCodes::InvalidOptions,
                 "The id of the shard that contains the temporary collection storing the split "
                 "points for the shard key must be specified when running on a sharded cluster",
diff --git a/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.h b/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.h
index ef8e950eaa2..4a70b8dd341 100644
--- a/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.h
+++ b/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.h
@@ -53,7 +53,7 @@ public:
                 !gMultitenancySupport);
         uassert(ErrorCodes::IllegalOperation,
                 str::stream() << kStageName << " is not supported on a configsvr mongod",
-                !serverGlobalParams.clusterRole.isExclusivelyConfigSvrRole());
+                !serverGlobalParams.clusterRole.exclusivelyHasConfigRole());
         uassert(6875700,
                 str::stream() << kStageName << " must take a nested object but found: " << specElem,
diff --git a/src/mongo/db/s/get_database_version_command.cpp b/src/mongo/db/s/get_database_version_command.cpp
index f47466b361f..af93287ca53 100644
--- a/src/mongo/db/s/get_database_version_command.cpp
+++ b/src/mongo/db/s/get_database_version_command.cpp
@@ -78,7 +78,7 @@ public:
         void run(OperationContext* opCtx, rpc::ReplyBuilderInterface* result) override {
             uassert(ErrorCodes::IllegalOperation,
                     str::stream() << definition()->getName() << " can only be run on shard servers",
-                    serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+                    serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
             DatabaseName dbName(boost::none, _targetDb());
             AutoGetDb autoDb(opCtx, dbName, MODE_IS);
diff --git a/src/mongo/db/s/global_user_write_block_state.cpp b/src/mongo/db/s/global_user_write_block_state.cpp
index 6f5fce35d5a..79bd254d7dc 100644
--- a/src/mongo/db/s/global_user_write_block_state.cpp
+++ b/src/mongo/db/s/global_user_write_block_state.cpp
@@ -83,7 +83,7 @@ void GlobalUserWriteBlockState::disableUserShardedDDLBlocking(OperationContext*
 void GlobalUserWriteBlockState::checkShardedDDLAllowedToStart(OperationContext* opCtx,
                                                               const NamespaceString& nss) const {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
     uassert(ErrorCodes::UserWritesBlocked,
             "User writes blocked",
             !_userShardedDDLBlocked.load() ||
diff --git a/src/mongo/db/s/query_analysis_coordinator.cpp b/src/mongo/db/s/query_analysis_coordinator.cpp
index 303eb656e19..298b6139145 100644
--- a/src/mongo/db/s/query_analysis_coordinator.cpp
+++ b/src/mongo/db/s/query_analysis_coordinator.cpp
@@ -142,7 +142,7 @@ void QueryAnalysisCoordinator::Sampler::resetLastNumQueriesExecutedPerSecond() {
 }
 void QueryAnalysisCoordinator::onSamplerInsert(const BSONObj& doc) {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
     stdx::lock_guard<Latch> lk(_mutex);
     auto mongosDoc = uassertStatusOK(MongosType::fromBSON(doc));
@@ -154,7 +154,7 @@ void QueryAnalysisCoordinator::onSamplerInsert(const BSONObj& doc) {
 }
 void QueryAnalysisCoordinator::onSamplerUpdate(const BSONObj& doc) {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
     stdx::lock_guard<Latch> lk(_mutex);
     auto mongosDoc = uassertStatusOK(MongosType::fromBSON(doc));
@@ -168,7 +168,7 @@ void QueryAnalysisCoordinator::onSamplerUpdate(const BSONObj& doc) {
 }
 void QueryAnalysisCoordinator::onSamplerDelete(const BSONObj& doc) {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
     stdx::lock_guard<Latch> lk(_mutex);
     auto mongosDoc = uassertStatusOK(MongosType::fromBSON(doc));
@@ -199,7 +199,7 @@ void QueryAnalysisCoordinator::onStartup(OperationContext* opCtx) {
         }
     }
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         invariant(_samplers.empty());
         auto minPingTime = _getMinLastPingTime();
@@ -216,7 +216,7 @@ void QueryAnalysisCoordinator::onStartup(OperationContext* opCtx) {
 }
 void QueryAnalysisCoordinator::onSetCurrentConfig(OperationContext* opCtx) {
-    if (serverGlobalParams.clusterRole == ClusterRole::None) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::None)) {
         stdx::lock_guard<Latch> lk(_mutex);
         StringMap<Sampler> samplers;
diff --git a/src/mongo/db/s/query_analysis_op_observer.cpp b/src/mongo/db/s/query_analysis_op_observer.cpp
index fe039093307..6e292ec9ba1 100644
--- a/src/mongo/db/s/query_analysis_op_observer.cpp
+++ b/src/mongo/db/s/query_analysis_op_observer.cpp
@@ -62,7 +62,7 @@ void QueryAnalysisOpObserver::onInserts(OperationContext* opCtx,
                     insertedDoc);
             });
         }
-    } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
+    } else if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
               coll->ns() == MongosType::ConfigNS) {
        for (auto it = begin; it != end; ++it) {
            const auto& insertedDoc = it->doc;
@@ -85,7 +85,7 @@ void QueryAnalysisOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdat
                 analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onConfigurationUpdate(
                     updatedDoc);
             });
-    } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
+    } else if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
              args.coll->ns() == MongosType::ConfigNS) {
        const auto& updatedDoc = args.updateArgs->updatedDoc;
        opCtx->recoveryUnit()->onCommit(
@@ -131,7 +131,7 @@ void QueryAnalysisOpObserver::onDelete(OperationContext* opCtx,
                                        boost::optional<Timestamp>) {
             analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onConfigurationDelete(doc);
         });
-    } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
+    } else if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
              coll->ns() == MongosType::ConfigNS) {
        auto& doc = docToDeleteDecoration(opCtx);
        invariant(!doc.isEmpty());
diff --git a/src/mongo/db/s/refresh_query_analyzer_configuration_cmd.cpp b/src/mongo/db/s/refresh_query_analyzer_configuration_cmd.cpp
index 7b2a39c871c..0efae360f48 100644
--- a/src/mongo/db/s/refresh_query_analyzer_configuration_cmd.cpp
+++ b/src/mongo/db/s/refresh_query_analyzer_configuration_cmd.cpp
@@ -66,7 +66,7 @@ public:
             uassert(
                 ErrorCodes::IllegalOperation,
                 "_refreshQueryAnalyzerConfiguration command is not supported on a shardsvr mongod",
-                !serverGlobalParams.clusterRole.isExclusivelyShardRole());
+                !serverGlobalParams.clusterRole.exclusivelyHasShardRole());
             auto coodinator = analyze_shard_key::QueryAnalysisCoordinator::get(opCtx);
             auto configurations = coodinator->getNewConfigurationsForSampler(
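One thread runs through the three query-analysis files above: on a config server, QueryAnalysisCoordinator tracks samplers from config.mongos documents, and QueryAnalysisOpObserver feeds it the inserts, updates, and deletes on that namespace. The gate that repeats in each observer hook, condensed (structure illustrative):

    // Condensed from the onInserts/onUpdate/onDelete hunks: only a config
    // server observing config.mongos forwards the event to the coordinator.
    bool shouldForwardToCoordinator(const NamespaceString& ns) {
        return serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
            ns == MongosType::ConfigNS;
    }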
diff --git a/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp b/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp
index 1fe78a5b943..fad84d1a4e8 100644
--- a/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp
+++ b/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp
@@ -180,7 +180,7 @@ ReshardingCoordinatorCleaner::ReshardingCoordinatorCleaner(NamespaceString nss,
     : ReshardingCleaner(NamespaceString::kConfigReshardingOperationsNamespace,
                         std::move(nss),
                         std::move(reshardingUUID)) {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
 }
 void ReshardingCoordinatorCleaner::_doClean(OperationContext* opCtx,
@@ -261,14 +261,14 @@ ReshardingDonorCleaner::ReshardingDonorCleaner(NamespaceString nss, UUID reshard
     : ReshardingCleaner(NamespaceString::kDonorReshardingOperationsNamespace,
                         std::move(nss),
                         std::move(reshardingUUID)) {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
 }
 ReshardingRecipientCleaner::ReshardingRecipientCleaner(NamespaceString nss, UUID reshardingUUID)
     : ReshardingCleaner(NamespaceString::kRecipientReshardingOperationsNamespace,
                         std::move(nss),
                         std::move(reshardingUUID)) {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
 }
 void ReshardingRecipientCleaner::_doClean(OperationContext* opCtx,
diff --git a/src/mongo/db/s/shard_local.cpp b/src/mongo/db/s/shard_local.cpp
index d7a89b5bbb2..51020d82543 100644
--- a/src/mongo/db/s/shard_local.cpp
+++ b/src/mongo/db/s/shard_local.cpp
@@ -54,7 +54,7 @@ namespace mongo {
 ShardLocal::ShardLocal(const ShardId& id) : Shard(id) {
     // Currently ShardLocal only works for config servers. If we ever start using ShardLocal on
     // shards we'll need to consider how to handle shards.
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
 }
 ConnectionString ShardLocal::getConnString() const {
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 2f3f83161e6..d66ea920221 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -296,7 +296,7 @@ StatusWith<CollectionAndChangedChunks> getIncompletePersistedMetadataSinceVersio
 }
 ShardId getSelfShardId(OperationContext* opCtx) {
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         return ShardId::kConfigServerId;
     }
@@ -353,7 +353,7 @@ void forcePrimaryDatabaseRefreshAndWaitForReplication(OperationContext* opCtx, S
 // TODO: SERVER-74105 remove
 bool shouldSkipStoringLocally() {
     // Note: cannot use isExclusivelyConfigSvrRole as it ignores fcv.
-    return serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
+    return serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
         !gFeatureFlagCatalogShard.isEnabled(serverGlobalParams.featureCompatibility);
 }
diff --git a/src/mongo/db/s/sharded_index_consistency_server_status.cpp b/src/mongo/db/s/sharded_index_consistency_server_status.cpp
index 876427b1525..4ee7873755b 100644
--- a/src/mongo/db/s/sharded_index_consistency_server_status.cpp
+++ b/src/mongo/db/s/sharded_index_consistency_server_status.cpp
@@ -37,7 +37,7 @@ namespace mongo {
 namespace {
 bool isConfigServerWithShardedIndexConsistencyCheckEnabled() {
-    return serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
+    return serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
         enableShardedIndexConsistencyCheck.load();
 }
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index cf7b85a2d1c..8d8d9c08750 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -121,7 +121,7 @@ void runTransactionOnShardingCatalog(OperationContext* opCtx,
     // Instantiate the right custom TXN client to ensure that the queries to the config DB will be
     // routed to the CSRS.
     auto customTxnClient = [&]() -> std::unique_ptr<txn_api::TransactionClient> {
-        if (serverGlobalParams.clusterRole.isExclusivelyShardRole()) {
+        if (serverGlobalParams.clusterRole.exclusivelyHasShardRole()) {
             return std::make_unique<txn_api::details::SEPTransactionClient>(
                 opCtx,
                 executor,
@@ -129,7 +129,7 @@ void runTransactionOnShardingCatalog(OperationContext* opCtx,
                     opCtx->getServiceContext()));
         }
-        invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+        invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
         return nullptr;
     }();
@@ -552,7 +552,7 @@ bool removeCollAndChunksMetadataFromConfig_notIdempotent(OperationContext* opCtx
                                                          ShardingCatalogClient* catalogClient,
                                                          const NamespaceString& nss,
                                                          const WriteConcernOptions& writeConcern) {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
     IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx);
     ON_BLOCK_EXIT(
@@ -576,7 +576,7 @@ void shardedRenameMetadata(OperationContext* opCtx,
                            CollectionType& fromCollType,
                            const NamespaceString& toNss,
                            const WriteConcernOptions& writeConcern) {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
     auto fromNss = fromCollType.getNss();
     auto fromUUID = fromCollType.getUuid();
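The runTransactionOnShardingCatalog hunk above picks a transaction client by role: a pure shard needs a client whose config-database statements get routed to the CSRS, while a config server hosts the catalog locally and can fall back to the default client by returning nullptr. In outline (hypothetical helper standing in for the SEPTransactionClient construction):

    // Outline of the custom transaction-client selection; helper is illustrative.
    std::unique_ptr<txn_api::TransactionClient> customTxnClient;
    if (serverGlobalParams.clusterRole.exclusivelyHasShardRole()) {
        customTxnClient = makeClientRoutedToConfigServer();  // hypothetical helper
    } else {
        invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
        customTxnClient = nullptr;  // default client runs against the local catalog
    }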
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index 7d01c558fef..567eebd4bf3 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -316,7 +316,7 @@ bool ShardingInitializationMongoD::initializeShardingAwarenessIfNeeded(Operation
     // In sharded queryableBackupMode mode, we ignore the shardIdentity document on disk and instead
     // *require* a shardIdentity document to be passed through --overrideShardIdentity
     if (storageGlobalParams.queryableBackupMode) {
-        if (serverGlobalParams.clusterRole.isExclusivelyShardRole()) {
+        if (serverGlobalParams.clusterRole.exclusivelyHasShardRole()) {
            uassert(ErrorCodes::InvalidOptions,
                    "If started with --shardsvr in queryableBackupMode, a shardIdentity document "
                    "must be provided through --overrideShardIdentity",
@@ -369,9 +369,9 @@ bool ShardingInitializationMongoD::initializeShardingAwarenessIfNeeded(Operation
             shardIdentityBSON);
     }();
-    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
         if (!foundShardIdentity) {
-            if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+            if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
                 LOGV2_WARNING(7445900,
                               "Started with ShardServer role, but no shardIdentity document was "
                               "found on disk.",
@@ -415,7 +415,7 @@ bool ShardingInitializationMongoD::initializeShardingAwarenessIfNeeded(Operation
 void ShardingInitializationMongoD::initializeFromShardIdentity(
     OperationContext* opCtx, const ShardIdentityType& shardIdentity) {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer));
     invariant(opCtx->lockState()->isLocked());
     uassertStatusOKWithContext(
@@ -445,7 +445,7 @@ void ShardingInitializationMongoD::initializeFromShardIdentity(
               shardingState->clusterId() == shardIdentity.getClusterId());
     // If run on a config server, we may not know our connection string yet.
-    if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
+    if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         auto prevConfigsvrConnStr = shardRegistry->getConfigServerConnectionString();
         uassert(
             40373,
@@ -475,7 +475,7 @@ void ShardingInitializationMongoD::initializeFromShardIdentity(
         shardingState->setInitialized(ex.toStatus());
     }
-    if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
+    if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         Grid::get(opCtx)->setShardingInitialized();
     } else {
         // A config server always initializes sharding at startup.
@@ -529,7 +529,7 @@ void ShardingInitializationMongoD::updateShardIdentityConfigString(
 }
 void ShardingInitializationMongoD::onSetCurrentConfig(OperationContext* opCtx) {
-    if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer ||
+    if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) ||
         !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV()) {
         // Only config servers capable of acting as a shard set up the config shard in their shard
         // registry with a real connection string.
@@ -542,7 +542,7 @@ void ShardingInitializationMongoD::onSetCurrentConfig(OperationContext* opCtx) {
 void ShardingInitializationMongoD::onInitialDataAvailable(OperationContext* opCtx,
                                                           bool isMajorityDataAvailable) {
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         initializeGlobalShardingStateForConfigServerIfNeeded(opCtx);
     }
@@ -656,7 +656,7 @@ void initializeGlobalShardingStateForMongoD(OperationContext* opCtx,
     auto initKeysClient =
         [service](ShardingCatalogClient* catalogClient) -> std::unique_ptr<KeysCollectionClient> {
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
            // The direct keys client must use local read concern if the storage engine can't
            // support majority read concern.
             bool keysClientMustUseLocalReads =
@@ -677,7 +677,7 @@ void initializeGlobalShardingStateForMongoD(OperationContext* opCtx,
                                          initKeysClient));
     auto const replCoord = repl::ReplicationCoordinator::get(service);
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) &&
         replCoord->getMemberState().primary()) {
         LogicalTimeValidator::get(opCtx)->enableKeyGenerator(opCtx, true);
     }
@@ -702,7 +702,7 @@ void ShardingInitializationMongoD::_initializeShardingEnvironmentOnShardServer(
     bool isStandaloneOrPrimary =
         !isReplSet || (replCoord->getMemberState() == repl::MemberState::RS_PRIMARY);
-    if (serverGlobalParams.clusterRole.isExclusivelyShardRole()) {
+    if (serverGlobalParams.clusterRole.exclusivelyHasShardRole()) {
         // A config server added as a shard would have already set this up at startup.
         if (storageGlobalParams.queryableBackupMode) {
             CatalogCacheLoader::set(service, std::make_unique<ReadOnlyCatalogCacheLoader>());
@@ -733,7 +733,7 @@ void initializeShardingAwarenessIfNeededAndLoadGlobalSettings(OperationContext*
         ShardingInitializationMongoD::get(opCtx)->initializeShardingAwarenessIfNeeded(opCtx);
     if (shardingInitialized) {
         // Config servers can't always perform remote reads here, so they use a local client.
-        auto catalogClient = serverGlobalParams.clusterRole == ClusterRole::ConfigServer
+        auto catalogClient = serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)
             ? ShardingCatalogManager::get(opCtx)->localCatalogClient()
             : Grid::get(opCtx)->catalogClient();
         auto status = loadGlobalSettingsFromConfigServer(opCtx, catalogClient);
diff --git a/src/mongo/db/s/sharding_logging.cpp b/src/mongo/db/s/sharding_logging.cpp
index 01c3266d1b0..872f0d1f7f1 100644
--- a/src/mongo/db/s/sharding_logging.cpp
+++ b/src/mongo/db/s/sharding_logging.cpp
@@ -111,7 +111,7 @@ Status ShardingLogging::logChangeChecked(OperationContext* opCtx,
                                          const WriteConcernOptions& writeConcern,
                                          std::shared_ptr<Shard> configShard,
                                          ShardingCatalogClient* catalogClient) {
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         // If we're using a non-majority write concern, we should have provided an overriden
         // configShard and catalogClient to perform local operations.
         invariant(writeConcern.isMajority() || (configShard && catalogClient));
@@ -163,7 +163,7 @@ Status ShardingLogging::_log(OperationContext* opCtx,
     ChangeLogType changeLog;
     changeLog.setChangeId(changeId);
     changeLog.setServer(serverName);
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         changeLog.setShard("config");
     } else {
         auto shardingState = ShardingState::get(opCtx);
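The same theme appears in sharding_initialization_mongod.cpp and sharding_logging.cpp above: when the node itself carries the config-server role, remote reads may not be possible yet, so a local catalog client is chosen over the Grid's remote one. The ternary from the hunk, isolated for emphasis:

    // Taken from the context above: local catalog client on config servers,
    // the Grid's remote catalog client everywhere else.
    auto catalogClient = serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)
        ? ShardingCatalogManager::get(opCtx)->localCatalogClient()
        : Grid::get(opCtx)->catalogClient();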
diff --git a/src/mongo/db/s/sharding_mongod_test_fixture.cpp b/src/mongo/db/s/sharding_mongod_test_fixture.cpp
index 2756f256952..76dca0f5e14 100644
--- a/src/mongo/db/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/db/s/sharding_mongod_test_fixture.cpp
@@ -152,7 +152,7 @@ std::unique_ptr<ShardRegistry> ShardingMongodTestFixture::makeShardRegistry(
         {ConnectionString::ConnectionType::kStandalone, std::move(standaloneBuilder)}};
     // Only config servers use ShardLocal for now.
-    if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+    if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
         ShardFactory::BuilderCallable localBuilder = [](const ShardId& shardId,
                                                         const ConnectionString& connStr) {
             return std::make_unique<ShardLocal>(shardId);
@@ -184,8 +184,8 @@ std::unique_ptr<CatalogCache> ShardingMongodTestFixture::makeCatalogCache() {
 Status ShardingMongodTestFixture::initializeGlobalShardingStateForMongodForTest(
     const ConnectionString& configConnStr) {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer ||
-              serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+    invariant(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) ||
+              serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));
     // Create and initialize each sharding component individually before moving them to the Grid
     // in order to control the order of initialization, since some components depend on others.
diff --git a/src/mongo/db/s/sharding_server_status.cpp b/src/mongo/db/s/sharding_server_status.cpp
index c906068e87e..ccd2ec782a7 100644
--- a/src/mongo/db/s/sharding_server_status.cpp
+++ b/src/mongo/db/s/sharding_server_status.cpp
@@ -145,7 +145,7 @@ public:
         // To calculate the number of sharded collection we simply get the number of records from
         // `config.collections` collection. This count must only be appended when serverStatus is
         // invoked on the config server.
-        if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+        if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
             AutoGetCollectionForReadLockFree autoColl(opCtx, CollectionType::ConfigNS);
             const auto& collection = autoColl.getCollection();
             const auto numShardedCollections = collection ? collection->numRecords(opCtx) : 0;
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 7ffcbcd2ff2..77bb96d16f0 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -96,7 +96,7 @@ bool ShardingState::enabled() const {
 }
 Status ShardingState::canAcceptShardedCommands() const {
-    if (!serverGlobalParams.clusterRole.isShardRole()) {
+    if (!serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
         return {ErrorCodes::NoShardingEnabled,
                 "Cannot accept sharding commands if node does not have shard role"};
     }
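Note the sharding_state.cpp hunk just above: canAcceptShardedCommands() now asks has(ClusterRole::ShardServer) instead of the old isShardRole() helper, so a catalog-shard config server passes the check too. Callers keep the shape seen elsewhere in this patch:

    // Typical caller, as in the auto_split_vector_command.cpp and
    // txn_two_phase_commit_cmds.cpp hunks: refuse sharded commands unless
    // the node carries the shard role and sharding state is initialized.
    uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands());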
Status status = Status::OK(); - if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) { + if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { status = ShardingLogging::get(opCtx)->logChangeChecked( opCtx, "Sharding minOpTime recovery", diff --git a/src/mongo/db/s/sharding_util.cpp b/src/mongo/db/s/sharding_util.cpp index 483193445f6..4cf80f20341 100644 --- a/src/mongo/db/s/sharding_util.cpp +++ b/src/mongo/db/s/sharding_util.cpp @@ -215,7 +215,7 @@ Status createIndexOnCollection(OperationContext* opCtx, Status createShardingIndexCatalogIndexes(OperationContext* opCtx) { bool unique = true; NamespaceString indexCatalogNamespace; - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { indexCatalogNamespace = NamespaceString::kConfigsvrIndexCatalogNamespace; } else { indexCatalogNamespace = NamespaceString::kShardIndexCatalogNamespace; diff --git a/src/mongo/db/s/sharding_write_router.cpp b/src/mongo/db/s/sharding_write_router.cpp index b2cbc5c6b11..9cd070cbf31 100644 --- a/src/mongo/db/s/sharding_write_router.cpp +++ b/src/mongo/db/s/sharding_write_router.cpp @@ -34,7 +34,7 @@ namespace mongo { ShardingWriteRouter::ShardingWriteRouter(OperationContext* opCtx, const NamespaceString& nss, CatalogCache* catalogCache) { - if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { _scopedCss.emplace(CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, nss)); _collDesc = (*_scopedCss)->getCollectionDescription(opCtx); diff --git a/src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp b/src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp index 79361b0797f..d4c3195f3da 100644 --- a/src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp +++ b/src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp @@ -62,7 +62,7 @@ public: uassert(ErrorCodes::IllegalOperation, "_shardsvrAbortReshardCollection can only be run on shard servers", - serverGlobalParams.clusterRole == ClusterRole::ShardServer); + serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)); CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName, opCtx->getWriteConcern()); diff --git a/src/mongo/db/s/shardsvr_cleanup_reshard_collection_command.cpp b/src/mongo/db/s/shardsvr_cleanup_reshard_collection_command.cpp index 74754350096..a1541d396a6 100644 --- a/src/mongo/db/s/shardsvr_cleanup_reshard_collection_command.cpp +++ b/src/mongo/db/s/shardsvr_cleanup_reshard_collection_command.cpp @@ -59,7 +59,7 @@ public: uassert(ErrorCodes::IllegalOperation, "_shardsvrCleanupReshardCollection can only be run on shard servers", - serverGlobalParams.clusterRole == ClusterRole::ShardServer); + serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)); repl::ReadConcernArgs::get(opCtx) = repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern); diff --git a/src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp b/src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp index a7cd3bbff19..9f719315717 100644 --- a/src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp +++ b/src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp @@ -62,7 +62,7 @@ public: uassert(ErrorCodes::IllegalOperation, "_shardsvrCommitReshardCollection can only be run on shard servers", - serverGlobalParams.clusterRole == ClusterRole::ShardServer); + 
serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)); CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName, opCtx->getWriteConcern()); diff --git a/src/mongo/db/s/shardsvr_notify_sharding_event_command.cpp b/src/mongo/db/s/shardsvr_notify_sharding_event_command.cpp index 4ea92a2a120..8b05305731c 100644 --- a/src/mongo/db/s/shardsvr_notify_sharding_event_command.cpp +++ b/src/mongo/db/s/shardsvr_notify_sharding_event_command.cpp @@ -74,7 +74,7 @@ public: uassert(ErrorCodes::IllegalOperation, "_shardsvrNotifyShardingEvent can only run on shard servers", - serverGlobalParams.clusterRole == ClusterRole::ShardServer); + serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)); switch (request().getEventType()) { case EventTypeEnum::kDatabasesAdded: { diff --git a/src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp b/src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp index 82aafc2e3c7..d1cb7cc9d42 100644 --- a/src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp +++ b/src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp @@ -64,7 +64,7 @@ public: void typedRun(OperationContext* opCtx) { uassert(ErrorCodes::IllegalOperation, str::stream() << Request::kCommandName << " can only be run on shard servers", - serverGlobalParams.clusterRole == ClusterRole::ShardServer); + serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)); CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName, opCtx->getWriteConcern()); diff --git a/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp b/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp index 195eca0c745..c79bb4ded59 100644 --- a/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp +++ b/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp @@ -63,7 +63,7 @@ public: void typedRun(OperationContext* opCtx) { uassert(ErrorCodes::IllegalOperation, str::stream() << Request::kCommandName << " can only be run on shard servers", - serverGlobalParams.clusterRole == ClusterRole::ShardServer); + serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)); CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName, opCtx->getWriteConcern()); diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.cpp b/src/mongo/db/s/transaction_coordinator_futures_util.cpp index d469e6199b7..881c92275c7 100644 --- a/src/mongo/db/s/transaction_coordinator_futures_util.cpp +++ b/src/mongo/db/s/transaction_coordinator_futures_util.cpp @@ -279,10 +279,10 @@ void AsyncWorkScheduler::_notifyAllTasksComplete(WithLock wl) { } ShardId getLocalShardId(ServiceContext* service) { - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { return ShardId::kConfigServerId; } - if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { return ShardingState::get(service)->shardId(); } diff --git a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp index 19c7d58e58d..3a1030d95c6 100644 --- a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp +++ b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp @@ -87,7 +87,7 @@ public: Response typedRun(OperationContext* opCtx) { if (!getTestCommandsEnabled() && - serverGlobalParams.clusterRole != ClusterRole::ConfigServer) { + !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands()); 
} @@ -256,7 +256,7 @@ public: void typedRun(OperationContext* opCtx) { // Only config servers or initialized shard servers can act as transaction coordinators. - if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) { + if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands()); } diff --git a/src/mongo/db/s/type_shard_identity.cpp b/src/mongo/db/s/type_shard_identity.cpp index b3173bee04f..9d650fcd5bb 100644 --- a/src/mongo/db/s/type_shard_identity.cpp +++ b/src/mongo/db/s/type_shard_identity.cpp @@ -75,7 +75,7 @@ Status ShardIdentityType::validate() const { } if (getShardName() == ShardId::kConfigServerId && - serverGlobalParams.clusterRole != ClusterRole::ConfigServer) { + !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { return { ErrorCodes::UnsupportedFormat, str::stream() @@ -84,7 +84,7 @@ Status ShardIdentityType::validate() const { } if (getShardName() != ShardId::kConfigServerId && - serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { return {ErrorCodes::UnsupportedFormat, str::stream() << "Invalid shard identity document: the shard name for a config " "server cannot be \"" diff --git a/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp b/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp index 66b31f3dbfb..9a68cdb7e2b 100644 --- a/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp +++ b/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp @@ -172,14 +172,14 @@ const ReplicaSetAwareServiceRegistry::Registerer<UserWritesRecoverableCriticalSe "UserWritesRecoverableCriticalSectionService"); bool UserWritesRecoverableCriticalSectionService::shouldRegisterReplicaSetAwareService() const { - return serverGlobalParams.clusterRole == ClusterRole::None || - serverGlobalParams.clusterRole.isShardRole(); + return serverGlobalParams.clusterRole.has(ClusterRole::None) || + serverGlobalParams.clusterRole.has(ClusterRole::ShardServer); } void UserWritesRecoverableCriticalSectionService:: acquireRecoverableCriticalSectionBlockingUserWrites(OperationContext* opCtx, const NamespaceString& nss) { - invariant(serverGlobalParams.clusterRole == ClusterRole::None, + invariant(serverGlobalParams.clusterRole.has(ClusterRole::None), "Acquiring the user writes recoverable critical section directly to start blocking " "writes is only allowed on non-sharded cluster."); @@ -190,7 +190,7 @@ void UserWritesRecoverableCriticalSectionService:: void UserWritesRecoverableCriticalSectionService:: acquireRecoverableCriticalSectionBlockNewShardedDDL(OperationContext* opCtx, const NamespaceString& nss) { - invariant(serverGlobalParams.clusterRole != ClusterRole::None, + invariant(!serverGlobalParams.clusterRole.has(ClusterRole::None), "Acquiring the user writes recoverable critical section blocking only sharded DDL is " "only allowed on sharded clusters"); @@ -202,7 +202,7 @@ void UserWritesRecoverableCriticalSectionService:: void UserWritesRecoverableCriticalSectionService:: promoteRecoverableCriticalSectionToBlockUserWrites(OperationContext* opCtx, const NamespaceString& nss) { - invariant(serverGlobalParams.clusterRole != ClusterRole::None, + invariant(!serverGlobalParams.clusterRole.has(ClusterRole::None), "Promoting the user writes recoverable critical section to also block user writes is " "only allowed on sharded clusters"); @@ -258,7 +258,7 @@ void 
UserWritesRecoverableCriticalSectionService:: void UserWritesRecoverableCriticalSectionService:: demoteRecoverableCriticalSectionToNoLongerBlockUserWrites(OperationContext* opCtx, const NamespaceString& nss) { - invariant(serverGlobalParams.clusterRole != ClusterRole::None, + invariant(!serverGlobalParams.clusterRole.has(ClusterRole::None), "Demoting the user writes recoverable critical section to no longer block user writes is " "only allowed on sharded clusters"); diff --git a/src/mongo/db/server_options_test.cpp b/src/mongo/db/server_options_test.cpp index c2c11ce3e47..5f0b44e9447 100644 --- a/src/mongo/db/server_options_test.cpp +++ b/src/mongo/db/server_options_test.cpp @@ -935,25 +935,22 @@ TEST(SetupOptions, ForkOptionAlwaysFalseWithNoforkEnvVar) { #endif TEST(ClusterRole, Equality) { - ASSERT_TRUE(ClusterRole(ClusterRole::None) == ClusterRole::None); - ASSERT_TRUE(ClusterRole(ClusterRole::None) != ClusterRole::ConfigServer); - ASSERT_TRUE(ClusterRole(ClusterRole::None) != ClusterRole::ShardServer); + ASSERT_TRUE(ClusterRole(ClusterRole::None).has(ClusterRole::None)); + ASSERT_TRUE(!ClusterRole(ClusterRole::None).has(ClusterRole::ConfigServer)); + ASSERT_TRUE(!ClusterRole(ClusterRole::None).has(ClusterRole::ShardServer)); - ASSERT_TRUE(ClusterRole(ClusterRole::ConfigServer) != ClusterRole::None); - ASSERT_TRUE(ClusterRole(ClusterRole::ConfigServer) == ClusterRole::ConfigServer); - ASSERT_TRUE(ClusterRole(ClusterRole::ConfigServer) == ClusterRole::ShardServer); + ASSERT_TRUE(!ClusterRole(ClusterRole::ConfigServer).has(ClusterRole::None)); + ASSERT_TRUE(ClusterRole(ClusterRole::ConfigServer).has(ClusterRole::ConfigServer)); + ASSERT_TRUE(ClusterRole(ClusterRole::ConfigServer).has(ClusterRole::ShardServer)); - ASSERT_TRUE(ClusterRole(ClusterRole::ShardServer) != ClusterRole::None); - ASSERT_TRUE(ClusterRole(ClusterRole::ShardServer) != ClusterRole::ConfigServer); - ASSERT_TRUE(ClusterRole(ClusterRole::ShardServer) == ClusterRole::ShardServer); + ASSERT_TRUE(!ClusterRole(ClusterRole::ShardServer).has(ClusterRole::None)); + ASSERT_TRUE(!ClusterRole(ClusterRole::ShardServer).has(ClusterRole::ConfigServer)); + ASSERT_TRUE(ClusterRole(ClusterRole::ShardServer).has(ClusterRole::ShardServer)); - ASSERT_TRUE(ClusterRole(ClusterRole::ShardServer).isShardRole()); - ASSERT_TRUE(ClusterRole(ClusterRole::ConfigServer).isShardRole()); + ASSERT_TRUE(ClusterRole(ClusterRole::ShardServer).exclusivelyHasShardRole()); + ASSERT_FALSE(ClusterRole(ClusterRole::ConfigServer).exclusivelyHasShardRole()); - ASSERT_TRUE(ClusterRole(ClusterRole::ShardServer).isExclusivelyShardRole()); - ASSERT_FALSE(ClusterRole(ClusterRole::ConfigServer).isExclusivelyShardRole()); - - ASSERT_FALSE(ClusterRole(ClusterRole::ConfigServer).isExclusivelyConfigSvrRole()); + ASSERT_FALSE(ClusterRole(ClusterRole::ConfigServer).exclusivelyHasConfigRole()); } #if !defined(_WIN32) && !(defined(__APPLE__) && TARGET_OS_TV) diff --git a/src/mongo/db/serverless/shard_split_commands.cpp b/src/mongo/db/serverless/shard_split_commands.cpp index b01d3867c08..936f51405d1 100644 --- a/src/mongo/db/serverless/shard_split_commands.cpp +++ b/src/mongo/db/serverless/shard_split_commands.cpp @@ -56,8 +56,8 @@ public: serverGlobalParams.featureCompatibility)); uassert(ErrorCodes::IllegalOperation, "Shard split is not available on config servers", - serverGlobalParams.clusterRole == ClusterRole::None || - serverGlobalParams.clusterRole == ClusterRole::ShardServer); + serverGlobalParams.clusterRole.has(ClusterRole::None) || +
serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)); uassert(ErrorCodes::CommandNotSupported, "Shard split is only supported in serverless mode", getGlobalReplSettings().isServerless()); diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp index 2c6c3bbd2f1..82415819c54 100644 --- a/src/mongo/db/service_entry_point_common.cpp +++ b/src/mongo/db/service_entry_point_common.cpp @@ -312,8 +312,8 @@ StatusWith<repl::ReadConcernArgs> _extractReadConcern(OperationContext* opCtx, "received command without explicit readConcern on an internalClient connection {}"_format( redact(cmdObj.toString())), readConcernArgs.isSpecified()); - } else if (serverGlobalParams.clusterRole == ClusterRole::ShardServer || - serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + } else if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) || + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { if (!readConcernArgs.isSpecified()) { // TODO: Disabled until after SERVER-44539, to avoid log spam. // LOGV2(21954, "Missing readConcern on {command}", "Missing readConcern " @@ -1161,8 +1161,8 @@ Future<void> CheckoutSessionAndInvokeCommand::_commitInvocation() { // succeeds. _stashTransaction(txnParticipant); - if (serverGlobalParams.clusterRole == ClusterRole::ShardServer || - serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) || + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { auto txnResponseMetadata = txnParticipant.getResponseMetadata(); auto bodyBuilder = replyBuilder->getBodyBuilder(); txnResponseMetadata.serialize(&bodyBuilder); @@ -1361,8 +1361,8 @@ void RunCommandAndWaitForWriteConcern::_setup() { "received command without explicit writeConcern on an internalClient connection {}"_format( redact(request.body.toString())), request.body.hasField(WriteConcernOptions::kWriteConcernField)); - } else if (serverGlobalParams.clusterRole == ClusterRole::ShardServer || - serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + } else if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) || + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { if (!request.body.hasField(WriteConcernOptions::kWriteConcernField)) { // TODO: Disabled until after SERVER-44539, to avoid log spam. 
// LOGV2(21959, "Missing writeConcern on {command}", "Missing " @@ -1508,8 +1508,8 @@ void ExecCommandDatabase::_initiateCommand() { _scopedMetrics.emplace(opCtx, dbname, collect); const auto allowTransactionsOnConfigDatabase = - (serverGlobalParams.clusterRole == ClusterRole::ConfigServer || - serverGlobalParams.clusterRole == ClusterRole::ShardServer); + (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) || + serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)); const auto invocationNss = _invocation->ns(); @@ -1805,8 +1805,7 @@ Future<void> ExecCommandDatabase::_commandExec() { auto opCtx = _execContext->getOpCtx(); if (!opCtx->getClient()->isInDirectClient() && - !serverGlobalParams.clusterRole.isExclusivelyConfigSvrRole() && - !_refreshedDatabase) { + !serverGlobalParams.clusterRole.exclusivelyHasConfigRole() && !_refreshedDatabase) { auto sce = s.extraInfo<StaleDbRoutingVersion>(); invariant(sce); @@ -1837,7 +1836,7 @@ Future<void> ExecCommandDatabase::_commandExec() { ShardingStatistics::get(opCtx).countStaleConfigErrors.addAndFetch(1); if (!opCtx->getClient()->isInDirectClient() && - !serverGlobalParams.clusterRole.isExclusivelyConfigSvrRole() && + !serverGlobalParams.clusterRole.exclusivelyHasConfigRole() && !_refreshedCollection) { if (auto sce = s.extraInfo<StaleConfigInfo>()) { bool inCriticalSection = sce->getCriticalSectionSignal().has_value(); @@ -1885,7 +1884,7 @@ Future<void> ExecCommandDatabase::_commandExec() { .onError<ErrorCodes::ShardCannotRefreshDueToLocksHeld>([this](Status s) -> Future<void> { // This exception can never happen on the config server. Config servers can't receive // SSV either, because they never have commands with shardVersion sent. - invariant(!serverGlobalParams.clusterRole.isExclusivelyConfigSvrRole()); + invariant(!serverGlobalParams.clusterRole.exclusivelyHasConfigRole()); auto opCtx = _execContext->getOpCtx(); if (!opCtx->getClient()->isInDirectClient() && !_refreshedCatalogCache) { diff --git a/src/mongo/db/service_entry_point_mongod.cpp b/src/mongo/db/service_entry_point_mongod.cpp index 58d3f664274..a09e32be199 100644 --- a/src/mongo/db/service_entry_point_mongod.cpp +++ b/src/mongo/db/service_entry_point_mongod.cpp @@ -87,7 +87,7 @@ public: if (!rcStatus.isOK()) { if (ErrorCodes::isExceededTimeLimitError(rcStatus.code())) { const int debugLevel = - serverGlobalParams.clusterRole == ClusterRole::ConfigServer ? 0 : 2; + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) ? 
0 : 2; LOGV2_DEBUG(21975, debugLevel, "Command on database {db} timed out waiting for read concern to be " diff --git a/src/mongo/db/stats/storage_stats.cpp b/src/mongo/db/stats/storage_stats.cpp index 950bafb4d7f..bb33926554d 100644 --- a/src/mongo/db/stats/storage_stats.cpp +++ b/src/mongo/db/stats/storage_stats.cpp @@ -112,7 +112,8 @@ void _appendRecordStats(OperationContext* opCtx, } } - if (serverGlobalParams.clusterRole == ClusterRole::ShardServer && !isNamespaceAlwaysUnsharded) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) && + !isNamespaceAlwaysUnsharded) { result->appendNumber( kOrphanCountField, BalancerStatsRegistry::get(opCtx)->getCollNumOrphanDocsFromDiskIfNeeded( diff --git a/src/mongo/db/transaction/transaction_participant.cpp b/src/mongo/db/transaction/transaction_participant.cpp index 5647e9f1694..00e81290f98 100644 --- a/src/mongo/db/transaction/transaction_participant.cpp +++ b/src/mongo/db/transaction/transaction_participant.cpp @@ -710,7 +710,7 @@ bool TransactionParticipant::Participant::_verifyCanBeginMultiDocumentTransactio uassert(ErrorCodes::ConflictingOperationInProgress, "Only servers in a sharded cluster can start a new transaction at the active " "transaction number", - serverGlobalParams.clusterRole != ClusterRole::None); + !serverGlobalParams.clusterRole.has(ClusterRole::None)); if (_isInternalSessionForRetryableWrite() && o().txnState.isInSet(TransactionState::kCommitted)) { @@ -942,7 +942,7 @@ void TransactionParticipant::Participant::beginOrContinue( "Transactions are not allowed on shard servers when " "writeConcernMajorityJournalDefault=false", replCoord->getWriteConcernMajorityShouldJournal() || - serverGlobalParams.clusterRole != ClusterRole::ShardServer || !autocommit || + !serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) || !autocommit || getTestCommandsEnabled()); } @@ -987,7 +987,7 @@ void TransactionParticipant::Participant::beginOrContinue( if (txnNumberAndRetryCounter.getTxnRetryCounter()) { uassert(ErrorCodes::InvalidOptions, "txnRetryCounter is only supported in sharded clusters", - serverGlobalParams.clusterRole != ClusterRole::None); + !serverGlobalParams.clusterRole.has(ClusterRole::None)); invariant(*txnNumberAndRetryCounter.getTxnRetryCounter() >= 0, "Cannot specify a negative txnRetryCounter"); } else { diff --git a/src/mongo/db/vector_clock_mongod.cpp b/src/mongo/db/vector_clock_mongod.cpp index b896fd91f80..bee92fd5d2d 100644 --- a/src/mongo/db/vector_clock_mongod.cpp +++ b/src/mongo/db/vector_clock_mongod.cpp @@ -202,7 +202,7 @@ void VectorClockMongoD::onStepDown() { void VectorClockMongoD::onInitialDataAvailable(OperationContext* opCtx, bool isMajorityDataAvailable) { - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { const auto maxTopologyTime{[&opCtx]() -> boost::optional<Timestamp> { DBDirectClient client{opCtx}; FindCommandRequest findRequest{NamespaceString::kConfigsvrShardsNamespace}; @@ -409,8 +409,8 @@ Future<void> VectorClockMongoD::_doWhileQueueNotEmptyOrError(ServiceContext* ser VectorClock::ComponentSet VectorClockMongoD::_gossipOutInternal() const { VectorClock::ComponentSet toGossip{Component::ClusterTime}; - if (serverGlobalParams.clusterRole == ClusterRole::ShardServer || - serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) || + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { 
toGossip.insert(Component::ConfigTime); toGossip.insert(Component::TopologyTime); } @@ -419,7 +419,7 @@ VectorClock::ComponentSet VectorClockMongoD::_gossipOutInternal() const { VectorClock::ComponentSet VectorClockMongoD::_gossipInInternal() const { VectorClock::ComponentSet toGossip{Component::ClusterTime}; - if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { toGossip.insert(Component::ConfigTime); toGossip.insert(Component::TopologyTime); } @@ -463,7 +463,7 @@ void VectorClockMongoD::_tickTo(Component component, LogicalTime newTime) { } if (component == Component::TopologyTime && - serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { _advanceComponentTimeTo(component, std::move(newTime)); return; } diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp index 8febdc098ed..b9d04b112cd 100644 --- a/src/mongo/db/write_concern.cpp +++ b/src/mongo/db/write_concern.cpp @@ -97,7 +97,7 @@ StatusWith<WriteConcernOptions> extractWriteConcern(OperationContext* opCtx, writeConcern = ([&]() { // WriteConcern defaults can only be applied on regular replica set members. Operations // received by shard and config servers should always have WC explicitly specified. - if (serverGlobalParams.clusterRole == ClusterRole::None && + if (serverGlobalParams.clusterRole.has(ClusterRole::None) && repl::ReplicationCoordinator::get(opCtx)->isReplEnabled() && (!opCtx->inMultiDocumentTransaction() || isTransactionCommand(cmdObj.firstElementFieldName())) && diff --git a/src/mongo/s/analyze_shard_key_role.cpp b/src/mongo/s/analyze_shard_key_role.cpp index ba6e8016ab7..188aec06172 100644 --- a/src/mongo/s/analyze_shard_key_role.cpp +++ b/src/mongo/s/analyze_shard_key_role.cpp @@ -65,8 +65,8 @@ bool supportsCoordinatingQueryAnalysis(bool isReplEnabled, bool ignoreFCV) { return false; } return isReplEnabled && !gMultitenancySupport && - (serverGlobalParams.clusterRole == ClusterRole::ConfigServer || - serverGlobalParams.clusterRole == ClusterRole::None); + (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) || + serverGlobalParams.clusterRole.has(ClusterRole::None)); } bool supportsCoordinatingQueryAnalysis(OperationContext* opCtx, bool ignoreFCV) { @@ -81,8 +81,8 @@ bool supportsPersistingSampledQueries(bool isReplEnabled, bool ignoreFCV) { return false; } return isReplEnabled && !gMultitenancySupport && - (serverGlobalParams.clusterRole == ClusterRole::ShardServer || - serverGlobalParams.clusterRole == ClusterRole::None); + (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) || + serverGlobalParams.clusterRole.has(ClusterRole::None)); } bool supportsPersistingSampledQueries(OperationContext* opCtx, bool ignoreFCV) { @@ -97,8 +97,8 @@ bool supportsSamplingQueries(bool isReplEnabled, bool ignoreFCV) { return true; } return isReplEnabled && !gMultitenancySupport && - (serverGlobalParams.clusterRole == ClusterRole::ShardServer || - serverGlobalParams.clusterRole == ClusterRole::None); + (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) || + serverGlobalParams.clusterRole.has(ClusterRole::None)); } bool supportsSamplingQueries(ServiceContext* serviceContext, bool ignoreFCV) { diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp index 5c16c9f65d8..564d68d38ff 100644 --- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp +++ 
b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp @@ -453,7 +453,7 @@ std::vector<BSONObj> ShardingCatalogClientImpl::runCatalogAggregation( aggRequest.setWriteConcern(WriteConcernOptions()); const auto readPref = [&]() -> ReadPreferenceSetting { - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer && + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV()) { // When the feature flag is on, the config server may read from any node in its replica // set, so we should use the typical config server read preference. @@ -468,7 +468,7 @@ std::vector<BSONObj> ShardingCatalogClientImpl::runCatalogAggregation( aggRequest.setUnwrappedReadPref(readPref.toContainingBSON()); - if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) { + if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { // Don't use a timeout on the config server to guarantee it can always refresh. const Milliseconds maxTimeMS = std::min(opCtx->getRemainingMaxTimeMillis(), maxTimeout); aggRequest.setMaxTimeMS(durationCount<Milliseconds>(maxTimeMS)); @@ -682,7 +682,7 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks( const Timestamp& timestamp, repl::ReadConcernLevel readConcern, const boost::optional<BSONObj>& hint) { - invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer || + invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) || readConcern == repl::ReadConcernLevel::kMajorityReadConcern); // Convert boost::optional<int> to boost::optional<long long>. @@ -870,7 +870,7 @@ std::vector<NamespaceString> ShardingCatalogClientImpl::getAllNssThatHaveZonesFo // Run the aggregation const auto readConcern = [&]() -> repl::ReadConcernArgs { - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { return {repl::ReadConcernLevel::kMajorityReadConcern}; } else { const auto time = VectorClock::get(opCtx)->getTime(); @@ -1250,7 +1250,7 @@ HistoricalPlacement ShardingCatalogClientImpl::getShardsThatOwnDataForCollAtClus "A full collection namespace must be specified", !collName.coll().empty()); - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { return getHistoricalPlacement(opCtx, clusterTime, collName); } @@ -1265,7 +1265,7 @@ HistoricalPlacement ShardingCatalogClientImpl::getShardsThatOwnDataForDbAtCluste "A full db namespace must be specified", dbName.coll().empty() && !dbName.db().empty()); - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { return getHistoricalPlacement(opCtx, clusterTime, dbName); } @@ -1275,7 +1275,7 @@ HistoricalPlacement ShardingCatalogClientImpl::getShardsThatOwnDataForDbAtCluste HistoricalPlacement ShardingCatalogClientImpl::getShardsThatOwnDataAtClusterTime( OperationContext* opCtx, const Timestamp& clusterTime) { - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { return getHistoricalPlacement(opCtx, clusterTime, boost::none); } @@ -1290,7 +1290,7 @@ HistoricalPlacement ShardingCatalogClientImpl::getHistoricalPlacement( const boost::optional<NamespaceString>& nss) { // TODO (SERVER-73029): Remove the invariant - invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer); + 
invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); auto configShard = _getConfigShard(opCtx); /* The aggregation pipeline is split into 2 sub-pipelines: @@ -1507,7 +1507,7 @@ HistoricalPlacement ShardingCatalogClientImpl::getHistoricalPlacement( // Run the aggregation const auto readConcern = [&]() -> repl::ReadConcernArgs { - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer && + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV()) { // When the feature flag is on, the config server may read from a secondary which may // need to wait for replication, so we should use afterClusterTime. diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp index d6e69d7e204..7712952b0cc 100644 --- a/src/mongo/s/catalog_cache.cpp +++ b/src/mongo/s/catalog_cache.cpp @@ -957,7 +957,7 @@ CatalogCache::IndexCache::LookupResult CatalogCache::IndexCache::_lookupIndexes( "timeInStore"_attr = previousVersion); const auto readConcern = [&]() -> repl::ReadConcernArgs { - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer && + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV()) { // When the feature flag is on, the config server may read from a secondary which // may need to wait for replication, so we should use afterClusterTime. diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp index 3f194e72edb..7f29af4ce65 100644 --- a/src/mongo/s/client/shard_registry.cpp +++ b/src/mongo/s/client/shard_registry.cpp @@ -424,7 +424,7 @@ std::unique_ptr<Shard> ShardRegistry::createConnection(const ConnectionString& c } std::shared_ptr<Shard> ShardRegistry::createLocalConfigShard() const { - invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer); + invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); return _shardFactory->createShard(ShardId::kConfigServerId, ConnectionString::forLocal()); } diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp index aa84b435102..8f7e7443e23 100644 --- a/src/mongo/s/client/shard_remote.cpp +++ b/src/mongo/s/client/shard_remote.cpp @@ -315,7 +315,7 @@ StatusWith<Shard::QueryResponse> ShardRemote::_runExhaustiveCursorCommand( Milliseconds getExhaustiveFindOnConfigMaxTimeMS(OperationContext* opCtx, const NamespaceString& nss) { - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { // Don't use a timeout on the config server to guarantee it can always refresh. return Milliseconds::max(); } diff --git a/src/mongo/s/config_server_catalog_cache_loader.cpp b/src/mongo/s/config_server_catalog_cache_loader.cpp index 2c11b6b2c95..948360c0333 100644 --- a/src/mongo/s/config_server_catalog_cache_loader.cpp +++ b/src/mongo/s/config_server_catalog_cache_loader.cpp @@ -51,7 +51,7 @@ CollectionAndChangedChunks getChangedChunks(OperationContext* opCtx, const NamespaceString& nss, ChunkVersion sinceVersion) { const auto readConcern = [&]() -> repl::ReadConcernArgs { - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer && + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV()) { // When the feature flag is on, the config server may read from a secondary which may // need to wait for replication, so we should use afterClusterTime.
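The hunks above (runCatalogAggregation, getHistoricalPlacement, CatalogCache::IndexCache::_lookupIndexes, getChangedChunks) all gate the same decision on the same role-plus-feature-flag condition: how strongly a catalog read must synchronize. A minimal runnable sketch of that selection follows; the types are simplified stand-ins, not the real repl::ReadConcernArgs or feature-flag APIs.

#include <iostream>

// Simplified stand-ins for the two pieces of global state each hunk consults.
struct Node {
    bool hasConfigRole;        // serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)
    bool catalogShardEnabled;  // gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV()
};

enum class CatalogReadConcern { kMajority, kAfterClusterTime };

// A dedicated config server can read its own authoritative copy of the sharding
// catalog, so majority suffices. With the catalog shard flag on, the read may be
// served by a secondary, so the caller must wait for replication via afterClusterTime.
CatalogReadConcern chooseCatalogReadConcern(const Node& node) {
    if (node.hasConfigRole && !node.catalogShardEnabled) {
        return CatalogReadConcern::kMajority;
    }
    return CatalogReadConcern::kAfterClusterTime;
}

int main() {
    std::cout << std::boolalpha
              // Dedicated config server: majority is sufficient.
              << (chooseCatalogReadConcern({true, false}) == CatalogReadConcern::kMajority)
              << '\n'
              // Catalog-shard config server: must wait via afterClusterTime.
              << (chooseCatalogReadConcern({true, true}) == CatalogReadConcern::kAfterClusterTime)
              << '\n';
}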
diff --git a/src/mongo/s/is_mongos.cpp b/src/mongo/s/is_mongos.cpp index 2e3ba77d045..6c85241db18 100644 --- a/src/mongo/s/is_mongos.cpp +++ b/src/mongo/s/is_mongos.cpp @@ -47,7 +47,7 @@ void setMongos(const bool state) { } bool isClusterNode() { - return serverGlobalParams.clusterRole != ClusterRole::None; + return !serverGlobalParams.clusterRole.has(ClusterRole::None); } bool isClusterNodeOrRouter() { diff --git a/src/mongo/s/query_analysis_sampler.cpp b/src/mongo/s/query_analysis_sampler.cpp index acab8a53ba1..4e2b4dfa048 100644 --- a/src/mongo/s/query_analysis_sampler.cpp +++ b/src/mongo/s/query_analysis_sampler.cpp @@ -73,7 +73,7 @@ StatusWith<std::vector<CollectionQueryAnalyzerConfiguration>> executeRefreshComm cmd.setNumQueriesExecutedPerSecond(lastAvgCount); BSONObj resObj; - if (isMongos() || serverGlobalParams.clusterRole == ClusterRole::ShardServer) { + if (isMongos() || serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); auto swResponse = configShard->runCommandWithFixedRetryAttempts( opCtx, @@ -85,7 +85,7 @@ StatusWith<std::vector<CollectionQueryAnalyzerConfiguration>> executeRefreshComm return status; } resObj = swResponse.getValue().response; - } else if (serverGlobalParams.clusterRole == ClusterRole::None) { + } else if (serverGlobalParams.clusterRole.has(ClusterRole::None)) { resObj = executeCommandOnPrimary( opCtx, DatabaseName::kAdmin, cmd.toBSON({}), [&](const BSONObj& resObj) {}); if (auto status = getStatusFromCommandResult(resObj); !status.isOK()) { @@ -170,11 +170,11 @@ double QueryAnalysisSampler::QueryStats::_calculateExponentialMovingAverage( void QueryAnalysisSampler::QueryStats::refreshTotalCount() { long long newTotalCount = [&] { - if (isMongos() || serverGlobalParams.clusterRole == ClusterRole::None) { + if (isMongos() || serverGlobalParams.clusterRole.has(ClusterRole::None)) { return globalOpCounters.getUpdate()->load() + globalOpCounters.getDelete()->load() + _lastFindAndModifyQueriesCount + globalOpCounters.getQuery()->load() + _lastAggregateQueriesCount + _lastCountQueriesCount + _lastDistinctQueriesCount; - } else if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) { + } else if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { return globalOpCounters.getNestedAggregate()->load(); } MONGO_UNREACHABLE; diff --git a/src/mongo/s/query_analysis_sampler_util.h b/src/mongo/s/query_analysis_sampler_util.h index 86286871145..2ae61dfb583 100644 --- a/src/mongo/s/query_analysis_sampler_util.h +++ b/src/mongo/s/query_analysis_sampler_util.h @@ -121,7 +121,7 @@ boost::optional<UUID> getOrGenerateSampleId(OperationContext* opCtx, if (!supportsPersistingSampledQueries(opCtx)) { return boost::none; } - if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) { + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { const auto isInternalClient = !opCtx->getClient()->session() || (opCtx->getClient()->session()->getTags() & transport::Session::kInternalClient); uassert(ErrorCodes::InvalidOptions, @@ -129,7 +129,7 @@ boost::optional<UUID> getOrGenerateSampleId(OperationContext* opCtx, !request.getSampleId() || (isInternalClient || getTestCommandsEnabled())); return request.getSampleId(); } - if (serverGlobalParams.clusterRole == ClusterRole::None) { + if (serverGlobalParams.clusterRole.has(ClusterRole::None)) { uassert(ErrorCodes::InvalidOptions, "Cannot specify 'sampleRate' since it is an internal field", !request.getSampleId()); diff 
--git a/src/mongo/s/sharding_initialization.cpp b/src/mongo/s/sharding_initialization.cpp index a4a22df1b29..67f82bc9bc0 100644 --- a/src/mongo/s/sharding_initialization.cpp +++ b/src/mongo/s/sharding_initialization.cpp @@ -220,7 +220,7 @@ Status initializeGlobalShardingState( } void loadCWWCFromConfigServerForReplication(OperationContext* opCtx) { - if (!serverGlobalParams.clusterRole.isExclusivelyShardRole()) { + if (!serverGlobalParams.clusterRole.exclusivelyHasShardRole()) { // Cluster wide read/write concern in a sharded cluster lives on the config server, so a // config server node's local cache will be correct and explicitly checking for a default // write concern via remote command is unnecessary. @@ -245,7 +245,7 @@ Status loadGlobalSettingsFromConfigServer(OperationContext* opCtx, // inserting a cluster id and adding a shard, there is at least one majority write on // the added shard (dropping the sessions collection), so we should be guaranteed the // cluster id cannot roll back. - auto readConcern = serverGlobalParams.clusterRole == ClusterRole::ConfigServer + auto readConcern = serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) ? repl::ReadConcernLevel::kLocalReadConcern : repl::ReadConcernLevel::kMajorityReadConcern; uassertStatusOK(ClusterIdentityLoader::get(opCtx)->loadClusterId( @@ -278,7 +278,7 @@ void preCacheMongosRoutingInfo(OperationContext* opCtx) { // mongos, and we'd need to consider the implications of it running on either kind of mongod. tassert(71960, "Unexpectedly pre caching mongos routing info on shard or config server node", - serverGlobalParams.clusterRole == ClusterRole::None); + serverGlobalParams.clusterRole.has(ClusterRole::None)); auto grid = Grid::get(opCtx); auto catalogClient = grid->catalogClient(); @@ -308,7 +308,7 @@ Status preWarmConnectionPool(OperationContext* opCtx) { // mongos, and we'd need to consider the implications of it running on either kind of mongod. tassert(71961, "Unexpectedly pre warming connection pool on shard or config server node", - serverGlobalParams.clusterRole == ClusterRole::None); + serverGlobalParams.clusterRole.has(ClusterRole::None)); std::vector<HostAndPort> allHosts; auto const grid = Grid::get(opCtx);
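Taken together, the call-site changes hinge on the has() semantics exercised by the updated ClusterRole.Equality test above. The standalone sketch below models only that tested behavior and is not the MongoDB sources: the runtime feature-flag check is folded into a compile-time constant for illustration.

// Standalone model of the has()/exclusivelyHas*() semantics asserted by the
// updated ClusterRole.Equality test. The feature flag is modeled as a constant.
#include <cassert>

class ClusterRole {
public:
    enum Value { None, ShardServer, ConfigServer };

    // Stand-in for gFeatureFlagCatalogShard.isEnabledAndIgnoreFCV().
    static constexpr bool kCatalogShardEnabled = true;

    ClusterRole(Value v) : _value(v) {}

    // With catalog shard enabled, a config server also acts as a shard, so it
    // "has" both the config and the shard roles; plain equality would miss this.
    bool has(const ClusterRole& other) const {
        if (kCatalogShardEnabled && _value == ConfigServer) {
            return other._value == ConfigServer || other._value == ShardServer;
        }
        return _value == other._value;
    }

    bool exclusivelyHasShardRole() const {
        return _value == ShardServer;
    }

    bool exclusivelyHasConfigRole() const {
        return _value == ConfigServer && !kCatalogShardEnabled;
    }

private:
    Value _value;
};

int main() {
    assert(ClusterRole(ClusterRole::ConfigServer).has(ClusterRole::ShardServer));
    assert(!ClusterRole(ClusterRole::ShardServer).has(ClusterRole::ConfigServer));
    assert(!ClusterRole(ClusterRole::ConfigServer).exclusivelyHasShardRole());
    assert(!ClusterRole(ClusterRole::ConfigServer).exclusivelyHasConfigRole());
    return 0;
}

This asymmetry is why the service_entry_point_common.cpp hunks switch config-server-only paths to exclusivelyHasConfigRole() rather than negating has(ClusterRole::ShardServer): on a catalog shard, the latter would never be true for a config server.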