diff options
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp | 5
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 18
2 files changed, 11 insertions, 12 deletions
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp index f3a84eb2ad3..66f01046277 100644 --- a/src/mongo/db/repl/initial_syncer.cpp +++ b/src/mongo/db/repl/initial_syncer.cpp @@ -1098,14 +1098,13 @@ void InitialSyncer::_rollbackCheckerCheckForRollbackCallback( // Set UUIDs for all non-replicated collections on secondaries. See comment in // ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage() for the explanation of // why we do this and why it is not necessary for sharded clusters. - if (serverGlobalParams.clusterRole != ClusterRole::ShardServer && - serverGlobalParams.clusterRole != ClusterRole::ConfigServer) { + if (serverGlobalParams.clusterRole != ClusterRole::ShardServer) { const NamespaceString nss("admin", "system.version"); auto opCtx = makeOpCtx(); auto statusWithUUID = _storage->getCollectionUUID(opCtx.get(), nss); if (!statusWithUUID.isOK()) { // If the admin database does not exist, we intentionally fail initial sync. As part of - // SERVER-29448, we will disallow dropping the admin database, so failing here is fine. + // SERVER-29448, we disallow dropping the admin database, so failing here is fine. onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, statusWithUUID.getStatus()); return; diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp index f9c5ce13b78..8c31e36eb8e 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp +++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp @@ -402,15 +402,15 @@ Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(Operati }); // Set UUIDs for all non-replicated collections. 
This is necessary for independent replica - // sets started with no data files because collections in local are created prior to the - // featureCompatibilityVersion being set to 3.6, so the collections are not created with - // UUIDs. This is not an issue for sharded clusters because the config server sends a - // setFeatureCompatibilityVersion command with the featureCompatibilityVersion equal to the - // cluster's featureCompatibilityVersion during addShard, which will add UUIDs to all - // collections that do not already have them. Here, we add UUIDs to the non-replicated - // collections on the primary. We add them on the secondaries during InitialSync. - if (serverGlobalParams.clusterRole != ClusterRole::ShardServer && - serverGlobalParams.clusterRole != ClusterRole::ConfigServer) { + // sets and config server replica sets started with no data files because collections in + // local are created prior to the featureCompatibilityVersion being set to 3.6, so the + // collections are not created with UUIDs. This is not an issue for shard servers because + // the config server sends a setFeatureCompatibilityVersion command with the + // featureCompatibilityVersion equal to the cluster's featureCompatibilityVersion during + // addShard, which will add UUIDs to all collections that do not already have them. Here, + // we add UUIDs to the non-replicated collections on the primary. We add them on the + // secondaries during InitialSync. + if (serverGlobalParams.clusterRole != ClusterRole::ShardServer) { auto schemaStatus = updateUUIDSchemaVersionNonReplicated(opCtx, true); if (!schemaStatus.isOK()) { return schemaStatus; |