From 7a4ea53a34703a51186c9d1ffffa31cf566ef11c Mon Sep 17 00:00:00 2001
From: Tommaso Tocci
Date: Wed, 5 Feb 2020 18:56:33 +0100
Subject: Revert "SERVER-44771 Introduce 2nd CatalogCache for filtering only"

This reverts commit 84bdc9478401355656ab7eaec745dbae2e4294cb.
---
 ...replication_coordinator_external_state_impl.cpp |  5 +-
 .../db/s/flush_database_cache_updates_command.cpp  |  2 +-
 .../flush_routing_table_cache_updates_command.cpp  |  2 +-
 src/mongo/db/s/migration_source_manager.cpp        |  2 +-
 src/mongo/db/s/migration_util_test.cpp             | 23 ++++++-
 .../db/s/shard_filtering_metadata_refresh.cpp      | 77 +---------------------
 src/mongo/db/s/shard_filtering_metadata_refresh.h  | 27 --------
 src/mongo/db/s/shard_server_op_observer.cpp        |  3 +-
 src/mongo/db/s/sharding_initialization_mongod.cpp  | 53 ++++-----------
 src/mongo/db/s/split_chunk_test.cpp                |  2 +-
 10 files changed, 42 insertions(+), 154 deletions(-)

diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index baac7aa6921..38ffb397076 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -81,7 +81,6 @@
 #include "mongo/db/s/config/sharding_catalog_manager.h"
 #include "mongo/db/s/migration_util.h"
 #include "mongo/db/s/periodic_balancer_config_refresher.h"
-#include "mongo/db/s/shard_filtering_metadata_refresh.h"
 #include "mongo/db/s/sharding_initialization_mongod.h"
 #include "mongo/db/s/sharding_state_recovery.h"
 #include "mongo/db/s/transaction_coordinator_service.h"
@@ -712,7 +711,7 @@ void ReplicationCoordinatorExternalStateImpl::shardingOnStepDownHook() {
         TransactionCoordinatorService::get(_service)->onStepDown();
     } else if (ShardingState::get(_service)->enabled()) {
         ChunkSplitter::get(_service).onStepDown();
-        getCatalogCacheLoaderForFiltering(_service).onStepDown();
+        CatalogCacheLoader::get(_service).onStepDown();
         PeriodicBalancerConfigRefresher::get(_service).onStepDown();
         TransactionCoordinatorService::get(_service)->onStepDown();
     }
@@ -814,7 +813,7 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
         ShardingInitializationMongoD::get(opCtx)->updateShardIdentityConfigString(opCtx,
                                                                                   configsvrConnStr);

-        getCatalogCacheLoaderForFiltering(_service).onStepUp();
+        CatalogCacheLoader::get(_service).onStepUp();
         ChunkSplitter::get(_service).onStepUp();
         PeriodicBalancerConfigRefresher::get(_service).onStepUp(_service);
         TransactionCoordinatorService::get(_service)->onStepUp(opCtx);
diff --git a/src/mongo/db/s/flush_database_cache_updates_command.cpp b/src/mongo/db/s/flush_database_cache_updates_command.cpp
index 845fc0b2b34..5e4fadfaae0 100644
--- a/src/mongo/db/s/flush_database_cache_updates_command.cpp
+++ b/src/mongo/db/s/flush_database_cache_updates_command.cpp
@@ -137,7 +137,7 @@ public:
             forceDatabaseRefresh(opCtx, _dbName());
         }

-        getCatalogCacheLoaderForFiltering(opCtx).waitForDatabaseFlush(opCtx, _dbName());
+        CatalogCacheLoader::get(opCtx).waitForDatabaseFlush(opCtx, _dbName());

         repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
     }
diff --git a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
index 9b330d042d0..f2791bf1fbd 100644
--- a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
+++ b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
@@ -136,7 +136,7 @@ public:
             forceShardFilteringMetadataRefresh(opCtx, ns());
         }

-        getCatalogCacheLoaderForFiltering(opCtx).waitForCollectionFlush(opCtx, ns());
+        CatalogCacheLoader::get(opCtx).waitForCollectionFlush(opCtx, ns());

         repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
     }
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index b65700b8127..0ea9147e6dd 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -721,7 +721,7 @@ void MigrationSourceManager::_cleanup() {
     // possible that the persisted metadata is rolled back after step down, but the write which
     // cleared the 'inMigration' flag is not, a secondary node will report itself at an older
     // shard version.
-    getCatalogCacheLoaderForFiltering(_opCtx).waitForCollectionFlush(_opCtx, getNss());
+    CatalogCacheLoader::get(_opCtx).waitForCollectionFlush(_opCtx, getNss());

     // Clear the 'minOpTime recovery' document so that the next time a node from this shard
     // becomes a primary, it won't have to recover the config server optime.
diff --git a/src/mongo/db/s/migration_util_test.cpp b/src/mongo/db/s/migration_util_test.cpp
index b9906b08958..2aafa4e6c5f 100644
--- a/src/mongo/db/s/migration_util_test.cpp
+++ b/src/mongo/db/s/migration_util_test.cpp
@@ -57,7 +57,7 @@ protected:
         WaitForMajorityService::get(getServiceContext()).setUp(getServiceContext());

-        getCatalogCacheLoaderForFiltering(operationContext()).initializeReplicaSetRole(true);
+        CatalogCacheLoader::get(operationContext()).initializeReplicaSetRole(true);

         setupNShards(2);
     }

@@ -231,6 +231,27 @@ protected:
                 chunk4.toConfigBSON()};
         }());
     }
+
+    void respondToMetadataRefreshRequestsWithError() {
+        // Return an empty database (need to return it twice because for missing databases, the
+        // CatalogClient tries twice)
+        expectFindSendBSONObjVector(kConfigHostAndPort, {});
+        expectFindSendBSONObjVector(kConfigHostAndPort, {});
+
+        // getCollectionRoutingInfoWithRefresh calls _getCollectionRoutingInfo twice
+        expectFindSendBSONObjVector(kConfigHostAndPort, {});
+        expectFindSendBSONObjVector(kConfigHostAndPort, {});
+    }
+
+    boost::optional getRoutingInfo() {
+        auto future = scheduleRoutingInfoRefresh(kNss);
+
+        respondToMetadataRefreshRequests();
+
+        auto routingInfo = future.default_timed_get();
+
+        return routingInfo;
+    }
 };

 UUID getCollectionUuid(OperationContext* opCtx, const NamespaceString& nss) {
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index 1c6ccc9f744..5926ddcb456 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -36,15 +36,12 @@
 #include "mongo/db/catalog/database_holder.h"
 #include "mongo/db/catalog_raii.h"
 #include "mongo/db/commands/feature_compatibility_version.h"
-#include "mongo/db/commands/test_commands_enabled.h"
 #include "mongo/db/operation_context.h"
 #include "mongo/db/s/collection_sharding_runtime.h"
 #include "mongo/db/s/database_sharding_state.h"
 #include "mongo/db/s/operation_sharding_state.h"
 #include "mongo/db/s/sharding_state.h"
 #include "mongo/db/s/sharding_statistics.h"
-#include "mongo/db/server_options.h"
-#include "mongo/db/storage/storage_options.h"
 #include "mongo/s/catalog_cache.h"
 #include "mongo/s/grid.h"
 #include "mongo/util/fail_point.h"
@@ -127,63 +124,8 @@ void onDbVersionMismatch(OperationContext* opCtx,
     forceDatabaseRefresh(opCtx, dbName);
 }

-const auto catalogCacheForFilteringDecoration =
-    ServiceContext::declareDecoration<std::unique_ptr<CatalogCache>>();
-
-const auto catalogCacheLoaderForFilteringDecoration =
-    ServiceContext::declareDecoration<std::unique_ptr<CatalogCacheLoader>>();
-
-CatalogCache& getCatalogCacheForFiltering(ServiceContext* serviceContext) {
-    if (hasAdditionalCatalogCacheForFiltering()) {
-        auto& catalogCacheForFiltering = catalogCacheForFilteringDecoration(serviceContext);
-        invariant(catalogCacheForFiltering);
-        return *catalogCacheForFiltering;
-    }
-    return *Grid::get(serviceContext)->catalogCache();
-}
-
-CatalogCache& getCatalogCacheForFiltering(OperationContext* opCtx) {
-    return getCatalogCacheForFiltering(opCtx->getServiceContext());
-}
-
 }  // namespace

-
-bool hasAdditionalCatalogCacheForFiltering() {
-    invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
-    return getTestCommandsEnabled() && !storageGlobalParams.readOnly;
-}
-
-void setCatalogCacheForFiltering(ServiceContext* serviceContext,
-                                 std::unique_ptr<CatalogCache> catalogCache) {
-    invariant(hasAdditionalCatalogCacheForFiltering());
-    auto& catalogCacheForFiltering = catalogCacheForFilteringDecoration(serviceContext);
-    invariant(!catalogCacheForFiltering);
-    catalogCacheForFiltering = std::move(catalogCache);
-}
-
-void setCatalogCacheLoaderForFiltering(ServiceContext* serviceContext,
-                                       std::unique_ptr<CatalogCacheLoader> loader) {
-    invariant(hasAdditionalCatalogCacheForFiltering());
-    auto& catalogCacheLoader = catalogCacheLoaderForFilteringDecoration(serviceContext);
-    invariant(!catalogCacheLoader);
-    catalogCacheLoader = std::move(loader);
-}
-
-CatalogCacheLoader& getCatalogCacheLoaderForFiltering(ServiceContext* serviceContext) {
-    if (hasAdditionalCatalogCacheForFiltering()) {
-        auto& catalogCacheLoader = catalogCacheLoaderForFilteringDecoration(serviceContext);
-        invariant(catalogCacheLoader);
-        return *catalogCacheLoader;
-    }
-    return CatalogCacheLoader::get(serviceContext);
-}
-
-CatalogCacheLoader& getCatalogCacheLoaderForFiltering(OperationContext* opCtx) {
-    return getCatalogCacheLoaderForFiltering(opCtx->getServiceContext());
-}
-
-
 Status onShardVersionMismatchNoExcept(OperationContext* opCtx,
                                       const NamespaceString& nss,
                                       ChunkVersion shardVersionReceived,
@@ -206,15 +148,8 @@ ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx,
     auto* const shardingState = ShardingState::get(opCtx);
     invariant(shardingState->canAcceptShardedCommands());

-    if (hasAdditionalCatalogCacheForFiltering()) {
-        Grid::get(opCtx)
-            ->catalogCache()
-            ->getCollectionRoutingInfoWithRefresh(opCtx, nss, forceRefreshFromThisThread)
-            .getStatus()
-            .ignore();
-    }
     auto routingInfo =
-        uassertStatusOK(getCatalogCacheForFiltering(opCtx).getCollectionRoutingInfoWithRefresh(
+        uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(
             opCtx, nss, forceRefreshFromThisThread));
     auto cm = routingInfo.cm();

@@ -305,16 +240,8 @@ void forceDatabaseRefresh(OperationContext* opCtx, const StringData dbName) {

     DatabaseVersion refreshedDbVersion;
     try {
-        if (hasAdditionalCatalogCacheForFiltering()) {
-            Grid::get(opCtx)
-                ->catalogCache()
-                ->getDatabaseWithRefresh(opCtx, dbName)
-                .getStatus()
-                .ignore();
-        }
         refreshedDbVersion =
-            uassertStatusOK(
-                getCatalogCacheForFiltering(opCtx).getDatabaseWithRefresh(opCtx, dbName))
+            uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabaseWithRefresh(opCtx, dbName))
                 .databaseVersion();
     } catch (const ExceptionFor&) {
         // db has been dropped, set the db version to boost::none
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.h b/src/mongo/db/s/shard_filtering_metadata_refresh.h
index 7ca5cf1b992..f378c60622f 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.h
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.h
@@ -35,35 +35,8 @@

 namespace mongo {

-class CatalogCache;
-class CatalogCacheLoader;
-class ServiceContext;
 class OperationContext;

-/**
- * Returns True when a separate CatalogCache must be used for filtering
- */
-bool hasAdditionalCatalogCacheForFiltering();
-
-/**
- * CatalogCacheForFiltering is only used on a shard for obtaining the orphan filtering metadata
- */
-void setCatalogCacheForFiltering(ServiceContext* serviceContext,
                                  std::unique_ptr<CatalogCache> catalogCache);
-
-
-/**
- * CatalogCacheLoaderForFiltering is only used on a shard for obtaining the orphan filtering
- * metadata
- */
-void setCatalogCacheLoaderForFiltering(ServiceContext* serviceContext,
                                        std::unique_ptr<CatalogCacheLoader> loader);
-
-// For routing use `CatalogCacheLoader::get()`
-CatalogCacheLoader& getCatalogCacheLoaderForFiltering(ServiceContext* serviceContext);
-CatalogCacheLoader& getCatalogCacheLoaderForFiltering(OperationContext* opCtx);
-
-
 /**
  * Must be invoked whenever code, which is executing on a shard encounters a StaleConfig exception
  * and should be passed the 'version received' from the exception. If the shard's current version is
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index 597554bc979..b58479cbcde 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -42,7 +42,6 @@
 #include "mongo/db/s/migration_source_manager.h"
 #include "mongo/db/s/migration_util.h"
 #include "mongo/db/s/range_deletion_task_gen.h"
-#include "mongo/db/s/shard_filtering_metadata_refresh.h"
 #include "mongo/db/s/shard_identity_rollback_notifier.h"
 #include "mongo/db/s/sharding_initialization_mongod.h"
 #include "mongo/db/s/type_shard_identity.h"
@@ -79,7 +78,7 @@ public:
     void commit(boost::optional<Timestamp>) override {
         invariant(_opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_IX));

-        getCatalogCacheLoaderForFiltering(_opCtx).notifyOfCollectionVersionUpdate(_nss);
+        CatalogCacheLoader::get(_opCtx).notifyOfCollectionVersionUpdate(_nss);

         // Force subsequent uses of the namespace to refresh the filtering metadata so they can
         // synchronize with any work happening on the primary (e.g., migration critical section).
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index 24100e2d682..7c81c06766c 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -49,7 +49,6 @@
 #include "mongo/db/s/chunk_splitter.h"
 #include "mongo/db/s/periodic_balancer_config_refresher.h"
 #include "mongo/db/s/read_only_catalog_cache_loader.h"
-#include "mongo/db/s/shard_filtering_metadata_refresh.h"
 #include "mongo/db/s/shard_server_catalog_cache_loader.h"
 #include "mongo/db/s/sharding_config_optime_gossip.h"
 #include "mongo/db/s/transaction_coordinator_service.h"
@@ -65,7 +64,6 @@
 #include "mongo/s/config_server_catalog_cache_loader.h"
 #include "mongo/s/grid.h"
 #include "mongo/s/sharding_initialization.h"
-#include "mongo/util/exit.h"
 #include "mongo/util/log.h"

 namespace mongo {
@@ -167,7 +165,7 @@ void ShardingInitializationMongoD::initializeShardingEnvironmentOnShardServer(
     bool isStandaloneOrPrimary =
         !isReplSet || (replCoord->getMemberState() == repl::MemberState::RS_PRIMARY);

-    getCatalogCacheLoaderForFiltering(opCtx).initializeReplicaSetRole(isStandaloneOrPrimary);
+    CatalogCacheLoader::get(opCtx).initializeReplicaSetRole(isStandaloneOrPrimary);
     ChunkSplitter::get(opCtx).onShardingInitialization(isStandaloneOrPrimary);
     PeriodicBalancerConfigRefresher::get(opCtx).onShardingInitialization(opCtx->getServiceContext(),
                                                                          isStandaloneOrPrimary);
@@ -412,46 +410,17 @@ void initializeGlobalShardingStateForMongoD(OperationContext* opCtx,

     auto const service = opCtx->getServiceContext();

-
-    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
-        hasAdditionalCatalogCacheForFiltering()) {
-        // Setup additional CatalogCache for filtering only
-        setCatalogCacheLoaderForFiltering(service,
-                                          std::make_unique<ShardServerCatalogCacheLoader>(
-                                              std::make_unique<ConfigServerCatalogCacheLoader>()));
-        setCatalogCacheForFiltering(
-            service, std::make_unique<CatalogCache>(getCatalogCacheLoaderForFiltering(opCtx)));
-        registerShutdownTask(
-            [service]() { getCatalogCacheLoaderForFiltering(service).shutDown(); });
-    }
-
-    // Make primary CatalogCacheLoader according to the cluster Role
-    auto makeCatalogCacheLoader = []() -> std::unique_ptr<CatalogCacheLoader> {
-        switch (serverGlobalParams.clusterRole) {
-            case ClusterRole::ShardServer:
-                if (storageGlobalParams.readOnly) {
-                    return std::make_unique<ReadOnlyCatalogCacheLoader>();
-                }
-                if (hasAdditionalCatalogCacheForFiltering()) {
-                    // The primary CatalogCache will be used only for routing
-                    return std::make_unique<ConfigServerCatalogCacheLoader>();
-                }
-
-                // Normal ShardServer without additional cache for filtering
-                return std::make_unique<ShardServerCatalogCacheLoader>(
-                    std::make_unique<ConfigServerCatalogCacheLoader>());
-                break;
-
-            case ClusterRole::ConfigServer:
-                return std::make_unique<ConfigServerCatalogCacheLoader>();
-                break;
-
-            default:
-                MONGO_UNREACHABLE;
+    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+        if (storageGlobalParams.readOnly) {
+            CatalogCacheLoader::set(service, std::make_unique<ReadOnlyCatalogCacheLoader>());
+        } else {
+            CatalogCacheLoader::set(service,
+                                    std::make_unique<ShardServerCatalogCacheLoader>(
+                                        std::make_unique<ConfigServerCatalogCacheLoader>()));
         }
-    };
-
-    CatalogCacheLoader::set(service, makeCatalogCacheLoader());
+    } else {
+        CatalogCacheLoader::set(service, std::make_unique<ConfigServerCatalogCacheLoader>());
+    }

     auto validator = LogicalTimeValidator::get(service);
     if (validator) {  // The keyManager may be existing if the node was a part of a standalone RS.
diff --git a/src/mongo/db/s/split_chunk_test.cpp b/src/mongo/db/s/split_chunk_test.cpp
index fb2adc3c6ea..5adb6c0b359 100644
--- a/src/mongo/db/s/split_chunk_test.cpp
+++ b/src/mongo/db/s/split_chunk_test.cpp
@@ -68,7 +68,7 @@ public:
         ShardServerTestFixture::setUp();

         ShardingState::get(operationContext())->setInitialized(_shardId, OID::gen());
-        getCatalogCacheLoaderForFiltering(getServiceContext()).initializeReplicaSetRole(true);
+        CatalogCacheLoader::get(getServiceContext()).initializeReplicaSetRole(true);

         // Instantiate names.
         _epoch = OID::gen();
--
cgit v1.2.1
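
Note on the restored call pattern (illustration only, not part of the patch): after this revert, shard-server code goes back to the single process-wide loader reached through the CatalogCacheLoader decoration, as the '+' lines in the hunks above show. The sketch below assumes an in-tree translation unit; the wrapper function name and the header path are assumptions for illustration, while CatalogCacheLoader::get() and waitForCollectionFlush() are taken directly from the '+' lines above.

    // Sketch only: illustrates the call pattern this revert restores.
    #include "mongo/s/catalog_cache_loader.h"  // header path assumed

    namespace mongo {

    // Hypothetical helper: blocks until the persisted routing metadata for
    // 'nss' has been flushed, using the single process-wide loader that the
    // revert re-establishes (no separate filtering-only loader).
    void waitForPersistedMetadataFlush(OperationContext* opCtx, const NamespaceString& nss) {
        CatalogCacheLoader::get(opCtx).waitForCollectionFlush(opCtx, nss);
    }

    }  // namespace mongo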