88 files changed, 739 insertions, 729 deletions
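The bulk of this change replaces the MONGO_DECLARE_SHIM / MONGO_DEFINE_SHIM / MONGO_REGISTER_SHIM customization points with plain registerFactory / register...Impl hooks that the concrete (or mock/embedded) translation units install from MONGO_INITIALIZER blocks. The following is a minimal, self-contained sketch of that pattern under assumed names (Widget and WidgetImpl are hypothetical stand-ins; the real code uses stdx::function and the global-initializer machinery rather than a bare main()):

#include <functional>
#include <iostream>
#include <memory>
#include <string>

// Hypothetical interface playing the role of Collection::Impl, DatabaseHolder::Impl, etc.
struct WidgetImpl {
    virtual ~WidgetImpl() = default;
    virtual std::string name() const = 0;
};

class Widget {
public:
    // The hook that replaces the old link-time shim: a callable installed at startup.
    using factory_function_type = std::function<std::unique_ptr<WidgetImpl>()>;

    static void registerFactory(factory_function_type f) {
        factory() = std::move(f);
    }

    // The facade delegates construction of its pimpl to whatever factory was registered.
    Widget() : _impl(factory()()) {}

    std::string name() const {
        return _impl->name();
    }

private:
    static factory_function_type& factory() {
        static factory_function_type f;  // must be set before the first Widget is built
        return f;
    }

    std::unique_ptr<WidgetImpl> _impl;
};

// In the real tree this registration lives in a MONGO_INITIALIZER block inside the
// concrete (or mock) implementation's translation unit.
struct RealWidgetImpl : WidgetImpl {
    std::string name() const override {
        return "real widget";
    }
};

int main() {
    Widget::registerFactory([] { return std::make_unique<RealWidgetImpl>(); });
    Widget w;
    std::cout << w.name() << '\n';  // prints "real widget"
    return 0;
}

The trade-off visible throughout the diff is that the shim's link-time "exactly one implementation" check (the TU hook types in shim.h) gives way to a runtime registration that simply has to run before the first caller constructs the object.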
diff --git a/src/mongo/SConscript b/src/mongo/SConscript index c7ae4b645c4..6d6b8dfbdb0 100644 --- a/src/mongo/SConscript +++ b/src/mongo/SConscript @@ -65,7 +65,6 @@ env.Library( 'base/initializer_dependency_graph.cpp', 'base/make_string_vector.cpp', 'base/parse_number.cpp', - 'base/shim.cpp', 'base/simple_string_data_comparator.cpp', 'base/status.cpp', 'base/string_data.cpp', diff --git a/src/mongo/base/init.h b/src/mongo/base/init.h index 50a8eee3529..1d6595592aa 100644 --- a/src/mongo/base/init.h +++ b/src/mongo/base/init.h @@ -117,15 +117,15 @@ * of the function to declare would be options. */ #define MONGO_INITIALIZER_GENERAL(NAME, PREREQUISITES, DEPENDENTS) \ - ::mongo::Status MONGO_INITIALIZER_FUNCTION_NAME_(NAME)(::mongo::InitializerContext*); \ + ::mongo::Status _MONGO_INITIALIZER_FUNCTION_NAME(NAME)(::mongo::InitializerContext*); \ namespace { \ ::mongo::GlobalInitializerRegisterer _mongoInitializerRegisterer_##NAME( \ std::string(#NAME), \ MONGO_MAKE_STRING_VECTOR PREREQUISITES, \ MONGO_MAKE_STRING_VECTOR DEPENDENTS, \ - mongo::InitializerFunction(MONGO_INITIALIZER_FUNCTION_NAME_(NAME))); \ + mongo::InitializerFunction(_MONGO_INITIALIZER_FUNCTION_NAME(NAME))); \ } \ - ::mongo::Status MONGO_INITIALIZER_FUNCTION_NAME_(NAME) + ::mongo::Status _MONGO_INITIALIZER_FUNCTION_NAME(NAME) /** * Macro to define an initializer group. @@ -143,4 +143,4 @@ * Macro to produce a name for a mongo initializer function for an initializer operation * named "NAME". */ -#define MONGO_INITIALIZER_FUNCTION_NAME_(NAME) _mongoInitializerFunction_##NAME +#define _MONGO_INITIALIZER_FUNCTION_NAME(NAME) _mongoInitializerFunction_##NAME diff --git a/src/mongo/base/shim.cpp b/src/mongo/base/shim.cpp deleted file mode 100644 index b5834086563..00000000000 --- a/src/mongo/base/shim.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright (C) 2018 MongoDB Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License, version 3, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the GNU Affero General Public License in all respects - * for all of the code used other than as permitted herein. If you modify - * file(s) with this exception, you may extend this exception to your - * version of the file(s), but you are not obligated to do so. If you do not - * wish to do so, delete this exception statement from your version. If you - * delete this exception statement from all source files in the program, - * then also delete it in the license file. 
- */ - -#include "mongo/base/shim.h" - - -namespace mongo { -namespace { -MONGO_INITIALIZER_GROUP(ShimHooks, MONGO_NO_PREREQUISITES, ("default")); -} // namespace -} // namespace mongo diff --git a/src/mongo/base/shim.h b/src/mongo/base/shim.h deleted file mode 100644 index 28437322b8b..00000000000 --- a/src/mongo/base/shim.h +++ /dev/null @@ -1,256 +0,0 @@ -/** - * Copyright (C) 2018 MongoDB Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License, version 3, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the GNU Affero General Public License in all respects - * for all of the code used other than as permitted herein. If you modify - * file(s) with this exception, you may extend this exception to your - * version of the file(s), but you are not obligated to do so. If you do not - * wish to do so, delete this exception statement from your version. If you - * delete this exception statement from all source files in the program, - * then also delete it in the license file. - */ - -#pragma once - -#include <functional> - -#include "mongo/base/init.h" - -/** - * The `SHIM` mechanism allows for the creation of "weak-symbol-like" functions which can have their - * actual implementation injected in the final binary without creating a link dependency upon any - * actual implementation. One uses it like this: - * - * In a header: - * ``` - * class MyClass { - * public: - * static MONGO_DECLARE_SHIM((int)->std::string) helloWorldFunction; - * }; - * ``` - * - * In the corresponding C++ file (which is a link dependency): - * ``` - * MONGO_DEFINE_SHIM(MyClass::helloWorldFunction); - * ``` - * - * And in any number of implementation files: - * ``` - * MONGO_REGISTER_SHIM(MyClass::helloWorldFunction)(int value)->std::string { - * if (value == 42) { - * return "Hello World"; - * } else { - * return "No way!"; - * } - * } - * ``` - * - * This can be useful for making auto-registering and auto-constructing mock and release class - * factories, among other useful things - */ - -namespace mongo { -template <typename T> -struct PrivateCall; - -/** - * When declaring shim functions that should be private, they really need to be public; however, - * this type can be used as a parameter to permit the function to only be called by the type - * specified in the template parameter. - */ -template <typename T> -struct PrivateTo { -private: - friend PrivateCall<T>; - - PrivateTo() = default; -}; - -/** - * When calling shim functions that should be private, you pass an immediately created instance of - * the type `PrivateCall< T >`, where `T` is the type that `PrivateTo` requires as a template - * parameter. 
- */ -template <typename T> -struct PrivateCall { -private: - friend T; - PrivateCall() {} - -public: - operator PrivateTo<T>() { - return {}; - } -}; -} // namespace mongo - -namespace shim_detail { -/** - * This type, `storage`, is used as a workaround for needing C++17 `inline` variables. The template - * static member is effectively `inline` already. - */ -template <typename T, typename tag = void> -struct storage { - static T data; -}; - -template <typename T, typename tag> -T storage<T, tag>::data = {}; -} // namespace shim_detail - -#define MONGO_SHIM_DEPENDENTS ("ShimHooks") - -namespace mongo { -#ifdef MONGO_CONFIG_CHECK_SHIM_DEPENDENCIES -const bool checkShimsViaTUHook = true; -#define MONGO_SHIM_TU_HOOK(name) \ - name {} -#else -const bool checkShimsViaTUHook = false; -#define MONGO_SHIM_TU_HOOK(name) -#endif -} // namespace mongo - -/** - * Declare a shimmable function with signature `SHIM_SIGNATURE`. Declare such constructs in a C++ - * header as static members of a class. - */ -#define MONGO_DECLARE_SHIM(/*SHIM_SIGNATURE*/...) MONGO_DECLARE_SHIM_1(__LINE__, __VA_ARGS__) -#define MONGO_DECLARE_SHIM_1(LN, ...) MONGO_DECLARE_SHIM_2(LN, __VA_ARGS__) -#define MONGO_DECLARE_SHIM_2(LN, ...) \ - const struct ShimBasis_##LN { \ - ShimBasis_##LN() = default; \ - struct MongoShimImplGuts { \ - template <bool required = mongo::checkShimsViaTUHook> \ - struct AbiCheckType { \ - AbiCheckType() = default; \ - }; \ - using AbiCheck = AbiCheckType<>; \ - struct LibTUHookTypeBase { \ - LibTUHookTypeBase(); \ - }; \ - template <bool required = mongo::checkShimsViaTUHook> \ - struct LibTUHookType : LibTUHookTypeBase {}; \ - using LibTUHook = LibTUHookType<>; \ - struct ImplTUHookTypeBase { \ - ImplTUHookTypeBase(); \ - }; \ - template <bool required = mongo::checkShimsViaTUHook> \ - struct ImplTUHookType : ImplTUHookTypeBase {}; \ - using ImplTUHook = ImplTUHookType<>; \ - \ - static auto functionTypeHelper __VA_ARGS__; \ - /* Workaround for Microsoft -- by taking the address of this function pointer, we \ - * avoid the problems that their compiler has with default * arguments in deduced \ - * typedefs. */ \ - using function_type_pointer = decltype(&MongoShimImplGuts::functionTypeHelper); \ - using function_type = std::remove_pointer_t<function_type_pointer>; \ - MongoShimImplGuts* abi(const AbiCheck* const) { \ - return this; \ - } \ - MongoShimImplGuts* lib(const LibTUHook* const) { \ - MONGO_SHIM_TU_HOOK(LibTUHook); \ - return this; \ - } \ - MongoShimImplGuts* impl(const ImplTUHook* const) { \ - MONGO_SHIM_TU_HOOK(ImplTUHook); \ - return this; \ - } \ - virtual auto implementation __VA_ARGS__ = 0; \ - \ - using tag = \ - std::tuple<MongoShimImplGuts::function_type, AbiCheck, LibTUHook, ImplTUHook>; \ - }; \ - \ - using storage = shim_detail::storage<MongoShimImplGuts*, MongoShimImplGuts::tag>; \ - \ - /* TODO: When the dependency graph is fixed, add the `impl()->` call to the call chain */ \ - template <typename... Args> \ - auto operator()(Args&&... args) const \ - noexcept(noexcept(storage::data->abi(nullptr)->lib(nullptr)->implementation( \ - std::forward<Args>(args)...))) \ - -> decltype(storage::data->abi(nullptr)->lib(nullptr)->implementation( \ - std::forward<Args>(args)...)) { \ - return storage::data->abi(nullptr)->lib(nullptr)->implementation( \ - std::forward<Args>(args)...); \ - } \ - } - -/** - * Define a shimmable function with name `SHIM_NAME`, returning a value of type `RETURN_TYPE`, with - * any arguments. 
This shim definition macro should go in the associated C++ file to the header - * where a SHIM was defined. This macro does not emit a function definition, only the customization - * point's machinery. - */ -#define MONGO_DEFINE_SHIM(/*SHIM_NAME*/...) MONGO_DEFINE_SHIM_1(__LINE__, __VA_ARGS__) -#define MONGO_DEFINE_SHIM_1(LN, ...) MONGO_DEFINE_SHIM_2(LN, __VA_ARGS__) -#define MONGO_DEFINE_SHIM_2(LN, ...) \ - namespace { \ - namespace shim_namespace##LN { \ - using ShimType = decltype(__VA_ARGS__); \ - } /*namespace shim_namespace*/ \ - } /*namespace*/ \ - shim_namespace##LN::ShimType::MongoShimImplGuts::LibTUHookTypeBase::LibTUHookTypeBase() = \ - default; \ - shim_namespace##LN::ShimType __VA_ARGS__{}; - -#define MONGO_SHIM_EVIL_STRINGIFY_(args) #args - - -/** - * Define an implementation of a shimmable function with name `SHIM_NAME`. The compiler will check - * supplied parameters for correctness. This shim registration macro should go in the associated - * C++ implementation file to the header where a SHIM was defined. Such a file would be a mock - * implementation or a real implementation, for example - */ -#define MONGO_REGISTER_SHIM(/*SHIM_NAME*/...) MONGO_REGISTER_SHIM_1(__LINE__, __VA_ARGS__) -#define MONGO_REGISTER_SHIM_1(LN, ...) MONGO_REGISTER_SHIM_2(LN, __VA_ARGS__) -#define MONGO_REGISTER_SHIM_2(LN, ...) \ - namespace { \ - namespace shim_namespace##LN { \ - using ShimType = decltype(__VA_ARGS__); \ - \ - class Implementation final : public ShimType::MongoShimImplGuts { \ - /* Some compilers don't work well with the trailing `override` in this kind of \ - * function declaration. */ \ - ShimType::MongoShimImplGuts::function_type implementation; /* override */ \ - }; \ - \ - ::mongo::Status createInitializerRegistration(::mongo::InitializerContext* const) { \ - static Implementation impl; \ - ShimType::storage::data = &impl; \ - return Status::OK(); \ - } \ - \ - const ::mongo::GlobalInitializerRegisterer registrationHook{ \ - std::string(MONGO_SHIM_EVIL_STRINGIFY_((__VA_ARGS__))), \ - {}, \ - {MONGO_SHIM_DEPENDENTS}, \ - mongo::InitializerFunction(createInitializerRegistration)}; \ - } /*namespace shim_namespace*/ \ - } /*namespace*/ \ - \ - shim_namespace##LN::ShimType::MongoShimImplGuts::ImplTUHookTypeBase::ImplTUHookTypeBase() = \ - default; \ - \ - auto shim_namespace##LN::Implementation::implementation /* After this point someone just \ - writes the signature's arguments \ - and return value (using arrow \ - notation). Then they write the \ - body. 
*/ diff --git a/src/mongo/client/embedded/embedded.cpp b/src/mongo/client/embedded/embedded.cpp index bc5b744d16c..6ad81043143 100644 --- a/src/mongo/client/embedded/embedded.cpp +++ b/src/mongo/client/embedded/embedded.cpp @@ -147,7 +147,7 @@ void shutdown(ServiceContext* srvContext) { { UninterruptibleLockGuard noInterrupt(shutdownOpCtx->lockState()); Lock::GlobalLock lk(shutdownOpCtx.get(), MODE_X, Date_t::max()); - DatabaseHolder::getDatabaseHolder().closeAll(shutdownOpCtx.get(), "shutdown"); + dbHolder().closeAll(shutdownOpCtx.get(), "shutdown"); // Shut down the background periodic task runner if (auto runner = serviceContext->getPeriodicRunner()) { diff --git a/src/mongo/client/embedded/index_create_impl_embedded.cpp b/src/mongo/client/embedded/index_create_impl_embedded.cpp index a58560f3945..de56784d039 100644 --- a/src/mongo/client/embedded/index_create_impl_embedded.cpp +++ b/src/mongo/client/embedded/index_create_impl_embedded.cpp @@ -44,12 +44,14 @@ class MultiIndexBlockImplEmbedded : public MultiIndexBlockImpl { return false; } }; -} // namespace -MONGO_REGISTER_SHIM(MultiIndexBlock::makeImpl) -(OperationContext* const opCtx, Collection* const collection, PrivateTo<MultiIndexBlock>) - ->std::unique_ptr<MultiIndexBlock::Impl> { - return std::make_unique<MultiIndexBlockImplEmbedded>(opCtx, collection); +MONGO_INITIALIZER(InitializeMultiIndexBlockFactory)(InitializerContext* const) { + MultiIndexBlock::registerFactory( + [](OperationContext* const opCtx, Collection* const collection) { + return stdx::make_unique<MultiIndexBlockImplEmbedded>(opCtx, collection); + }); + return Status::OK(); } +} // namespace } // namespace mongo diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript index 5ed9e0ad896..c2f57ca82a4 100644 --- a/src/mongo/db/catalog/SConscript +++ b/src/mongo/db/catalog/SConscript @@ -130,7 +130,6 @@ env.Library( "database.cpp", ], LIBDEPS=[ - '$BUILD_DIR/mongo/base', ], ) diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp index f97cb254d72..684862b6a9f 100644 --- a/src/mongo/db/catalog/catalog_control.cpp +++ b/src/mongo/db/catalog/catalog_control.cpp @@ -56,7 +56,7 @@ void closeCatalog(OperationContext* opCtx) { // Close all databases. log() << "closeCatalog: closing all databases"; constexpr auto reason = "closing databases for closeCatalog"; - DatabaseHolder::getDatabaseHolder().closeAll(opCtx, reason); + dbHolder().closeAll(opCtx, reason); // Close the storage engine's catalog. 
log() << "closeCatalog: closing storage engine catalog"; @@ -145,7 +145,7 @@ void openCatalog(OperationContext* opCtx) { storageEngine->listDatabases(&databasesToOpen); for (auto&& dbName : databasesToOpen) { LOG(1) << "openCatalog: dbholder reopening database " << dbName; - auto db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName); + auto db = dbHolder().openDb(opCtx, dbName); invariant(db, str::stream() << "failed to reopen database " << dbName); std::list<std::string> collections; diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp index 915980a34d3..13bb493e5a6 100644 --- a/src/mongo/db/catalog/collection.cpp +++ b/src/mongo/db/catalog/collection.cpp @@ -70,14 +70,56 @@ namespace mongo { // Emit the vtable in this TU Collection::Impl::~Impl() = default; -MONGO_DEFINE_SHIM(Collection::makeImpl); +namespace { +stdx::function<Collection::factory_function_type> factory; +} // namespace -MONGO_DEFINE_SHIM(Collection::parseValidationLevel); +void Collection::registerFactory(decltype(factory) newFactory) { + factory = std::move(newFactory); +} -MONGO_DEFINE_SHIM(Collection::parseValidationAction); +auto Collection::makeImpl(Collection* _this, + OperationContext* const opCtx, + const StringData fullNS, + OptionalCollectionUUID uuid, + CollectionCatalogEntry* const details, + RecordStore* const recordStore, + DatabaseCatalogEntry* const dbce) -> std::unique_ptr<Impl> { + return factory(_this, opCtx, fullNS, uuid, details, recordStore, dbce); +} void Collection::TUHook::hook() noexcept {} + +namespace { +stdx::function<decltype(Collection::parseValidationLevel)> parseValidationLevelImpl; +} // namespace + +void Collection::registerParseValidationLevelImpl( + stdx::function<decltype(parseValidationLevel)> impl) { + parseValidationLevelImpl = std::move(impl); +} + +auto Collection::parseValidationLevel(const StringData data) -> StatusWith<ValidationLevel> { + return parseValidationLevelImpl(data); +} + +namespace { +stdx::function<decltype(Collection::parseValidationAction)> parseValidationActionImpl; +} // namespace + +void Collection::registerParseValidationActionImpl( + stdx::function<decltype(parseValidationAction)> impl) { + parseValidationActionImpl = std::move(impl); +} + +auto Collection::parseValidationAction(const StringData data) -> StatusWith<ValidationAction> { + return parseValidationActionImpl(data); +} +} // namespace mongo + + +namespace mongo { std::string CompactOptions::toString() const { std::stringstream ss; ss << "paddingMode: "; diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h index 9c41f8478f3..042c7a3a0c5 100644 --- a/src/mongo/db/catalog/collection.h +++ b/src/mongo/db/catalog/collection.h @@ -330,16 +330,19 @@ public: virtual const CollatorInterface* getDefaultCollator() const = 0; }; +private: + static std::unique_ptr<Impl> makeImpl(Collection* _this, + OperationContext* opCtx, + StringData fullNS, + OptionalCollectionUUID uuid, + CollectionCatalogEntry* details, + RecordStore* recordStore, + DatabaseCatalogEntry* dbce); + public: - static MONGO_DECLARE_SHIM((Collection * _this, - OperationContext* opCtx, - StringData fullNS, - OptionalCollectionUUID uuid, - CollectionCatalogEntry* details, - RecordStore* recordStore, - DatabaseCatalogEntry* dbce, - PrivateTo<Collection>) - ->std::unique_ptr<Impl>) makeImpl; + using factory_function_type = decltype(makeImpl); + + static void registerFactory(stdx::function<factory_function_type> factory); explicit inline Collection(OperationContext* const 
opCtx, const StringData fullNS, @@ -347,8 +350,7 @@ public: CollectionCatalogEntry* const details, // does not own RecordStore* const recordStore, // does not own DatabaseCatalogEntry* const dbce) // does not own - : _pimpl(makeImpl( - this, opCtx, fullNS, uuid, details, recordStore, dbce, PrivateCall<Collection>{})) { + : _pimpl(makeImpl(this, opCtx, fullNS, uuid, details, recordStore, dbce)) { this->_impl().init(opCtx); } @@ -625,8 +627,13 @@ public: opCtx, validator, allowedFeatures, maxFeatureCompatibilityVersion); } - static MONGO_DECLARE_SHIM((StringData)->StatusWith<ValidationLevel>) parseValidationLevel; - static MONGO_DECLARE_SHIM((StringData)->StatusWith<ValidationAction>) parseValidationAction; + static StatusWith<ValidationLevel> parseValidationLevel(StringData); + static StatusWith<ValidationAction> parseValidationAction(StringData); + + static void registerParseValidationLevelImpl( + stdx::function<decltype(parseValidationLevel)> impl); + static void registerParseValidationActionImpl( + stdx::function<decltype(parseValidationAction)> impl); /** * Sets the validator for this collection. diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp index f08d5be410e..74909e46f0c 100644 --- a/src/mongo/db/catalog/collection_impl.cpp +++ b/src/mongo/db/catalog/collection_impl.cpp @@ -76,30 +76,34 @@ namespace mongo { -MONGO_REGISTER_SHIM(Collection::makeImpl) -(Collection* const _this, - OperationContext* const opCtx, - const StringData fullNS, - OptionalCollectionUUID uuid, - CollectionCatalogEntry* const details, - RecordStore* const recordStore, - DatabaseCatalogEntry* const dbce, - PrivateTo<Collection>) - ->std::unique_ptr<Collection::Impl> { - return std::make_unique<CollectionImpl>(_this, opCtx, fullNS, uuid, details, recordStore, dbce); +namespace { +MONGO_INITIALIZER(InitializeCollectionFactory)(InitializerContext* const) { + Collection::registerFactory( + [](Collection* const _this, + OperationContext* const opCtx, + const StringData fullNS, + OptionalCollectionUUID uuid, + CollectionCatalogEntry* const details, + RecordStore* const recordStore, + DatabaseCatalogEntry* const dbce) -> std::unique_ptr<Collection::Impl> { + return stdx::make_unique<CollectionImpl>( + _this, opCtx, fullNS, uuid, details, recordStore, dbce); + }); + return Status::OK(); } -MONGO_REGISTER_SHIM(Collection::parseValidationLevel) -(const StringData data)->StatusWith<Collection::ValidationLevel> { - return CollectionImpl::parseValidationLevel(data); +MONGO_INITIALIZER(InitializeParseValidationLevelImpl)(InitializerContext* const) { + Collection::registerParseValidationLevelImpl( + [](const StringData data) { return CollectionImpl::parseValidationLevel(data); }); + return Status::OK(); } -MONGO_REGISTER_SHIM(Collection::parseValidationAction) -(const StringData data)->StatusWith<Collection::ValidationAction> { - return CollectionImpl::parseValidationAction(data); +MONGO_INITIALIZER(InitializeParseValidationActionImpl)(InitializerContext* const) { + Collection::registerParseValidationActionImpl( + [](const StringData data) { return CollectionImpl::parseValidationAction(data); }); + return Status::OK(); } -namespace { // Used below to fail during inserts. 
MONGO_FP_DECLARE(failCollectionInserts); diff --git a/src/mongo/db/catalog/collection_info_cache.cpp b/src/mongo/db/catalog/collection_info_cache.cpp index 0a36bb07448..4bd1e4715f7 100644 --- a/src/mongo/db/catalog/collection_info_cache.cpp +++ b/src/mongo/db/catalog/collection_info_cache.cpp @@ -33,9 +33,20 @@ #include "mongo/db/catalog/collection_info_cache.h" namespace mongo { +namespace { +stdx::function<CollectionInfoCache::factory_function_type> factory; +} // namespace + CollectionInfoCache::Impl::~Impl() = default; -MONGO_DEFINE_SHIM(CollectionInfoCache::makeImpl); +void CollectionInfoCache::registerFactory(decltype(factory) newFactory) { + factory = std::move(newFactory); +} + +auto CollectionInfoCache::makeImpl(Collection* const collection, const NamespaceString& ns) + -> std::unique_ptr<Impl> { + return factory(collection, ns); +} void CollectionInfoCache::TUHook::hook() noexcept {} } // namespace mongo diff --git a/src/mongo/db/catalog/collection_info_cache.h b/src/mongo/db/catalog/collection_info_cache.h index 6fb0fa5278e..9bc07e80c61 100644 --- a/src/mongo/db/catalog/collection_info_cache.h +++ b/src/mongo/db/catalog/collection_info_cache.h @@ -28,7 +28,6 @@ #pragma once -#include "mongo/base/shim.h" #include "mongo/db/collection_index_usage_tracker.h" #include "mongo/db/query/plan_cache.h" #include "mongo/db/query/query_settings.h" @@ -70,15 +69,16 @@ public: const std::set<std::string>& indexesUsed) = 0; }; +private: + static std::unique_ptr<Impl> makeImpl(Collection* collection, const NamespaceString& ns); public: - static MONGO_DECLARE_SHIM((Collection * collection, - const NamespaceString& ns, - PrivateTo<CollectionInfoCache>) - ->std::unique_ptr<Impl>) makeImpl; + using factory_function_type = decltype(makeImpl); + + static void registerFactory(stdx::function<factory_function_type> factory); explicit inline CollectionInfoCache(Collection* const collection, const NamespaceString& ns) - : _pimpl(makeImpl(collection, ns, PrivateCall<CollectionInfoCache>{})) {} + : _pimpl(makeImpl(collection, ns)) {} inline ~CollectionInfoCache() = default; diff --git a/src/mongo/db/catalog/collection_info_cache_impl.cpp b/src/mongo/db/catalog/collection_info_cache_impl.cpp index d23da8c5a81..b3366f70ae2 100644 --- a/src/mongo/db/catalog/collection_info_cache_impl.cpp +++ b/src/mongo/db/catalog/collection_info_cache_impl.cpp @@ -49,11 +49,15 @@ #include "mongo/util/log.h" namespace mongo { -MONGO_REGISTER_SHIM(CollectionInfoCache::makeImpl) -(Collection* const collection, const NamespaceString& ns, PrivateTo<CollectionInfoCache>) - ->std::unique_ptr<CollectionInfoCache::Impl> { - return std::make_unique<CollectionInfoCacheImpl>(collection, ns); +namespace { +MONGO_INITIALIZER(InitializeCollectionInfoCacheFactory)(InitializerContext* const) { + CollectionInfoCache::registerFactory( + [](Collection* const collection, const NamespaceString& ns) { + return stdx::make_unique<CollectionInfoCacheImpl>(collection, ns); + }); + return Status::OK(); } +} // namespace CollectionInfoCacheImpl::CollectionInfoCacheImpl(Collection* collection, const NamespaceString& ns) : _collection(collection), diff --git a/src/mongo/db/catalog/collection_info_cache_impl.h b/src/mongo/db/catalog/collection_info_cache_impl.h index 38e03f68a7b..58be19ed045 100644 --- a/src/mongo/db/catalog/collection_info_cache_impl.h +++ b/src/mongo/db/catalog/collection_info_cache_impl.h @@ -30,7 +30,6 @@ #include "mongo/db/catalog/collection_info_cache.h" -#include "mongo/base/shim.h" #include 
"mongo/db/collection_index_usage_tracker.h" #include "mongo/db/query/plan_cache.h" #include "mongo/db/query/query_settings.h" diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp index f8258d36f07..950420fa787 100644 --- a/src/mongo/db/catalog/create_collection.cpp +++ b/src/mongo/db/catalog/create_collection.cpp @@ -101,8 +101,8 @@ Status createCollection(OperationContext* opCtx, // Create collection. const bool createDefaultIndexes = true; - status = Database::userCreateNS( - opCtx, ctx.db(), nss.ns(), options, kind, createDefaultIndexes, idIndex); + status = + userCreateNS(opCtx, ctx.db(), nss.ns(), options, kind, createDefaultIndexes, idIndex); if (!status.isOK()) { return status; @@ -137,7 +137,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx, auto newCmd = cmdObj; auto* const serviceContext = opCtx->getServiceContext(); - auto* const db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName); + auto* const db = dbHolder().get(opCtx, dbName); // If a UUID is given, see if we need to rename a collection out of the way, and whether the // collection already exists under a different name. If so, rename it into place. As this is diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp index a9963a2dc7d..527c9e847ab 100644 --- a/src/mongo/db/catalog/database.cpp +++ b/src/mongo/db/catalog/database.cpp @@ -36,13 +36,66 @@ namespace mongo { Database::Impl::~Impl() = default; -MONGO_DEFINE_SHIM(Database::makeImpl); +namespace { +stdx::function<Database::factory_function_type> factory; +} // namespace + +void Database::registerFactory(decltype(factory) newFactory) { + factory = std::move(newFactory); +} + +auto Database::makeImpl(Database* const this_, + OperationContext* const opCtx, + const StringData name, + DatabaseCatalogEntry* const dbEntry) -> std::unique_ptr<Impl> { + return factory(this_, opCtx, name, dbEntry); +} void Database::TUHook::hook() noexcept {} -MONGO_DEFINE_SHIM(Database::dropDatabase); +namespace { +stdx::function<decltype(Database::dropDatabase)> dropDatabaseImpl; +} + +void Database::dropDatabase(OperationContext* const opCtx, Database* const db) { + return dropDatabaseImpl(opCtx, db); +} -MONGO_DEFINE_SHIM(Database::userCreateNS); +void Database::registerDropDatabaseImpl(stdx::function<decltype(dropDatabase)> impl) { + dropDatabaseImpl = std::move(impl); +} -MONGO_DEFINE_SHIM(Database::dropAllDatabasesExceptLocal); +namespace { +stdx::function<decltype(userCreateNS)> userCreateNSImpl; +stdx::function<decltype(dropAllDatabasesExceptLocal)> dropAllDatabasesExceptLocalImpl; +} // namespace } // namespace mongo + +auto mongo::userCreateNS(OperationContext* const opCtx, + Database* const db, + const StringData ns, + const BSONObj options, + const CollectionOptions::ParseKind parseKind, + const bool createDefaultIndexes, + const BSONObj& idIndex) -> Status { + return userCreateNSImpl(opCtx, db, ns, options, parseKind, createDefaultIndexes, idIndex); +} + +void mongo::registerUserCreateNSImpl(stdx::function<decltype(userCreateNS)> impl) { + userCreateNSImpl = std::move(impl); +} + +void mongo::dropAllDatabasesExceptLocal(OperationContext* const opCtx) { + return dropAllDatabasesExceptLocalImpl(opCtx); +} + +/** + * Registers an implementation of `dropAllDatabaseExceptLocal` for use by library clients. + * This is necessary to allow `catalog/database` to be a vtable edge. + * @param impl Implementation of `dropAllDatabaseExceptLocal` to install. + * @note This call is not thread safe. 
+ */ +void mongo::registerDropAllDatabasesExceptLocalImpl( + stdx::function<decltype(dropAllDatabasesExceptLocal)> impl) { + dropAllDatabasesExceptLocalImpl = std::move(impl); +} diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h index 2a5e281c1b5..e5fab444357 100644 --- a/src/mongo/db/catalog/database.h +++ b/src/mongo/db/catalog/database.h @@ -31,8 +31,6 @@ #include <memory> #include <string> -#include "mongo/base/shim.h" -#include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection.h" @@ -47,7 +45,6 @@ #include "mongo/util/string_map.h" namespace mongo { - /** * Represents a logical database containing Collections. * @@ -126,34 +123,16 @@ public: virtual const CollectionMap& collections() const = 0; }; +private: + static std::unique_ptr<Impl> makeImpl(Database* _this, + OperationContext* opCtx, + StringData name, + DatabaseCatalogEntry* dbEntry); + public: - static MONGO_DECLARE_SHIM((OperationContext * opCtx)->void) dropAllDatabasesExceptLocal; + using factory_function_type = decltype(makeImpl); - /** - * Creates the namespace 'ns' in the database 'db' according to 'options'. If - * 'createDefaultIndexes' - * is true, creates the _id index for the collection (and the system indexes, in the case of - * system - * collections). Creates the collection's _id index according to 'idIndex', if it is non-empty. - * When - * 'idIndex' is empty, creates the default _id index. - */ - static MONGO_DECLARE_SHIM( - (OperationContext * opCtx, - Database* db, - StringData ns, - BSONObj options, - CollectionOptions::ParseKind parseKind = CollectionOptions::parseForCommand, - bool createDefaultIndexes = true, - const BSONObj& idIndex = BSONObj()) - ->Status) userCreateNS; - - static MONGO_DECLARE_SHIM((Database * this_, - OperationContext* opCtx, - StringData name, - DatabaseCatalogEntry*, - PrivateTo<Database>) - ->std::unique_ptr<Impl>) makeImpl; + static void registerFactory(stdx::function<factory_function_type> factory); /** * Iterating over a Database yields Collection* pointers. @@ -203,7 +182,7 @@ public: explicit inline Database(OperationContext* const opCtx, const StringData name, DatabaseCatalogEntry* const dbEntry) - : _pimpl(makeImpl(this, opCtx, name, dbEntry, PrivateCall<Database>{})) { + : _pimpl(makeImpl(this, opCtx, name, dbEntry)) { this->_impl().init(opCtx); } @@ -356,7 +335,17 @@ public: * * Must be called with the specified database locked in X mode. */ - static MONGO_DECLARE_SHIM((OperationContext * opCtx, Database* db)->void) dropDatabase; + static void dropDatabase(OperationContext* opCtx, Database* db); + + /** + * Registers an implementation of `Database::dropDatabase` for use by library clients. + * This is necessary to allow `catalog/database` to be a vtable edge. + * @param impl Implementation of `dropDatabase` to install. + * @note This call is not thread safe. + */ + static void registerDropDatabaseImpl(stdx::function<decltype(dropDatabase)> impl); + + // static Status validateDBName( StringData dbname ); inline const NamespaceString& getSystemIndexesName() const { return this->_impl().getSystemIndexesName(); @@ -406,4 +395,37 @@ private: std::unique_ptr<Impl> _pimpl; }; + +void dropAllDatabasesExceptLocal(OperationContext* opCtx); + +/** + * Registers an implementation of `dropAllDatabaseExceptLocal` for use by library clients. + * This is necessary to allow `catalog/database` to be a vtable edge. 
+ * @param impl Implementation of `dropAllDatabaseExceptLocal` to install. + * @note This call is not thread safe. + */ +void registerDropAllDatabasesExceptLocalImpl( + stdx::function<decltype(dropAllDatabasesExceptLocal)> impl); + +/** + * Creates the namespace 'ns' in the database 'db' according to 'options'. If 'createDefaultIndexes' + * is true, creates the _id index for the collection (and the system indexes, in the case of system + * collections). Creates the collection's _id index according to 'idIndex', if it is non-empty. When + * 'idIndex' is empty, creates the default _id index. + */ +Status userCreateNS(OperationContext* opCtx, + Database* db, + StringData ns, + BSONObj options, + CollectionOptions::ParseKind parseKind = CollectionOptions::parseForCommand, + bool createDefaultIndexes = true, + const BSONObj& idIndex = BSONObj()); + +/** + * Registers an implementation of `userCreateNS` for use by library clients. + * This is necessary to allow `catalog/database` to be a vtable edge. + * @param impl Implementation of `userCreateNS` to install. + * @note This call is not thread safe. + */ +void registerUserCreateNSImpl(stdx::function<decltype(userCreateNS)> impl); } // namespace mongo diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp index b40577d797f..0b0c4336662 100644 --- a/src/mongo/db/catalog/database_holder.cpp +++ b/src/mongo/db/catalog/database_holder.cpp @@ -36,9 +36,32 @@ namespace mongo { DatabaseHolder::Impl::~Impl() = default; -void DatabaseHolder::TUHook::hook() noexcept {} +namespace { +stdx::function<DatabaseHolder::factory_function_type> factory; +} // namespace + +void DatabaseHolder::registerFactory(decltype(factory) newFactory) { + factory = std::move(newFactory); +} + +auto DatabaseHolder::makeImpl() -> std::unique_ptr<Impl> { + return factory(); +} -MONGO_DEFINE_SHIM(DatabaseHolder::makeImpl); -MONGO_DEFINE_SHIM(DatabaseHolder::getDatabaseHolder); +void DatabaseHolder::TUHook::hook() noexcept {} +namespace { +stdx::function<decltype(dbHolder)> dbHolderImpl; +} // namespace } // namespace mongo + +// The `mongo::` prefix is necessary to placate MSVC -- it is unable to properly identify anonymous +// nested namespace members in `decltype` expressions when defining functions using scope-resolution +// syntax. 
+void mongo::registerDbHolderImpl(decltype(mongo::dbHolderImpl) impl) { + dbHolderImpl = std::move(impl); +} + +auto mongo::dbHolder() -> DatabaseHolder& { + return dbHolderImpl(); +} diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h index 3af319ab963..0edde9cfccf 100644 --- a/src/mongo/db/catalog/database_holder.h +++ b/src/mongo/db/catalog/database_holder.h @@ -31,7 +31,6 @@ #include <set> #include <string> -#include "mongo/base/shim.h" #include "mongo/base/string_data.h" #include "mongo/db/namespace_string.h" #include "mongo/stdx/functional.h" @@ -63,14 +62,17 @@ public: virtual std::set<std::string> getNamesWithConflictingCasing(StringData name) = 0; }; +private: + static std::unique_ptr<Impl> makeImpl(); + public: - static MONGO_DECLARE_SHIM(()->DatabaseHolder&) getDatabaseHolder; + using factory_function_type = decltype(makeImpl); - static MONGO_DECLARE_SHIM((PrivateTo<DatabaseHolder>)->std::unique_ptr<Impl>) makeImpl; + static void registerFactory(stdx::function<factory_function_type> factory); inline ~DatabaseHolder() = default; - inline explicit DatabaseHolder() : _pimpl(makeImpl(PrivateCall<DatabaseHolder>{})) {} + inline explicit DatabaseHolder() : _pimpl(makeImpl()) {} /** * Retrieves an already opened database or returns NULL. Must be called with the database @@ -145,4 +147,7 @@ private: std::unique_ptr<Impl> _pimpl; }; + +extern DatabaseHolder& dbHolder(); +extern void registerDbHolderImpl(stdx::function<decltype(dbHolder)> impl); } // namespace mongo diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp index df70ef25c27..d5b5560df67 100644 --- a/src/mongo/db/catalog/database_holder_impl.cpp +++ b/src/mongo/db/catalog/database_holder_impl.cpp @@ -50,29 +50,32 @@ namespace mongo { namespace { -std::unique_ptr<DatabaseHolder> dbHolderStorage; + +DatabaseHolder* _dbHolder = nullptr; + +DatabaseHolder& dbHolderImpl() { + return *_dbHolder; +} GlobalInitializerRegisterer dbHolderImplInitializer("InitializeDbHolderimpl", + {"InitializeDatabaseHolderFactory"}, [](InitializerContext* const) { - dbHolderStorage = - std::make_unique<DatabaseHolder>(); + _dbHolder = new DatabaseHolder(); + registerDbHolderImpl(dbHolderImpl); return Status::OK(); }, [](DeinitializerContext* const) { - dbHolderStorage = nullptr; + delete _dbHolder; + _dbHolder = nullptr; return Status::OK(); }); -} // namespace -MONGO_REGISTER_SHIM(DatabaseHolder::getDatabaseHolder) -()->DatabaseHolder& { - return *dbHolderStorage; +MONGO_INITIALIZER(InitializeDatabaseHolderFactory)(InitializerContext* const) { + DatabaseHolder::registerFactory([] { return stdx::make_unique<DatabaseHolderImpl>(); }); + return Status::OK(); } -MONGO_REGISTER_SHIM(DatabaseHolder::makeImpl) -(PrivateTo<DatabaseHolder>)->std::unique_ptr<DatabaseHolder::Impl> { - return std::make_unique<DatabaseHolderImpl>(); -} +} // namespace using std::set; using std::size_t; diff --git a/src/mongo/db/catalog/database_holder_mock.cpp b/src/mongo/db/catalog/database_holder_mock.cpp index 561713fb555..694091f8afc 100644 --- a/src/mongo/db/catalog/database_holder_mock.cpp +++ b/src/mongo/db/catalog/database_holder_mock.cpp @@ -33,15 +33,23 @@ #include "mongo/stdx/memory.h" namespace mongo { +namespace { -MONGO_REGISTER_SHIM(DatabaseHolder::getDatabaseHolder)()->DatabaseHolder& { +DatabaseHolder& dbHolderImpl() { static DatabaseHolder _dbHolder; return _dbHolder; } -MONGO_REGISTER_SHIM(DatabaseHolder::makeImpl) 
-(PrivateTo<DatabaseHolder>)->std::unique_ptr<DatabaseHolder::Impl> { - return stdx::make_unique<DatabaseHolderMock>(); +MONGO_INITIALIZER_WITH_PREREQUISITES(InitializeDbHolderimpl, ("InitializeDatabaseHolderFactory")) +(InitializerContext* const) { + registerDbHolderImpl(dbHolderImpl); + return Status::OK(); } +MONGO_INITIALIZER(InitializeDatabaseHolderFactory)(InitializerContext* const) { + DatabaseHolder::registerFactory([] { return stdx::make_unique<DatabaseHolderMock>(); }); + return Status::OK(); +} + +} // namespace } // namespace mongo diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp index 4d195d3cf58..fca3b112818 100644 --- a/src/mongo/db/catalog/database_impl.cpp +++ b/src/mongo/db/catalog/database_impl.cpp @@ -78,17 +78,16 @@ #include "mongo/util/log.h" namespace mongo { -MONGO_REGISTER_SHIM(Database::makeImpl) -(Database* const this_, - OperationContext* const opCtx, - const StringData name, - DatabaseCatalogEntry* const dbEntry, - PrivateTo<Database>) - ->std::unique_ptr<Database::Impl> { - return stdx::make_unique<DatabaseImpl>(this_, opCtx, name, dbEntry); -} - namespace { +MONGO_INITIALIZER(InitializeDatabaseFactory)(InitializerContext* const) { + Database::registerFactory([](Database* const this_, + OperationContext* const opCtx, + const StringData name, + DatabaseCatalogEntry* const dbEntry) { + return stdx::make_unique<DatabaseImpl>(this_, opCtx, name, dbEntry); + }); + return Status::OK(); +} MONGO_FP_DECLARE(hangBeforeLoggingCreateCollection); } // namespace @@ -866,7 +865,7 @@ void DatabaseImpl::dropDatabase(OperationContext* opCtx, Database* db) { Top::get(serviceContext).collectionDropped(coll->ns().ns(), true); } - DatabaseHolder::getDatabaseHolder().close(opCtx, name, "database dropped"); + dbHolder().close(opCtx, name, "database dropped"); auto const storageEngine = serviceContext->getGlobalStorageEngine(); writeConflictRetry(opCtx, "dropDatabase", name, [&] { @@ -896,7 +895,7 @@ StatusWith<NamespaceString> DatabaseImpl::makeUniqueCollectionNamespace( if (!_uniqueCollectionNamespacePseudoRandom) { _uniqueCollectionNamespacePseudoRandom = - std::make_unique<PseudoRandom>(Date_t::now().asInt64()); + stdx::make_unique<PseudoRandom>(Date_t::now().asInt64()); } const auto charsToChooseFrom = @@ -936,19 +935,60 @@ StatusWith<NamespaceString> DatabaseImpl::makeUniqueCollectionNamespace( << " attempts due to namespace conflicts with existing collections."); } -MONGO_REGISTER_SHIM(Database::dropDatabase)(OperationContext* opCtx, Database* db)->void { - return DatabaseImpl::dropDatabase(opCtx, db); +namespace { +MONGO_INITIALIZER(InitializeDropDatabaseImpl)(InitializerContext* const) { + Database::registerDropDatabaseImpl(DatabaseImpl::dropDatabase); + return Status::OK(); +} +MONGO_INITIALIZER(InitializeUserCreateNSImpl)(InitializerContext* const) { + registerUserCreateNSImpl(userCreateNSImpl); + return Status::OK(); +} + +MONGO_INITIALIZER(InitializeDropAllDatabasesExceptLocalImpl)(InitializerContext* const) { + registerDropAllDatabasesExceptLocalImpl(dropAllDatabasesExceptLocalImpl); + return Status::OK(); } +} // namespace +} // namespace mongo + +void mongo::dropAllDatabasesExceptLocalImpl(OperationContext* opCtx) { + Lock::GlobalWrite lk(opCtx); + + vector<string> n; + StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine(); + storageEngine->listDatabases(&n); + + if (n.size() == 0) + return; + log() << "dropAllDatabasesExceptLocal " << n.size(); + + 
repl::ReplicationCoordinator::get(opCtx)->dropAllSnapshots(); + + for (const auto& dbName : n) { + if (dbName != "local") { + writeConflictRetry(opCtx, "dropAllDatabasesExceptLocal", dbName, [&opCtx, &dbName] { + Database* db = dbHolder().get(opCtx, dbName); -MONGO_REGISTER_SHIM(Database::userCreateNS) -(OperationContext* opCtx, - Database* db, - StringData ns, - BSONObj options, - CollectionOptions::ParseKind parseKind, - bool createDefaultIndexes, - const BSONObj& idIndex) - ->Status { + // This is needed since dropDatabase can't be rolled back. + // This is safe be replaced by "invariant(db);dropDatabase(opCtx, db);" once fixed + if (db == nullptr) { + log() << "database disappeared after listDatabases but before drop: " << dbName; + } else { + DatabaseImpl::dropDatabase(opCtx, db); + } + }); + } + } +} + +auto mongo::userCreateNSImpl(OperationContext* opCtx, + Database* db, + StringData ns, + BSONObj options, + CollectionOptions::ParseKind parseKind, + bool createDefaultIndexes, + const BSONObj& idIndex) -> Status { invariant(db); LOG(1) << "create collection " << ns << ' ' << options; @@ -1047,34 +1087,3 @@ MONGO_REGISTER_SHIM(Database::userCreateNS) return Status::OK(); } - -MONGO_REGISTER_SHIM(Database::dropAllDatabasesExceptLocal)(OperationContext* opCtx)->void { - Lock::GlobalWrite lk(opCtx); - - vector<string> n; - StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine(); - storageEngine->listDatabases(&n); - - if (n.size() == 0) - return; - log() << "dropAllDatabasesExceptLocal " << n.size(); - - repl::ReplicationCoordinator::get(opCtx)->dropAllSnapshots(); - - for (const auto& dbName : n) { - if (dbName != "local") { - writeConflictRetry(opCtx, "dropAllDatabasesExceptLocal", dbName, [&opCtx, &dbName] { - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName); - - // This is needed since dropDatabase can't be rolled back. - // This is safe be replaced by "invariant(db);dropDatabase(opCtx, db);" once fixed - if (db == nullptr) { - log() << "database disappeared after listDatabases but before drop: " << dbName; - } else { - DatabaseImpl::dropDatabase(opCtx, db); - } - }); - } - } -} -} // namespace mongo diff --git a/src/mongo/db/catalog/database_impl.h b/src/mongo/db/catalog/database_impl.h index 8a341037778..e2102e3154b 100644 --- a/src/mongo/db/catalog/database_impl.h +++ b/src/mongo/db/catalog/database_impl.h @@ -309,4 +309,18 @@ private: void dropAllDatabasesExceptLocalImpl(OperationContext* opCtx); +/** + * Creates the namespace 'ns' in the database 'db' according to 'options'. If 'createDefaultIndexes' + * is true, creates the _id index for the collection (and the system indexes, in the case of system + * collections). Creates the collection's _id index according to 'idIndex', if it is non-empty. When + * 'idIndex' is empty, creates the default _id index. 
+ */ +Status userCreateNSImpl(OperationContext* opCtx, + Database* db, + StringData ns, + BSONObj options, + CollectionOptions::ParseKind parseKind = CollectionOptions::parseForCommand, + bool createDefaultIndexes = true, + const BSONObj& idIndex = BSONObj()); + } // namespace mongo diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp index 7a2a28a241a..4d9288e841d 100644 --- a/src/mongo/db/catalog/index_catalog.cpp +++ b/src/mongo/db/catalog/index_catalog.cpp @@ -37,7 +37,19 @@ namespace mongo { IndexCatalog::Impl::~Impl() = default; -MONGO_DEFINE_SHIM(IndexCatalog::makeImpl); +namespace { +IndexCatalog::factory_function_type factory; +} // namespace + +void IndexCatalog::registerFactory(decltype(factory) newFactory) { + factory = std::move(newFactory); +} + +auto IndexCatalog::makeImpl(IndexCatalog* const this_, + Collection* const collection, + const int maxNumIndexesAllowed) -> std::unique_ptr<Impl> { + return factory(this_, collection, maxNumIndexesAllowed); +} void IndexCatalog::TUHook::hook() noexcept {} @@ -51,11 +63,38 @@ IndexCatalogEntry* IndexCatalog::_setupInMemoryStructures( IndexCatalog::IndexIterator::Impl::~Impl() = default; -MONGO_DEFINE_SHIM(IndexCatalog::IndexIterator::makeImpl); +namespace { +IndexCatalog::IndexIterator::factory_function_type iteratorFactory; +} // namespace + +void IndexCatalog::IndexIterator::registerFactory(decltype(iteratorFactory) newFactory) { + iteratorFactory = std::move(newFactory); +} + +auto IndexCatalog::IndexIterator::makeImpl(OperationContext* const opCtx, + const IndexCatalog* const cat, + const bool includeUnfinishedIndexes) + -> std::unique_ptr<Impl> { + return iteratorFactory(opCtx, cat, includeUnfinishedIndexes); +} void IndexCatalog::IndexIterator::TUHook::hook() noexcept {} -MONGO_DEFINE_SHIM(IndexCatalog::fixIndexKey); +namespace { +stdx::function<decltype(IndexCatalog::fixIndexKey)> fixIndexKeyImpl; +} // namespace + +void IndexCatalog::registerFixIndexKeyImpl(decltype(fixIndexKeyImpl) impl) { + fixIndexKeyImpl = std::move(impl); +} + +BSONObj IndexCatalog::fixIndexKey(const BSONObj& key) { + return fixIndexKeyImpl(key); +} + +namespace { +stdx::function<decltype(IndexCatalog::prepareInsertDeleteOptions)> prepareInsertDeleteOptionsImpl; +} // namespace std::string::size_type IndexCatalog::getLongestIndexNameLength(OperationContext* opCtx) const { IndexCatalog::IndexIterator it = getIndexIterator(opCtx, true); @@ -68,5 +107,14 @@ std::string::size_type IndexCatalog::getLongestIndexNameLength(OperationContext* return longestIndexNameLength; } -MONGO_DEFINE_SHIM(IndexCatalog::prepareInsertDeleteOptions); +void IndexCatalog::prepareInsertDeleteOptions(OperationContext* const opCtx, + const IndexDescriptor* const desc, + InsertDeleteOptions* const options) { + return prepareInsertDeleteOptionsImpl(opCtx, desc, options); +} + +void IndexCatalog::registerPrepareInsertDeleteOptionsImpl( + stdx::function<decltype(prepareInsertDeleteOptions)> impl) { + prepareInsertDeleteOptionsImpl = std::move(impl); +} } // namespace mongo diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h index bb9902f91cd..5e5b17281e2 100644 --- a/src/mongo/db/catalog/index_catalog.h +++ b/src/mongo/db/catalog/index_catalog.h @@ -32,7 +32,6 @@ #include <vector> #include "mongo/base/clonable_ptr.h" -#include "mongo/base/shim.h" #include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/index/multikey_paths.h" #include "mongo/db/jsobj.h" @@ -77,20 +76,20 @@ public: virtual 
IndexCatalogEntry* catalogEntry(const IndexDescriptor* desc) = 0; }; - static MONGO_DECLARE_SHIM((OperationContext * opCtx, - const IndexCatalog* cat, - bool includeUnfinishedIndexes, - PrivateTo<IndexIterator>) - ->std::unique_ptr<Impl>) makeImpl; - private: + static std::unique_ptr<Impl> makeImpl(OperationContext* opCtx, + const IndexCatalog* cat, + bool includeUnfinishedIndexes); + explicit inline IndexIterator(OperationContext* const opCtx, const IndexCatalog* const cat, const bool includeUnfinishedIndexes) - : _pimpl(makeImpl(opCtx, cat, includeUnfinishedIndexes, PrivateCall<IndexIterator>{})) { - } + : _pimpl(makeImpl(opCtx, cat, includeUnfinishedIndexes)) {} public: + using factory_function_type = stdx::function<decltype(makeImpl)>; + static void registerFactory(factory_function_type factory); + inline ~IndexIterator() = default; inline IndexIterator(const IndexIterator& copy) = default; @@ -256,17 +255,19 @@ public: friend IndexCatalog; }; +private: + static std::unique_ptr<Impl> makeImpl(IndexCatalog* this_, + Collection* collection, + int maxNumIndexesAllowed); + public: - static MONGO_DECLARE_SHIM((IndexCatalog * this_, - Collection* collection, - int maxNumIndexesAllowed, - PrivateTo<IndexCatalog>) - ->std::unique_ptr<Impl>) makeImpl; + using factory_function_type = stdx::function<decltype(makeImpl)>; + static void registerFactory(factory_function_type factory); inline ~IndexCatalog() = default; explicit inline IndexCatalog(Collection* const collection, const int maxNumIndexesAllowed) - : _pimpl(makeImpl(this, collection, maxNumIndexesAllowed, PrivateCall<IndexCatalog>{})) {} + : _pimpl(makeImpl(this, collection, maxNumIndexesAllowed)) {} inline IndexCatalog(IndexCatalog&&) = delete; inline IndexCatalog& operator=(IndexCatalog&&) = delete; @@ -537,15 +538,18 @@ public: // public static helpers - static MONGO_DECLARE_SHIM((const BSONObj& key)->BSONObj) fixIndexKey; + static BSONObj fixIndexKey(const BSONObj& key); + static void registerFixIndexKeyImpl(stdx::function<decltype(fixIndexKey)> impl); /** * Fills out 'options' in order to indicate whether to allow dups or relax * index constraints, as needed by replication. 
*/ - static MONGO_DECLARE_SHIM( - (OperationContext * opCtx, const IndexDescriptor* desc, InsertDeleteOptions* options)->void) - prepareInsertDeleteOptions; + static void prepareInsertDeleteOptions(OperationContext* opCtx, + const IndexDescriptor* desc, + InsertDeleteOptions* options); + static void registerPrepareInsertDeleteOptionsImpl( + stdx::function<decltype(prepareInsertDeleteOptions)> impl); private: inline const Collection* _getCollection() const { diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp index 1972c2237d4..94a05fde7ac 100644 --- a/src/mongo/db/catalog/index_catalog_entry.cpp +++ b/src/mongo/db/catalog/index_catalog_entry.cpp @@ -38,7 +38,22 @@ namespace mongo { IndexCatalogEntry::Impl::~Impl() = default; -MONGO_DEFINE_SHIM(IndexCatalogEntry::makeImpl); +namespace { +stdx::function<IndexCatalogEntry::factory_function_type> factory; +} // namespace + +void IndexCatalogEntry::registerFactory(decltype(factory) newFactory) { + factory = std::move(newFactory); +} + +auto IndexCatalogEntry::makeImpl(IndexCatalogEntry* const this_, + OperationContext* const opCtx, + const StringData ns, + CollectionCatalogEntry* const collection, + std::unique_ptr<IndexDescriptor> descriptor, + CollectionInfoCache* const infoCache) -> std::unique_ptr<Impl> { + return factory(this_, opCtx, ns, collection, std::move(descriptor), infoCache); +} void IndexCatalogEntry::TUHook::hook() noexcept {} @@ -47,13 +62,7 @@ IndexCatalogEntry::IndexCatalogEntry(OperationContext* opCtx, CollectionCatalogEntry* collection, std::unique_ptr<IndexDescriptor> descriptor, CollectionInfoCache* infoCache) - : _pimpl(makeImpl(this, - opCtx, - ns, - collection, - std::move(descriptor), - infoCache, - PrivateCall<IndexCatalogEntry>{})) {} + : _pimpl(makeImpl(this, opCtx, ns, collection, std::move(descriptor), infoCache)) {} void IndexCatalogEntry::init(std::unique_ptr<IndexAccessMethod> accessMethod) { return this->_impl().init(std::move(accessMethod)); diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h index 20d748c9846..fecfd197bb1 100644 --- a/src/mongo/db/catalog/index_catalog_entry.h +++ b/src/mongo/db/catalog/index_catalog_entry.h @@ -32,7 +32,6 @@ #include <string> #include "mongo/base/owned_pointer_vector.h" -#include "mongo/base/shim.h" #include "mongo/bson/ordering.h" #include "mongo/bson/timestamp.h" #include "mongo/db/index/multikey_paths.h" @@ -103,15 +102,18 @@ public: virtual void setMinimumVisibleSnapshot(Timestamp name) = 0; }; +private: + static std::unique_ptr<Impl> makeImpl(IndexCatalogEntry* this_, + OperationContext* opCtx, + StringData ns, + CollectionCatalogEntry* collection, + std::unique_ptr<IndexDescriptor> descriptor, + CollectionInfoCache* infoCache); + public: - static MONGO_DECLARE_SHIM((IndexCatalogEntry * this_, - OperationContext* opCtx, - StringData ns, - CollectionCatalogEntry* collection, - std::unique_ptr<IndexDescriptor> descriptor, - CollectionInfoCache* infoCache, - PrivateTo<IndexCatalogEntry>) - ->std::unique_ptr<Impl>) makeImpl; + using factory_function_type = decltype(makeImpl); + + static void registerFactory(stdx::function<factory_function_type> factory); explicit IndexCatalogEntry( OperationContext* opCtx, diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp index 1ef395b85b4..5e717b97e71 100644 --- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp +++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp @@ 
-53,18 +53,20 @@ #include "mongo/util/scopeguard.h" namespace mongo { -MONGO_REGISTER_SHIM(IndexCatalogEntry::makeImpl) -(IndexCatalogEntry* const this_, - OperationContext* const opCtx, - const StringData ns, - CollectionCatalogEntry* const collection, - std::unique_ptr<IndexDescriptor> descriptor, - CollectionInfoCache* const infoCache, - PrivateTo<IndexCatalogEntry>) - ->std::unique_ptr<IndexCatalogEntry::Impl> { - return std::make_unique<IndexCatalogEntryImpl>( - this_, opCtx, ns, collection, std::move(descriptor), infoCache); +namespace { +MONGO_INITIALIZER(InitializeIndexCatalogEntryFactory)(InitializerContext* const) { + IndexCatalogEntry::registerFactory([](IndexCatalogEntry* const this_, + OperationContext* const opCtx, + const StringData ns, + CollectionCatalogEntry* const collection, + std::unique_ptr<IndexDescriptor> descriptor, + CollectionInfoCache* const infoCache) { + return stdx::make_unique<IndexCatalogEntryImpl>( + this_, opCtx, ns, collection, std::move(descriptor), infoCache); + }); + return Status::OK(); } +} // namespace using std::string; diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp index 76bb7aa4b8a..e19773b34fd 100644 --- a/src/mongo/db/catalog/index_catalog_impl.cpp +++ b/src/mongo/db/catalog/index_catalog_impl.cpp @@ -70,33 +70,38 @@ #include "mongo/util/represent_as.h" namespace mongo { -MONGO_REGISTER_SHIM(IndexCatalog::makeImpl) -(IndexCatalog* const this_, - Collection* const collection, - const int maxNumIndexesAllowed, - PrivateTo<IndexCatalog>) - ->std::unique_ptr<IndexCatalog::Impl> { - return std::make_unique<IndexCatalogImpl>(this_, collection, maxNumIndexesAllowed); +namespace { +MONGO_INITIALIZER(InitializeIndexCatalogFactory)(InitializerContext* const) { + IndexCatalog::registerFactory([]( + IndexCatalog* const this_, Collection* const collection, const int maxNumIndexesAllowed) { + return stdx::make_unique<IndexCatalogImpl>(this_, collection, maxNumIndexesAllowed); + }); + return Status::OK(); } -MONGO_REGISTER_SHIM(IndexCatalog::IndexIterator::makeImpl) -(OperationContext* const opCtx, - const IndexCatalog* const cat, - const bool includeUnfinishedIndexes, - PrivateTo<IndexCatalog::IndexIterator>) - ->std::unique_ptr<IndexCatalog::IndexIterator::Impl> { - return std::make_unique<IndexCatalogImpl::IndexIteratorImpl>( - opCtx, cat, includeUnfinishedIndexes); +MONGO_INITIALIZER(InitializeIndexCatalogIndexIteratorFactory)(InitializerContext* const) { + IndexCatalog::IndexIterator::registerFactory([](OperationContext* const opCtx, + const IndexCatalog* const cat, + const bool includeUnfinishedIndexes) { + return stdx::make_unique<IndexCatalogImpl::IndexIteratorImpl>( + opCtx, cat, includeUnfinishedIndexes); + }); + return Status::OK(); } -MONGO_REGISTER_SHIM(IndexCatalog::fixIndexKey)(const BSONObj& key)->BSONObj { - return IndexCatalogImpl::fixIndexKey(key); + +MONGO_INITIALIZER(InitializeFixIndexKeyImpl)(InitializerContext* const) { + IndexCatalog::registerFixIndexKeyImpl(&IndexCatalogImpl::fixIndexKey); + return Status::OK(); } -MONGO_REGISTER_SHIM(IndexCatalog::prepareInsertDeleteOptions) -(OperationContext* opCtx, const IndexDescriptor* desc, InsertDeleteOptions* options)->void { - return IndexCatalogImpl::prepareInsertDeleteOptions(opCtx, desc, options); +MONGO_INITIALIZER(InitializePrepareInsertDeleteOptionsImpl)(InitializerContext* const) { + IndexCatalog::registerPrepareInsertDeleteOptionsImpl( + &IndexCatalogImpl::prepareInsertDeleteOptions); + return Status::OK(); } +} // namespace + 
using std::unique_ptr; using std::endl; using std::string; diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp index 7caf6e1ed86..f1237212a1b 100644 --- a/src/mongo/db/catalog/index_consistency.cpp +++ b/src/mongo/db/catalog/index_consistency.cpp @@ -506,7 +506,7 @@ uint32_t IndexConsistency::_hashKeyString(const KeyString& ks, int indexNumber) Status IndexConsistency::_throwExceptionIfError() { - Database* database = DatabaseHolder::getDatabaseHolder().get(_opCtx, _nss.db()); + Database* database = dbHolder().get(_opCtx, _nss.db()); // Ensure the database still exists. if (!database) { diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp index 1f4c169000c..06f9676ca4b 100644 --- a/src/mongo/db/catalog/index_create.cpp +++ b/src/mongo/db/catalog/index_create.cpp @@ -57,7 +57,19 @@ namespace mongo { MultiIndexBlock::Impl::~Impl() = default; -MONGO_DEFINE_SHIM(MultiIndexBlock::makeImpl); +namespace { +stdx::function<MultiIndexBlock::factory_function_type> factory; +} // namespace + +void MultiIndexBlock::registerFactory(decltype(factory) newFactory) { + factory = std::move(newFactory); +} + +auto MultiIndexBlock::makeImpl(OperationContext* const opCtx, Collection* const collection) + -> std::unique_ptr<Impl> { + return factory(opCtx, collection); +} + void MultiIndexBlock::TUHook::hook() noexcept {} } // namespace mongo diff --git a/src/mongo/db/catalog/index_create.h b/src/mongo/db/catalog/index_create.h index acdf744da9e..56c85609a1f 100644 --- a/src/mongo/db/catalog/index_create.h +++ b/src/mongo/db/catalog/index_create.h @@ -114,11 +114,12 @@ private: return *this->_pimpl; } + static std::unique_ptr<Impl> makeImpl(OperationContext* opCtx, Collection* collection); + public: - static MONGO_DECLARE_SHIM((OperationContext * opCtx, - Collection* collection, - PrivateTo<MultiIndexBlock>) - ->std::unique_ptr<Impl>) makeImpl; + using factory_function_type = decltype(makeImpl); + + static void registerFactory(stdx::function<factory_function_type> factory); inline ~MultiIndexBlock() = default; @@ -126,7 +127,7 @@ public: * Neither pointer is owned. */ inline explicit MultiIndexBlock(OperationContext* const opCtx, Collection* const collection) - : _pimpl(makeImpl(opCtx, collection, PrivateCall<MultiIndexBlock>{})) {} + : _pimpl(makeImpl(opCtx, collection)) {} /** * By default we ignore the 'background' flag in specs when building an index. 
If this is diff --git a/src/mongo/db/catalog/index_create_impl_servers.cpp b/src/mongo/db/catalog/index_create_impl_servers.cpp index 541a19a0434..5fab9ebee48 100644 --- a/src/mongo/db/catalog/index_create_impl_servers.cpp +++ b/src/mongo/db/catalog/index_create_impl_servers.cpp @@ -41,12 +41,14 @@ class MultiIndexBlockImplServers : public MultiIndexBlockImpl { return spec["background"].trueValue(); } }; -} // namespace -MONGO_REGISTER_SHIM(MultiIndexBlock::makeImpl) -(OperationContext* const opCtx, Collection* const collection, PrivateTo<MultiIndexBlock>) - ->std::unique_ptr<MultiIndexBlock::Impl> { - return std::make_unique<MultiIndexBlockImplServers>(opCtx, collection); +MONGO_INITIALIZER(InitializeMultiIndexBlockFactory)(InitializerContext* const) { + MultiIndexBlock::registerFactory( + [](OperationContext* const opCtx, Collection* const collection) { + return stdx::make_unique<MultiIndexBlockImplServers>(opCtx, collection); + }); + return Status::OK(); } +} // namespace } // namespace mongo diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp index 0d7722093e6..fd5d2df9706 100644 --- a/src/mongo/db/catalog/rename_collection.cpp +++ b/src/mongo/db/catalog/rename_collection.cpp @@ -152,7 +152,7 @@ Status renameCollectionCommon(OperationContext* opCtx, << target.ns()); } - Database* const sourceDB = DatabaseHolder::getDatabaseHolder().get(opCtx, source.db()); + Database* const sourceDB = dbHolder().get(opCtx, source.db()); if (sourceDB) { DatabaseShardingState::get(sourceDB).checkDbVersion(opCtx); } @@ -180,7 +180,7 @@ Status renameCollectionCommon(OperationContext* opCtx, BackgroundOperation::assertNoBgOpInProgForNs(source.ns()); - Database* const targetDB = DatabaseHolder::getDatabaseHolder().openDb(opCtx, target.db()); + Database* const targetDB = dbHolder().openDb(opCtx, target.db()); // Check if the target namespace exists and if dropTarget is true. 
// Return a non-OK status if target exists and dropTarget is not true or if the collection diff --git a/src/mongo/db/catalog/uuid_catalog.cpp b/src/mongo/db/catalog/uuid_catalog.cpp index 0a50cadfa71..3ec994d7ef0 100644 --- a/src/mongo/db/catalog/uuid_catalog.cpp +++ b/src/mongo/db/catalog/uuid_catalog.cpp @@ -92,7 +92,7 @@ repl::OpTime UUIDCatalogObserver::onRenameCollection(OperationContext* opCtx, if (!uuid) return {}; auto getNewCollection = [opCtx, toCollection] { - auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, toCollection.db()); + auto db = dbHolder().get(opCtx, toCollection.db()); auto newColl = db->getCollection(opCtx, toCollection); invariant(newColl); return newColl; diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp index 116ca47dd6e..94f7838e39c 100644 --- a/src/mongo/db/catalog_raii.cpp +++ b/src/mongo/db/catalog_raii.cpp @@ -59,7 +59,7 @@ AutoGetDb::AutoGetDb(OperationContext* opCtx, StringData dbName, LockMode mode, : _dbLock(opCtx, dbName, mode, deadline), _db([&] { uassertLockTimeout( str::stream() << "database " << dbName, mode, deadline, _dbLock.isLocked()); - return DatabaseHolder::getDatabaseHolder().get(opCtx, dbName); + return dbHolder().get(opCtx, dbName); }()) { if (_db) { DatabaseShardingState::get(_db).checkDbVersion(opCtx); @@ -179,7 +179,7 @@ AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* opCtx, _autoDb.emplace(opCtx, dbName, MODE_X, deadline); } - _db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName, &_justCreated); + _db = dbHolder().openDb(opCtx, dbName, &_justCreated); } DatabaseShardingState::get(_db).checkDbVersion(opCtx); diff --git a/src/mongo/db/catalog_raii.h b/src/mongo/db/catalog_raii.h index 1d0b1e9238f..081e566e7de 100644 --- a/src/mongo/db/catalog_raii.h +++ b/src/mongo/db/catalog_raii.h @@ -40,8 +40,7 @@ namespace mongo { /** * RAII-style class, which acquires a lock on the specified database in the requested mode and - * obtains a reference to the database. Used as a shortcut for calls to - * DatabaseHolder::getDatabaseHolder().get(). + * obtains a reference to the database. Used as a shortcut for calls to dbHolder().get(). * * Use this when you want to do a database-level operation, like read a list of all collections, or * drop a collection. @@ -153,9 +152,9 @@ private: /** * RAII-style class, which acquires a lock on the specified database in the requested mode and * obtains a reference to the database, creating it was non-existing. Used as a shortcut for - * calls to DatabaseHolder::getDatabaseHolder().openDb(), taking care of locking details. The - * requested mode must be MODE_IX or MODE_X. If the database needs to be created, the lock will - * automatically be reacquired as MODE_X. + * calls to dbHolder().openDb(), taking care of locking details. The requested mode must be + * MODE_IX or MODE_X. If the database needs to be created, the lock will automatically be + * reacquired as MODE_X. * * Use this when you are about to perform a write, and want to create the database if it doesn't * already exist. diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp index 8af7e4f452d..4c1971b8585 100644 --- a/src/mongo/db/cloner.cpp +++ b/src/mongo/db/cloner.cpp @@ -1,3 +1,6 @@ + +// cloner.cpp - copy a database (export/import basically) + /** * Copyright (C) 2008 10gen Inc. 
* @@ -153,7 +156,7 @@ struct Cloner::Fun { repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, to_collection)); // Make sure database still exists after we resume from the temp release - Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, _dbName); + Database* db = dbHolder().openDb(opCtx, _dbName); bool createdCollection = false; Collection* collection = NULL; @@ -169,14 +172,13 @@ struct Cloner::Fun { WriteUnitOfWork wunit(opCtx); const bool createDefaultIndexes = true; - Status s = Database::userCreateNS( - opCtx, - db, - to_collection.toString(), - from_options, - CollectionOptions::parseForCommand, - createDefaultIndexes, - fixIndexSpec(to_collection.db().toString(), from_id_index)); + Status s = userCreateNS(opCtx, + db, + to_collection.toString(), + from_options, + CollectionOptions::parseForCommand, + createDefaultIndexes, + fixIndexSpec(to_collection.db().toString(), from_id_index)); verify(s.isOK()); wunit.commit(); collection = db->getCollection(opCtx, to_collection); @@ -212,7 +214,7 @@ struct Cloner::Fun { } // TODO: SERVER-16598 abort if original db or collection is gone. - db = DatabaseHolder::getDatabaseHolder().get(opCtx, _dbName); + db = dbHolder().get(opCtx, _dbName); uassert(28593, str::stream() << "Database " << _dbName << " dropped while cloning", db != NULL); @@ -370,7 +372,7 @@ void Cloner::copyIndexes(OperationContext* opCtx, // We are under lock here again, so reload the database in case it may have disappeared // during the temp release - Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, toDBName); + Database* db = dbHolder().openDb(opCtx, toDBName); Collection* collection = db->getCollection(opCtx, to_collection); if (!collection) { @@ -379,7 +381,7 @@ void Cloner::copyIndexes(OperationContext* opCtx, WriteUnitOfWork wunit(opCtx); const bool createDefaultIndexes = true; - Status s = Database::userCreateNS( + Status s = userCreateNS( opCtx, db, to_collection.toString(), @@ -477,7 +479,7 @@ bool Cloner::copyCollection(OperationContext* opCtx, !opCtx->writesAreReplicated() || repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)); - Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbname); + Database* db = dbHolder().openDb(opCtx, dbname); if (shouldCreateCollection) { bool result = writeConflictRetry(opCtx, "createCollection", ns, [&] { @@ -485,7 +487,7 @@ bool Cloner::copyCollection(OperationContext* opCtx, WriteUnitOfWork wunit(opCtx); const bool createDefaultIndexes = true; - Status status = Database::userCreateNS( + Status status = userCreateNS( opCtx, db, ns, options, optionsParser, createDefaultIndexes, idIndexSpec); if (!status.isOK()) { errmsg = status.toString(); @@ -565,7 +567,7 @@ Status Cloner::createCollectionsForDb( const std::vector<CreateCollectionParams>& createCollectionParams, const std::string& dbName, const CloneOptions& opts) { - Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName); + Database* db = dbHolder().openDb(opCtx, dbName); invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X)); auto collCount = 0; @@ -637,13 +639,13 @@ Status Cloner::createCollectionsForDb( auto options = optionsBuilder.obj(); Status createStatus = - Database::userCreateNS(opCtx, - db, - nss.ns(), - options, - CollectionOptions::parseForStorage, - createDefaultIndexes, - fixIndexSpec(nss.db().toString(), params.idIndexSpec)); + userCreateNS(opCtx, + db, + nss.ns(), + options, + CollectionOptions::parseForStorage, + createDefaultIndexes, + 
fixIndexSpec(nss.db().toString(), params.idIndexSpec)); if (!createStatus.isOK()) { return createStatus; } diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp index bf1edf90884..eb3b197278f 100644 --- a/src/mongo/db/commands/create_indexes.cpp +++ b/src/mongo/db/commands/create_indexes.cpp @@ -269,9 +269,9 @@ public: str::stream() << "Not primary while creating indexes in " << ns.ns())); } - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, ns.db()); + Database* db = dbHolder().get(opCtx, ns.db()); if (!db) { - db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, ns.db()); + db = dbHolder().openDb(opCtx, ns.db()); } DatabaseShardingState::get(db).checkDbVersion(opCtx); @@ -393,7 +393,7 @@ public: str::stream() << "Not primary while completing index build in " << dbname, repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns)); - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, ns.db()); + Database* db = dbHolder().get(opCtx, ns.db()); if (db) { DatabaseShardingState::get(db).checkDbVersion(opCtx); } diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp index ed753c7392a..34e2ed22f5b 100644 --- a/src/mongo/db/commands/dbcommands.cpp +++ b/src/mongo/db/commands/dbcommands.cpp @@ -210,7 +210,7 @@ public: // Closing a database requires a global lock. Lock::GlobalWrite lk(opCtx); - auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbname); + auto db = dbHolder().get(opCtx, dbname); if (db) { if (db->isDropPending(opCtx)) { return CommandHelpers::appendCommandStatus( @@ -221,8 +221,7 @@ public: } } else { // If the name doesn't make an exact match, check for a case insensitive match. - std::set<std::string> otherCasing = - DatabaseHolder::getDatabaseHolder().getNamesWithConflictingCasing(dbname); + std::set<std::string> otherCasing = dbHolder().getNamesWithConflictingCasing(dbname); if (otherCasing.empty()) { // Database doesn't exist. Treat this as a success (historical behavior). return true; @@ -256,7 +255,7 @@ public: opCtx, engine, dbname, preserveClonedFilesOnFailure, backupOriginalFiles); // Open database before returning - DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbname); + dbHolder().openDb(opCtx, dbname); return CommandHelpers::appendCommandStatus(result, status); } } cmdRepairDatabase; diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp index 90ab6c70bfa..a0b4be7ebd1 100644 --- a/src/mongo/db/commands/dbcommands_d.cpp +++ b/src/mongo/db/commands/dbcommands_d.cpp @@ -128,7 +128,7 @@ protected: if (!db) { // When setting the profiling level, create the database if it didn't already exist. // When just reading the profiling level, we do not create the database. 
- db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName); + db = dbHolder().openDb(opCtx, dbName); } uassertStatusOK(db->setProfilingLevel(opCtx, profilingLevel)); } diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp index 41c3e01c696..cc0902e9c4e 100644 --- a/src/mongo/db/commands/find_and_modify.cpp +++ b/src/mongo/db/commands/find_and_modify.cpp @@ -481,8 +481,8 @@ public: if (!collection) { uassertStatusOK(userAllowedCreateNS(nsString.db(), nsString.coll())); WriteUnitOfWork wuow(opCtx); - uassertStatusOK(Database::userCreateNS( - opCtx, autoDb->getDb(), nsString.ns(), BSONObj())); + uassertStatusOK( + userCreateNS(opCtx, autoDb->getDb(), nsString.ns(), BSONObj())); wuow.commit(); collection = autoDb->getDb()->getCollection(opCtx, nsString); diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp index c743a951ba5..476fd6b5d3e 100644 --- a/src/mongo/db/commands/mr.cpp +++ b/src/mongo/db/commands/mr.cpp @@ -393,8 +393,7 @@ void State::dropTempCollections() { writeConflictRetry(_opCtx, "M/R dropTempCollections", _config.incLong.ns(), [this] { Lock::DBLock lk(_opCtx, _config.incLong.db(), MODE_X); - if (Database* db = - DatabaseHolder::getDatabaseHolder().get(_opCtx, _config.incLong.ns())) { + if (Database* db = dbHolder().get(_opCtx, _config.incLong.ns())) { WriteUnitOfWork wunit(_opCtx); uassertStatusOK(db->dropCollection(_opCtx, _config.incLong.ns())); wunit.commit(); @@ -654,7 +653,7 @@ unsigned long long _collectionCount(OperationContext* opCtx, // If the global write lock is held, we must avoid using AutoGetCollectionForReadCommand as it // may lead to deadlock when waiting for a majority snapshot to be committed. See SERVER-24596. if (callerHoldsGlobalLock) { - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.ns()); + Database* db = dbHolder().get(opCtx, nss.ns()); if (db) { coll = db->getCollection(opCtx, nss); } diff --git a/src/mongo/db/commands/resize_oplog.cpp b/src/mongo/db/commands/resize_oplog.cpp index 726cf043b5d..d82c4321695 100644 --- a/src/mongo/db/commands/resize_oplog.cpp +++ b/src/mongo/db/commands/resize_oplog.cpp @@ -87,7 +87,7 @@ public: BSONObjBuilder& result) { const NamespaceString nss("local", "oplog.rs"); Lock::GlobalWrite global(opCtx); - Database* database = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db()); + Database* database = dbHolder().get(opCtx, nss.db()); if (!database) { return CommandHelpers::appendCommandStatus( result, Status(ErrorCodes::NamespaceNotFound, "database local does not exist")); diff --git a/src/mongo/db/commands/restart_catalog_command.cpp b/src/mongo/db/commands/restart_catalog_command.cpp index f840f7954bc..a3aefcd3168 100644 --- a/src/mongo/db/commands/restart_catalog_command.cpp +++ b/src/mongo/db/commands/restart_catalog_command.cpp @@ -95,7 +95,7 @@ public: std::vector<std::string> allDbs; getGlobalServiceContext()->getGlobalStorageEngine()->listDatabases(&allDbs); for (auto&& dbName : allDbs) { - const auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName); + const auto db = dbHolder().get(opCtx, dbName); if (db->isDropPending(opCtx)) { return CommandHelpers::appendCommandStatus( result, diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp index 5c4019e18d2..dd1843e160c 100644 --- a/src/mongo/db/db.cpp +++ b/src/mongo/db/db.cpp @@ -222,7 +222,7 @@ void logStartup(OperationContext* opCtx) { if (!collection) { BSONObj options = BSON("capped" << true << "size" << 10 * 1024 * 1024); repl::UnreplicatedWritesBlock 
uwb(opCtx); - uassertStatusOK(Database::userCreateNS(opCtx, db, startupLogCollectionName.ns(), options)); + uassertStatusOK(userCreateNS(opCtx, db, startupLogCollectionName.ns(), options)); collection = db->getCollection(opCtx, startupLogCollectionName); } invariant(collection); diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp index 3ff4649584a..36c61edb4c9 100644 --- a/src/mongo/db/db_raii.cpp +++ b/src/mongo/db/db_raii.cpp @@ -269,8 +269,7 @@ AutoGetCollectionForReadCommand::AutoGetCollectionForReadCommand( } OldClientContext::OldClientContext(OperationContext* opCtx, const std::string& ns, bool doVersion) - : OldClientContext( - opCtx, ns, doVersion, DatabaseHolder::getDatabaseHolder().get(opCtx, ns), false) {} + : OldClientContext(opCtx, ns, doVersion, dbHolder().get(opCtx, ns), false) {} OldClientContext::OldClientContext( OperationContext* opCtx, const std::string& ns, bool doVersion, Database* db, bool justCreated) @@ -278,7 +277,7 @@ OldClientContext::OldClientContext( if (!_db) { const auto dbName = nsToDatabaseSubstring(ns); invariant(_opCtx->lockState()->isDbLockedForMode(dbName, MODE_X)); - _db = DatabaseHolder::getDatabaseHolder().openDb(_opCtx, dbName, &_justCreated); + _db = dbHolder().openDb(_opCtx, dbName, &_justCreated); invariant(_db); } diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp index d915fc6590b..ab927cfc54b 100644 --- a/src/mongo/db/index_builder.cpp +++ b/src/mongo/db/index_builder.cpp @@ -98,7 +98,7 @@ void IndexBuilder::run() { Lock::DBLock dlk(opCtx.get(), ns.db(), MODE_X); OldClientContext ctx(opCtx.get(), ns.getSystemIndexesCollection()); - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx.get(), ns.db().toString()); + Database* db = dbHolder().get(opCtx.get(), ns.db().toString()); Status status = _build(opCtx.get(), db, true, &dlk); if (!status.isOK()) { @@ -210,7 +210,7 @@ Status IndexBuilder::_build(OperationContext* opCtx, if (allowBackgroundBuilding) { dbLock->relockWithMode(MODE_X); - Database* reloadDb = DatabaseHolder::getDatabaseHolder().get(opCtx, ns.db()); + Database* reloadDb = dbHolder().get(opCtx, ns.db()); fassert(28553, reloadDb); fassert(28554, reloadDb->getCollection(opCtx, ns)); } diff --git a/src/mongo/db/keypattern.cpp b/src/mongo/db/keypattern.cpp index 37c7f69631f..ffc2a1f8e71 100644 --- a/src/mongo/db/keypattern.cpp +++ b/src/mongo/db/keypattern.cpp @@ -1,3 +1,5 @@ +// @file keypattern.cpp + /** * Copyright (C) 2012 10gen Inc. * diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp index c572c9c65a1..a92d5753d27 100644 --- a/src/mongo/db/op_observer_impl.cpp +++ b/src/mongo/db/op_observer_impl.cpp @@ -660,7 +660,7 @@ void OpObserverImpl::onCollMod(OperationContext* opCtx, // catalog are all present and equal, unless the collection is system.indexes or // system.namespaces (see SERVER-29926, SERVER-30095). invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X)); - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db()); + Database* db = dbHolder().get(opCtx, nss.db()); // Some unit tests call the op observer on an unregistered Database. 
if (!db) { return; diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp index c401349d2c5..7edfd3d09d4 100644 --- a/src/mongo/db/ops/write_ops_exec.cpp +++ b/src/mongo/db/ops/write_ops_exec.cpp @@ -212,7 +212,7 @@ void makeCollection(OperationContext* opCtx, const NamespaceString& ns) { if (!db.getDb()->getCollection(opCtx, ns)) { // someone else may have beat us to it. uassertStatusOK(userAllowedCreateNS(ns.db(), ns.coll())); WriteUnitOfWork wuow(opCtx); - uassertStatusOK(Database::userCreateNS(opCtx, db.getDb(), ns.ns(), BSONObj())); + uassertStatusOK(userCreateNS(opCtx, db.getDb(), ns.ns(), BSONObj())); wuow.commit(); } }); diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp index 2deec30fd5f..54d56cfb313 100644 --- a/src/mongo/db/query/plan_executor.cpp +++ b/src/mongo/db/query/plan_executor.cpp @@ -446,7 +446,7 @@ std::shared_ptr<CappedInsertNotifier> PlanExecutor::getCappedInsertNotifier() { // We can only wait if we have a collection; otherwise we should retry immediately when // we hit EOF. dassert(_opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IS)); - auto db = DatabaseHolder::getDatabaseHolder().get(_opCtx, _nss.db()); + auto db = dbHolder().get(_opCtx, _nss.db()); invariant(db); auto collection = db->getCollection(_opCtx, _nss); invariant(collection); diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp index 7c106ec1d62..93773d7fdc4 100644 --- a/src/mongo/db/repair_database.cpp +++ b/src/mongo/db/repair_database.cpp @@ -266,14 +266,14 @@ Status repairDatabase(OperationContext* opCtx, } // Close the db and invalidate all current users and caches. - DatabaseHolder::getDatabaseHolder().close(opCtx, dbName, "database closed for repair"); + dbHolder().close(opCtx, dbName, "database closed for repair"); ON_BLOCK_EXIT([&dbName, &opCtx] { try { // Ensure that we don't trigger an exception when attempting to take locks. UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // Open the db after everything finishes. - auto db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName); + auto db = dbHolder().openDb(opCtx, dbName); // Set the minimum snapshot for all Collections in this db. This ensures that readers // using majority readConcern level can only use the collections after their repaired diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp index 523e635fbe4..37baffa3ab4 100644 --- a/src/mongo/db/repair_database_and_check_version.cpp +++ b/src/mongo/db/repair_database_and_check_version.cpp @@ -76,11 +76,11 @@ Status restoreMissingFeatureCompatibilityVersionDocument(OperationContext* opCtx // If the admin database, which contains the server configuration collection with the // featureCompatibilityVersion document, does not exist, create it. 
- Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, fcvNss.db()); + Database* db = dbHolder().get(opCtx, fcvNss.db()); if (!db) { log() << "Re-creating admin database that was dropped."; } - db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, fcvNss.db()); + db = dbHolder().openDb(opCtx, fcvNss.db()); invariant(db); // If the server configuration collection, which contains the FCV document, does not exist, then @@ -136,7 +136,7 @@ Status ensureAllCollectionsHaveUUIDs(OperationContext* opCtx, bool isMmapV1 = opCtx->getServiceContext()->getGlobalStorageEngine()->isMmapV1(); std::vector<NamespaceString> nonReplicatedCollNSSsWithoutUUIDs; for (const auto& dbName : dbNames) { - Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName); + Database* db = dbHolder().openDb(opCtx, dbName); invariant(db); for (auto collectionIt = db->begin(); collectionIt != db->end(); ++collectionIt) { Collection* coll = *collectionIt; @@ -318,7 +318,7 @@ StatusWith<bool> repairDatabasesAndCheckVersion(OperationContext* opCtx) { // Attempt to restore the featureCompatibilityVersion document if it is missing. NamespaceString fcvNSS(NamespaceString::kServerConfigurationNamespace); - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, fcvNSS.db()); + Database* db = dbHolder().get(opCtx, fcvNSS.db()); Collection* versionColl; BSONObj featureCompatibilityVersion; if (!db || !(versionColl = db->getCollection(opCtx, fcvNSS)) || @@ -349,7 +349,7 @@ StatusWith<bool> repairDatabasesAndCheckVersion(OperationContext* opCtx) { // it is fine to not open the "local" database and populate the catalog entries because we // won't attempt to drop the temporary collections anyway. Lock::DBLock dbLock(opCtx, kSystemReplSetCollection.db(), MODE_X); - DatabaseHolder::getDatabaseHolder().openDb(opCtx, kSystemReplSetCollection.db()); + dbHolder().openDb(opCtx, kSystemReplSetCollection.db()); } const repl::ReplSettings& replSettings = @@ -377,7 +377,7 @@ StatusWith<bool> repairDatabasesAndCheckVersion(OperationContext* opCtx) { } LOG(1) << " Recovering database: " << dbName; - Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName); + Database* db = dbHolder().openDb(opCtx, dbName); invariant(db); // First thing after opening the database is to check for file compatibility, diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp index 81ebf365c62..3a732c407ec 100644 --- a/src/mongo/db/repl/apply_ops.cpp +++ b/src/mongo/db/repl/apply_ops.cpp @@ -137,7 +137,7 @@ Status _applyOps(OperationContext* opCtx, invariant(opCtx->lockState()->isW()); invariant(*opType != 'c'); - auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.ns()); + auto db = dbHolder().get(opCtx, nss.ns()); if (!db) { // Retry in non-atomic mode, since MMAP cannot implicitly create a new database // within an active WriteUnitOfWork. @@ -339,7 +339,7 @@ Status _checkPrecondition(OperationContext* opCtx, BSONObj realres = db.findOne(nss.ns(), preCondition["q"].Obj()); // Get collection default collation. 
- Database* database = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db()); + Database* database = dbHolder().get(opCtx, nss.db()); if (!database) { return {ErrorCodes::NamespaceNotFound, "database in ns does not exist: " + nss.ns()}; } diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index 2c6f4395590..6e083990f4e 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -227,7 +227,7 @@ void createIndexForApplyOps(OperationContext* opCtx, IncrementOpsAppliedStatsFn incrementOpsAppliedStats, OplogApplication::Mode mode) { // Check if collection exists. - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, indexNss.ns()); + Database* db = dbHolder().get(opCtx, indexNss.ns()); auto indexCollection = db ? db->getCollection(opCtx, indexNss) : nullptr; uassert(ErrorCodes::NamespaceNotFound, str::stream() << "Failed to create index due to missing collection: " << indexNss.ns(), @@ -1516,7 +1516,7 @@ Status applyCommand_inlock(OperationContext* opCtx, return {ErrorCodes::InvalidNamespace, "invalid ns: " + std::string(nss.ns())}; } { - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.ns()); + Database* db = dbHolder().get(opCtx, nss.ns()); if (db && !db->getCollection(opCtx, nss) && db->getViewCatalog()->lookup(opCtx, nss.ns())) { return {ErrorCodes::CommandNotSupportedOnView, str::stream() << "applyOps not supported on view:" << nss.ns()}; diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp index d87cf4f73f8..4095ca06ef5 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp +++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp @@ -394,7 +394,7 @@ Status ReplicationCoordinatorExternalStateImpl::runRepairOnLocalDB(OperationCont Status status = repairDatabase(opCtx, engine, localDbName, false, false); // Open database before returning - DatabaseHolder::getDatabaseHolder().openDb(opCtx, localDbName); + dbHolder().openDb(opCtx, localDbName); } catch (const DBException& ex) { return ex.toStatus(); } @@ -787,7 +787,7 @@ void ReplicationCoordinatorExternalStateImpl::_dropAllTempCollections(OperationC if (*it == "local") continue; LOG(2) << "Removing temporary collections from " << *it; - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, *it); + Database* db = dbHolder().get(opCtx, *it); // Since we must be holding the global lock during this function, if listDatabases // returned this dbname, we should be able to get a reference to it - it can't have // been dropped. 
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp index 6e96e6d0237..e92b0c9ab2d 100644 --- a/src/mongo/db/repl/rollback_impl.cpp +++ b/src/mongo/db/repl/rollback_impl.cpp @@ -857,7 +857,7 @@ void RollbackImpl::_resetDropPendingState(OperationContext* opCtx) { opCtx->getServiceContext()->getGlobalStorageEngine()->listDatabases(&dbNames); for (const auto& dbName : dbNames) { Lock::DBLock dbLock(opCtx, dbName, MODE_X); - Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName); + Database* db = dbHolder().openDb(opCtx, dbName); checkForIdIndexesAndDropPendingCollections(opCtx, db); } } diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp index b0d49b1473a..2e9c4f47889 100644 --- a/src/mongo/db/repl/rollback_test_fixture.cpp +++ b/src/mongo/db/repl/rollback_test_fixture.cpp @@ -102,8 +102,8 @@ void RollbackTest::tearDown() { SessionCatalog::get(_serviceContextMongoDTest.getServiceContext())->reset_forTest(); // We cannot unset the global replication coordinator because ServiceContextMongoD::tearDown() - // calls Databse::dropAllDatabasesExceptLocal() which requires the replication coordinator to - // clear all snapshots. + // calls dropAllDatabasesExceptLocal() which requires the replication coordinator to clear all + // snapshots. _serviceContextMongoDTest.tearDown(); // ServiceContextMongoD::tearDown() does not destroy service context so it is okay @@ -179,7 +179,7 @@ Collection* RollbackTest::_createCollection(OperationContext* opCtx, const CollectionOptions& options) { Lock::DBLock dbLock(opCtx, nss.db(), MODE_X); mongo::WriteUnitOfWork wuow(opCtx); - auto db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, nss.db()); + auto db = dbHolder().openDb(opCtx, nss.db()); ASSERT_TRUE(db); db->dropCollection(opCtx, nss.ns()).transitional_ignore(); auto coll = db->createCollection(opCtx, nss.ns(), options); diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp index eae8c70a468..3e9944988ba 100644 --- a/src/mongo/db/repl/rs_rollback.cpp +++ b/src/mongo/db/repl/rs_rollback.cpp @@ -844,7 +844,7 @@ void rollbackRenameCollection(OperationContext* opCtx, UUID uuid, RenameCollecti log() << "Attempting to rename collection with UUID: " << uuid << ", from: " << info.renameFrom << ", to: " << info.renameTo; Lock::DBLock dbLock(opCtx, dbName, MODE_X); - auto db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName); + auto db = dbHolder().openDb(opCtx, dbName); invariant(db); auto status = renameCollectionForRollback(opCtx, info.renameTo, uuid); @@ -1138,7 +1138,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx, Lock::DBLock dbLock(opCtx, nss.db(), MODE_X); - auto db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, nss.db().toString()); + auto db = dbHolder().openDb(opCtx, nss.db().toString()); invariant(db); Collection* collection = UUIDCatalog::get(opCtx).lookupCollectionByUUID(uuid); diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp index 93a47ec8044..b4858df9cba 100644 --- a/src/mongo/db/repl/rs_rollback_test.cpp +++ b/src/mongo/db/repl/rs_rollback_test.cpp @@ -357,7 +357,7 @@ int _testRollbackDelete(OperationContext* opCtx, Lock::DBLock dbLock(opCtx, "test", MODE_S); Lock::CollectionLock collLock(opCtx->lockState(), "test.t", MODE_S); - auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, "test"); + auto db = dbHolder().get(opCtx, "test"); ASSERT_TRUE(db); auto collection = db->getCollection(opCtx, 
"test.t"); if (!collection) { @@ -1777,7 +1777,7 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) { _replicationProcess.get())); { Lock::DBLock dbLock(_opCtx.get(), "test", MODE_S); - auto db = DatabaseHolder::getDatabaseHolder().get(_opCtx.get(), "test"); + auto db = dbHolder().get(_opCtx.get(), "test"); ASSERT_TRUE(db); ASSERT_FALSE(db->getCollection(_opCtx.get(), "test.t")); } diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp index cd9dbcc1fd1..e4a53344305 100644 --- a/src/mongo/db/repl/storage_interface_impl.cpp +++ b/src/mongo/db/repl/storage_interface_impl.cpp @@ -370,7 +370,7 @@ Status StorageInterfaceImpl::insertDocuments(OperationContext* opCtx, } Status StorageInterfaceImpl::dropReplicatedDatabases(OperationContext* opCtx) { - Database::dropAllDatabasesExceptLocal(opCtx); + dropAllDatabasesExceptLocal(opCtx); return Status::OK(); } diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp index b70f3c56793..1dd997ce5a8 100644 --- a/src/mongo/db/repl/sync_tail.cpp +++ b/src/mongo/db/repl/sync_tail.cpp @@ -542,7 +542,7 @@ private: CollectionProperties collProperties; Lock::DBLock dbLock(opCtx, nsToDatabaseSubstring(ns), MODE_IS); - auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, ns); + auto db = dbHolder().get(opCtx, ns); if (!db) { return collProperties; } diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp index d995e4f61e2..3740407529a 100644 --- a/src/mongo/db/repl/sync_tail_test.cpp +++ b/src/mongo/db/repl/sync_tail_test.cpp @@ -193,7 +193,7 @@ auto createCollectionWithUuid(OperationContext* opCtx, const NamespaceString& ns void createDatabase(OperationContext* opCtx, StringData dbName) { Lock::GlobalWrite globalLock(opCtx); bool justCreated; - Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName, &justCreated); + Database* db = dbHolder().openDb(opCtx, dbName, &justCreated); ASSERT_TRUE(db); ASSERT_TRUE(justCreated); } @@ -991,8 +991,7 @@ TEST_F(SyncTailTest, MultiInitialSyncApplyIgnoresUpdateOperationIfDocumentIsMiss { Lock::GlobalWrite globalLock(_opCtx.get()); bool justCreated = false; - Database* db = - DatabaseHolder::getDatabaseHolder().openDb(_opCtx.get(), nss.db(), &justCreated); + Database* db = dbHolder().openDb(_opCtx.get(), nss.db(), &justCreated); ASSERT_TRUE(db); ASSERT_TRUE(justCreated); } diff --git a/src/mongo/db/s/implicit_create_collection.cpp b/src/mongo/db/s/implicit_create_collection.cpp index 0ad6e536658..c15fc331d41 100644 --- a/src/mongo/db/s/implicit_create_collection.cpp +++ b/src/mongo/db/s/implicit_create_collection.cpp @@ -93,7 +93,7 @@ public: // Take the DBLock and CollectionLock directly rather than using AutoGetCollection // (which calls AutoGetDb) to avoid doing database and shard version checks. Lock::DBLock dbLock(opCtx, _ns.db(), MODE_IS); - const auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, _ns.db()); + const auto db = dbHolder().get(opCtx, _ns.db()); if (db) { Lock::CollectionLock collLock(opCtx->lockState(), _ns.ns(), MODE_IS); if (db->getCollection(opCtx, _ns.ns())) { diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp index b1545848bd5..06721587c68 100644 --- a/src/mongo/db/s/migration_destination_manager.cpp +++ b/src/mongo/db/s/migration_destination_manager.cpp @@ -677,13 +677,13 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx, // options. 
WriteUnitOfWork wuow(opCtx); const bool createDefaultIndexes = true; - Status status = Database::userCreateNS(opCtx, - db, - _nss.ns(), - donorOptions, - CollectionOptions::parseForStorage, - createDefaultIndexes, - donorIdIndexSpec); + Status status = userCreateNS(opCtx, + db, + _nss.ns(), + donorOptions, + CollectionOptions::parseForStorage, + createDefaultIndexes, + donorIdIndexSpec); if (!status.isOK()) { warning() << "failed to create collection [" << _nss << "] " << " with options " << donorOptions << ": " << redact(status); diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp index 90c776cae58..be2d5ad9db5 100644 --- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp +++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp @@ -213,7 +213,7 @@ void forceDatabaseRefresh(OperationContext* opCtx, const StringData dbName) { // Take the DBLock directly rather than using AutoGetDb, to prevent a recursive call // into checkDbVersion(). Lock::DBLock dbLock(opCtx, dbName, MODE_IS); - const auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName); + const auto db = dbHolder().get(opCtx, dbName); if (!db) { log() << "Database " << dbName << " has been dropped; not caching the refreshed databaseVersion"; @@ -234,7 +234,7 @@ void forceDatabaseRefresh(OperationContext* opCtx, const StringData dbName) { // The cached version is older than the refreshed version; update the cached version. Lock::DBLock dbLock(opCtx, dbName, MODE_X); - const auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName); + const auto db = dbHolder().get(opCtx, dbName); if (!db) { log() << "Database " << dbName << " has been dropped; not caching the refreshed databaseVersion"; diff --git a/src/mongo/db/service_context_d_test_fixture.cpp b/src/mongo/db/service_context_d_test_fixture.cpp index df3811421d4..d42641381e9 100644 --- a/src/mongo/db/service_context_d_test_fixture.cpp +++ b/src/mongo/db/service_context_d_test_fixture.cpp @@ -99,7 +99,7 @@ void ServiceContextMongoDTest::_doTest() { } void ServiceContextMongoDTest::_dropAllDBs(OperationContext* opCtx) { - Database::dropAllDatabasesExceptLocal(opCtx); + dropAllDatabasesExceptLocal(opCtx); Lock::GlobalWrite lk(opCtx); AutoGetDb autoDBLocal(opCtx, "local", MODE_X); @@ -111,11 +111,10 @@ void ServiceContextMongoDTest::_dropAllDBs(OperationContext* opCtx) { }); } - // Database::dropAllDatabasesExceptLocal() does not close empty databases. However the holder - // still allocates resources to track these empty databases. These resources not released by - // Database::dropAllDatabasesExceptLocal() will be leaked at exit unless we call - // DatabaseHolder::closeAll. - DatabaseHolder::getDatabaseHolder().closeAll(opCtx, "all databases dropped"); + // dropAllDatabasesExceptLocal() does not close empty databases. However the holder still + // allocates resources to track these empty databases. These resources not released by + // dropAllDatabasesExceptLocal() will be leaked at exit unless we call DatabaseHolder::closeAll. 
+ dbHolder().closeAll(opCtx, "all databases dropped"); } } // namespace mongo diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp index 700628c4331..49cbc6d1762 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp @@ -1,3 +1,5 @@ +// ephemeral_for_test_btree_impl_test.cpp + /** * Copyright (C) 2014 MongoDB Inc. * diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp index 154daa7c9cd..949cdc5f670 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp @@ -1,3 +1,5 @@ +// ephemeral_for_test_engine_test.cpp + /** * Copyright (C) 2014 MongoDB Inc. * diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp index 1c5fff52e8b..b52507c20df 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp @@ -1,3 +1,5 @@ +// ephemeral_for_test_init.cpp + /** * Copyright (C) 2014 MongoDB Inc. * diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp index 602311e4ce4..29f97fd98d6 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp @@ -1,3 +1,5 @@ +// ephemeral_for_test_record_store_test.cpp + /** * Copyright (C) 2014 MongoDB Inc. * diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp index e0eebfaa73d..4ca06132c0c 100644 --- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp +++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp @@ -1,3 +1,5 @@ +// kv_engine_test_harness.cpp + /** * Copyright (C) 2014 MongoDB Inc. * diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.h b/src/mongo/db/storage/kv/kv_engine_test_harness.h index 64edb26da44..592e11b2a32 100644 --- a/src/mongo/db/storage/kv/kv_engine_test_harness.h +++ b/src/mongo/db/storage/kv/kv_engine_test_harness.h @@ -1,3 +1,5 @@ +// kv_engine_test_harness.h + /** * Copyright (C) 2014 MongoDB Inc. * diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp index b49fd70ec26..59bde328780 100644 --- a/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp +++ b/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp @@ -1,3 +1,5 @@ +// btree_interface_test.cpp + /** * Copyright (C) 2014 MongoDB Inc. * diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp index 31440e91bc5..75ca4cda551 100644 --- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp +++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp @@ -408,7 +408,7 @@ void MMAPV1DatabaseCatalogEntry::invalidateSystemCollectionRecord( // violation, but at this point we're not going to add more MMAPv1 specific interfaces. 
StringData dbName = systemCollectionNamespace.db(); invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X)); - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName); + Database* db = dbHolder().get(opCtx, dbName); Collection* systemCollection = db->getCollection(opCtx, systemCollectionNamespace); systemCollection->getCursorManager()->invalidateDocument(opCtx, record, INVALIDATION_DELETION); } diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp index 3a1e71fad40..dff4fab6f08 100644 --- a/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp +++ b/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp @@ -1,3 +1,5 @@ +// mmap_v1_record_store_test.cpp + /** * Copyright (C) 2014 MongoDB Inc. * diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp index 416ff14063e..73114b71362 100644 --- a/src/mongo/db/storage/mmap_v1/repair_database.cpp +++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp @@ -312,7 +312,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* opCtx, new RepairFileDeleter(opCtx, dbName, reservedPathString, reservedPath)); { - Database* originalDatabase = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName); + Database* originalDatabase = dbHolder().openDb(opCtx, dbName); if (originalDatabase == NULL) { return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair"); } @@ -454,7 +454,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* opCtx, repairFileDeleter->success(); // Close the database so we can rename/delete the original data files - DatabaseHolder::getDatabaseHolder().close(opCtx, dbName, "database closed for repair"); + dbHolder().close(opCtx, dbName, "database closed for repair"); if (backupOriginalFiles) { _renameForBackup(dbName, reservedPath); @@ -480,7 +480,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* opCtx, } // Reopen the database so it's discoverable - DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName); + dbHolder().openDb(opCtx, dbName); return Status::OK(); } diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.h b/src/mongo/db/storage/sorted_data_interface_test_harness.h index 5966755397e..e6f9443fd23 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_harness.h +++ b/src/mongo/db/storage/sorted_data_interface_test_harness.h @@ -1,3 +1,5 @@ +// sorted_data_interface_test_harness.h + /** * Copyright (C) 2014 MongoDB Inc. * @@ -134,4 +136,4 @@ inline void removeFromIndex(unowned_ptr<HarnessHelper> harness, inline std::unique_ptr<SortedDataInterfaceHarnessHelper> newSortedDataInterfaceHarnessHelper() { return dynamic_ptr_cast<SortedDataInterfaceHarnessHelper>(newHarnessHelper()); } -} // namespace mongo +} diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp index f5ac70627cc..fbf55d41b3c 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp @@ -1,3 +1,5 @@ +// wiredtiger_kv_engine_test.cpp + /** * Copyright (C) 2014 MongoDB Inc. 
* diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp index 50d0b2a2548..c42744ff769 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp @@ -251,4 +251,4 @@ TEST(WiredTigerRecordStoreTest, PrefixedSeekingCursor) { } } // namespace -} // namespace mongo +} // mongo diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp index 4041187ce36..9650fe4a021 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp @@ -451,4 +451,4 @@ TEST_F(SizeStorerValidateTest, InvalidSizeStorerAtCreation) { } } // namespace -} // namespace mongo +} // mongo diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp index 199c201f07c..270bd1ce9c0 100644 --- a/src/mongo/db/views/durable_view_catalog.cpp +++ b/src/mongo/db/views/durable_view_catalog.cpp @@ -54,7 +54,7 @@ namespace mongo { void DurableViewCatalog::onExternalChange(OperationContext* opCtx, const NamespaceString& name) { dassert(opCtx->lockState()->isDbLockedForMode(name.db(), MODE_IX)); - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, name.db()); + Database* db = dbHolder().get(opCtx, name.db()); if (db) { opCtx->recoveryUnit()->onCommit([db]() { db->getViewCatalog()->invalidate(); }); diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp index e7723e1bfa4..6aa07cb3414 100644 --- a/src/mongo/dbtests/namespacetests.cpp +++ b/src/mongo/dbtests/namespacetests.cpp @@ -1,3 +1,6 @@ +// namespacetests.cpp : namespace.{h,cpp} unit tests. +// + /** * Copyright (C) 2008-2014 MongoDB Inc. * @@ -547,7 +550,7 @@ public: Lock::DBLock lk(&opCtx, dbName, MODE_X); bool justCreated; - Database* db = DatabaseHolder::getDatabaseHolder().openDb(&opCtx, dbName, &justCreated); + Database* db = dbHolder().openDb(&opCtx, dbName, &justCreated); ASSERT(justCreated); Collection* committedColl; @@ -591,7 +594,7 @@ public: Lock::DBLock lk(&opCtx, dbName, MODE_X); bool justCreated; - Database* db = DatabaseHolder::getDatabaseHolder().openDb(&opCtx, dbName, &justCreated); + Database* db = dbHolder().openDb(&opCtx, dbName, &justCreated); ASSERT(justCreated); { diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp index c626a272b9f..d020971fecd 100644 --- a/src/mongo/dbtests/query_stage_cached_plan.cpp +++ b/src/mongo/dbtests/query_stage_cached_plan.cpp @@ -78,7 +78,7 @@ public: void dropCollection() { Lock::DBLock dbLock(&_opCtx, nss.db(), MODE_X); - Database* database = DatabaseHolder::getDatabaseHolder().get(&_opCtx, nss.db()); + Database* database = dbHolder().get(&_opCtx, nss.db()); if (!database) { return; } diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp index 3dd7e253f55..9da211526f4 100644 --- a/src/mongo/dbtests/querytests.cpp +++ b/src/mongo/dbtests/querytests.cpp @@ -1252,12 +1252,12 @@ public: // a bit. 
{ WriteUnitOfWork wunit(&_opCtx); - ASSERT(Database::userCreateNS(&_opCtx, - ctx.db(), - ns(), - fromjson("{ capped : true, size : 2000, max: 10000 }"), - CollectionOptions::parseForCommand, - false) + ASSERT(userCreateNS(&_opCtx, + ctx.db(), + ns(), + fromjson("{ capped : true, size : 2000, max: 10000 }"), + CollectionOptions::parseForCommand, + false) .isOK()); wunit.commit(); } diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp index bade34cb7ea..3d3523e4268 100644 --- a/src/mongo/dbtests/rollbacktests.cpp +++ b/src/mongo/dbtests/rollbacktests.cpp @@ -54,7 +54,7 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2; void dropDatabase(OperationContext* opCtx, const NamespaceString& nss) { Lock::GlobalWrite globalWriteLock(opCtx); - Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db()); + Database* db = dbHolder().get(opCtx, nss.db()); if (db) { Database::dropDatabase(opCtx, db); @@ -72,7 +72,7 @@ void createCollection(OperationContext* opCtx, const NamespaceString& nss) { { WriteUnitOfWork uow(opCtx); ASSERT(!collectionExists(&ctx, nss.ns())); - ASSERT_OK(Database::userCreateNS( + ASSERT_OK(userCreateNS( opCtx, ctx.db(), nss.ns(), BSONObj(), CollectionOptions::parseForCommand, false)); ASSERT(collectionExists(&ctx, nss.ns())); uow.commit(); @@ -85,20 +85,17 @@ Status renameCollection(OperationContext* opCtx, return renameCollection(opCtx, source, target, {}); } Status truncateCollection(OperationContext* opCtx, const NamespaceString& nss) { - Collection* coll = - DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); return coll->truncate(opCtx); } void insertRecord(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& data) { - Collection* coll = - DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); OpDebug* const nullOpDebug = nullptr; ASSERT_OK(coll->insertDocument(opCtx, InsertStatement(data), nullOpDebug, false)); } void assertOnlyRecord(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& data) { - Collection* coll = - DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); auto cursor = coll->getCursor(opCtx); auto record = cursor->next(); @@ -108,18 +105,15 @@ void assertOnlyRecord(OperationContext* opCtx, const NamespaceString& nss, const ASSERT(!cursor->next()); } void assertEmpty(OperationContext* opCtx, const NamespaceString& nss) { - Collection* coll = - DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); ASSERT(!coll->getCursor(opCtx)->next()); } bool indexExists(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) { - Collection* coll = - DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, true) != NULL; } bool indexReady(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) { - Collection* coll = - DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); + Collection* coll = 
dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, false) != NULL; } size_t getNumIndexEntries(OperationContext* opCtx, @@ -127,8 +121,7 @@ size_t getNumIndexEntries(OperationContext* opCtx, const string& idxName) { size_t numEntries = 0; - Collection* coll = - DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); IndexCatalog* catalog = coll->getIndexCatalog(); IndexDescriptor* desc = catalog->findIndexByName(opCtx, idxName, false); @@ -144,8 +137,7 @@ size_t getNumIndexEntries(OperationContext* opCtx, } void dropIndex(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) { - Collection* coll = - DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); + Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); IndexDescriptor* desc = coll->getIndexCatalog()->findIndexByName(opCtx, idxName); ASSERT(desc); ASSERT_OK(coll->getIndexCatalog()->dropIndex(opCtx, desc)); @@ -168,7 +160,7 @@ public: WriteUnitOfWork uow(&opCtx); ASSERT(!collectionExists(&ctx, ns)); auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj(); - ASSERT_OK(Database::userCreateNS( + ASSERT_OK(userCreateNS( &opCtx, ctx.db(), ns, options, CollectionOptions::parseForCommand, defaultIndexes)); ASSERT(collectionExists(&ctx, ns)); if (!rollback) { @@ -199,7 +191,7 @@ public: WriteUnitOfWork uow(&opCtx); ASSERT(!collectionExists(&ctx, ns)); auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj(); - ASSERT_OK(Database::userCreateNS( + ASSERT_OK(userCreateNS( &opCtx, ctx.db(), ns, options, CollectionOptions::parseForCommand, defaultIndexes)); uow.commit(); } @@ -244,12 +236,12 @@ public: ASSERT(!collectionExists(&ctx, source.ns())); ASSERT(!collectionExists(&ctx, target.ns())); auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj(); - ASSERT_OK(Database::userCreateNS(&opCtx, - ctx.db(), - source.ns(), - options, - CollectionOptions::parseForCommand, - defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, + ctx.db(), + source.ns(), + options, + CollectionOptions::parseForCommand, + defaultIndexes)); uow.commit(); } ASSERT(collectionExists(&ctx, source.ns())); @@ -301,18 +293,18 @@ public: ASSERT(!collectionExists(&ctx, source.ns())); ASSERT(!collectionExists(&ctx, target.ns())); auto options = capped ? 
BSON("capped" << true << "size" << 1000) : BSONObj(); - ASSERT_OK(Database::userCreateNS(&opCtx, - ctx.db(), - source.ns(), - options, - CollectionOptions::parseForCommand, - defaultIndexes)); - ASSERT_OK(Database::userCreateNS(&opCtx, - ctx.db(), - target.ns(), - options, - CollectionOptions::parseForCommand, - defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, + ctx.db(), + source.ns(), + options, + CollectionOptions::parseForCommand, + defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, + ctx.db(), + target.ns(), + options, + CollectionOptions::parseForCommand, + defaultIndexes)); insertRecord(&opCtx, source, sourceDoc); insertRecord(&opCtx, target, targetDoc); @@ -376,12 +368,12 @@ public: { WriteUnitOfWork uow(&opCtx); ASSERT(!collectionExists(&ctx, nss.ns())); - ASSERT_OK(Database::userCreateNS(&opCtx, - ctx.db(), - nss.ns(), - BSONObj(), - CollectionOptions::parseForCommand, - defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, + ctx.db(), + nss.ns(), + BSONObj(), + CollectionOptions::parseForCommand, + defaultIndexes)); insertRecord(&opCtx, nss, oldDoc); uow.commit(); } @@ -400,12 +392,12 @@ public: {}, DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops)); ASSERT(!collectionExists(&ctx, nss.ns())); - ASSERT_OK(Database::userCreateNS(&opCtx, - ctx.db(), - nss.ns(), - BSONObj(), - CollectionOptions::parseForCommand, - defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, + ctx.db(), + nss.ns(), + BSONObj(), + CollectionOptions::parseForCommand, + defaultIndexes)); ASSERT(collectionExists(&ctx, nss.ns())); insertRecord(&opCtx, nss, newDoc); assertOnlyRecord(&opCtx, nss, newDoc); @@ -441,12 +433,12 @@ public: { WriteUnitOfWork uow(&opCtx); - ASSERT_OK(Database::userCreateNS(&opCtx, - ctx.db(), - nss.ns(), - BSONObj(), - CollectionOptions::parseForCommand, - defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, + ctx.db(), + nss.ns(), + BSONObj(), + CollectionOptions::parseForCommand, + defaultIndexes)); ASSERT(collectionExists(&ctx, nss.ns())); insertRecord(&opCtx, nss, doc); assertOnlyRecord(&opCtx, nss, doc); @@ -487,12 +479,12 @@ public: { WriteUnitOfWork uow(&opCtx); - ASSERT_OK(Database::userCreateNS(&opCtx, - ctx.db(), - nss.ns(), - BSONObj(), - CollectionOptions::parseForCommand, - defaultIndexes)); + ASSERT_OK(userCreateNS(&opCtx, + ctx.db(), + nss.ns(), + BSONObj(), + CollectionOptions::parseForCommand, + defaultIndexes)); ASSERT(collectionExists(&ctx, nss.ns())); insertRecord(&opCtx, nss, doc); assertOnlyRecord(&opCtx, nss, doc); @@ -751,7 +743,7 @@ public: { WriteUnitOfWork uow(&opCtx); ASSERT(!collectionExists(&ctx, nss.ns())); - ASSERT_OK(Database::userCreateNS( + ASSERT_OK(userCreateNS( &opCtx, ctx.db(), nss.ns(), BSONObj(), CollectionOptions::parseForCommand, false)); ASSERT(collectionExists(&ctx, nss.ns())); Collection* coll = ctx.db()->getCollection(&opCtx, nss); |