author     ADAM David Alan Martin <adam.martin@10gen.com>    2018-04-30 18:34:19 -0400
committer  ADAM David Alan Martin <adam.martin@10gen.com>    2018-04-30 18:34:19 -0400
commit     00ca22fc7097455ab1301931015a86d84e038744
tree       b3886adb9020c09eab0e77d8ceb08d31cf8ebc7c
parent     c68c5f493b042350f61dff7aaa32aa2e3e6d651e
download   mongo-00ca22fc7097455ab1301931015a86d84e038744.tar.gz
SERVER-32645 Create a shim helper framework.
The `MONGO_DECLARE_SHIM`, `MONGO_DEFINE_SHIM`, and `MONGO_REGISTER_SHIM` macros can be used to create specialized types that act as customization and auto-registration points for late-binding functions. In some sense they work like weak symbols; however, they are also useful for tracking dependencies upon shimmed-out implementations.
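A minimal usage sketch, assembled from the usage comment in `src/mongo/base/shim.h` added by this commit; `MyClass` and `helloWorldFunction` are illustrative names only, not part of the server code:
```
// In a header: declare the shim as a static member of a class.
class MyClass {
public:
    static MONGO_DECLARE_SHIM((int)->std::string) helloWorldFunction;
};

// In the corresponding .cpp file (a link dependency of all callers):
MONGO_DEFINE_SHIM(MyClass::helloWorldFunction);

// In any implementation file (real or mock); the registration runs at startup:
MONGO_REGISTER_SHIM(MyClass::helloWorldFunction)(int value)->std::string {
    if (value == 42) {
        return "Hello World";
    }
    return "No way!";
}

// Call sites invoke the shim like an ordinary static function:
//     std::string greeting = MyClass::helloWorldFunction(42);
```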
-rw-r--r--  src/mongo/SConscript | 1
-rw-r--r--  src/mongo/base/CPPLINT.cfg | 6
-rw-r--r--  src/mongo/base/init.h | 8
-rw-r--r--  src/mongo/base/shim.cpp | 36
-rw-r--r--  src/mongo/base/shim.h | 257
-rw-r--r--  src/mongo/client/embedded/embedded.cpp | 2
-rw-r--r--  src/mongo/client/embedded/index_create_impl_embedded.cpp | 12
-rw-r--r--  src/mongo/db/catalog/SConscript | 1
-rw-r--r--  src/mongo/db/catalog/catalog_control.cpp | 4
-rw-r--r--  src/mongo/db/catalog/collection.cpp | 48
-rw-r--r--  src/mongo/db/catalog/collection.h | 33
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp | 40
-rw-r--r--  src/mongo/db/catalog/collection_info_cache.cpp | 13
-rw-r--r--  src/mongo/db/catalog/collection_info_cache.h | 12
-rw-r--r--  src/mongo/db/catalog/collection_info_cache_impl.cpp | 12
-rw-r--r--  src/mongo/db/catalog/collection_info_cache_impl.h | 1
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp | 6
-rw-r--r--  src/mongo/db/catalog/database.cpp | 61
-rw-r--r--  src/mongo/db/catalog/database.h | 84
-rw-r--r--  src/mongo/db/catalog/database_holder.cpp | 29
-rw-r--r--  src/mongo/db/catalog/database_holder.h | 13
-rw-r--r--  src/mongo/db/catalog/database_holder_impl.cpp | 27
-rw-r--r--  src/mongo/db/catalog/database_holder_mock.cpp | 16
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp | 117
-rw-r--r--  src/mongo/db/catalog/database_impl.h | 14
-rw-r--r--  src/mongo/db/catalog/index_catalog.cpp | 56
-rw-r--r--  src/mongo/db/catalog/index_catalog.h | 42
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry.cpp | 25
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry.h | 20
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry_impl.cpp | 24
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp | 45
-rw-r--r--  src/mongo/db/catalog/index_consistency.cpp | 2
-rw-r--r--  src/mongo/db/catalog/index_create.cpp | 14
-rw-r--r--  src/mongo/db/catalog/index_create.h | 11
-rw-r--r--  src/mongo/db/catalog/index_create_impl_servers.cpp | 12
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp | 4
-rw-r--r--  src/mongo/db/catalog/uuid_catalog.cpp | 2
-rw-r--r--  src/mongo/db/catalog_raii.cpp | 4
-rw-r--r--  src/mongo/db/catalog_raii.h | 9
-rw-r--r--  src/mongo/db/cloner.cpp | 46
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp | 6
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp | 7
-rw-r--r--  src/mongo/db/commands/dbcommands_d.cpp | 2
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp | 4
-rw-r--r--  src/mongo/db/commands/mr.cpp | 5
-rw-r--r--  src/mongo/db/commands/resize_oplog.cpp | 2
-rw-r--r--  src/mongo/db/commands/restart_catalog_command.cpp | 2
-rw-r--r--  src/mongo/db/db.cpp | 2
-rw-r--r--  src/mongo/db/db_raii.cpp | 5
-rw-r--r--  src/mongo/db/index_builder.cpp | 4
-rw-r--r--  src/mongo/db/keypattern.cpp | 2
-rw-r--r--  src/mongo/db/op_observer_impl.cpp | 2
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp | 2
-rw-r--r--  src/mongo/db/query/plan_executor.cpp | 2
-rw-r--r--  src/mongo/db/repair_database.cpp | 4
-rw-r--r--  src/mongo/db/repair_database_and_check_version.cpp | 12
-rw-r--r--  src/mongo/db/repl/apply_ops.cpp | 4
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 4
-rw-r--r--  src/mongo/db/repl/rollback_impl.cpp | 2
-rw-r--r--  src/mongo/db/repl/rollback_test_fixture.cpp | 6
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 4
-rw-r--r--  src/mongo/db/repl/rs_rollback_test.cpp | 4
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp | 2
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp | 2
-rw-r--r--  src/mongo/db/repl/sync_tail_test.cpp | 5
-rw-r--r--  src/mongo/db/s/implicit_create_collection.cpp | 2
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp | 14
-rw-r--r--  src/mongo/db/s/shard_filtering_metadata_refresh.cpp | 4
-rw-r--r--  src/mongo/db/service_context_d_test_fixture.cpp | 11
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp | 2
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp | 2
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp | 2
-rw-r--r--  src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp | 2
-rw-r--r--  src/mongo/db/storage/kv/kv_engine_test_harness.cpp | 2
-rw-r--r--  src/mongo/db/storage/kv/kv_engine_test_harness.h | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/repair_database.cpp | 6
-rw-r--r--  src/mongo/db/storage/sorted_data_interface_test_harness.h | 4
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp | 2
-rw-r--r--  src/mongo/db/views/durable_view_catalog.cpp | 2
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp | 7
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp | 2
-rw-r--r--  src/mongo/dbtests/querytests.cpp | 12
-rw-r--r--  src/mongo/dbtests/rollbacktests.cpp | 118
89 files changed, 736 insertions, 739 deletions
diff --git a/src/mongo/SConscript b/src/mongo/SConscript
index 6d6b8dfbdb0..c7ae4b645c4 100644
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
@@ -65,6 +65,7 @@ env.Library(
'base/initializer_dependency_graph.cpp',
'base/make_string_vector.cpp',
'base/parse_number.cpp',
+ 'base/shim.cpp',
'base/simple_string_data_comparator.cpp',
'base/status.cpp',
'base/string_data.cpp',
diff --git a/src/mongo/base/CPPLINT.cfg b/src/mongo/base/CPPLINT.cfg
new file mode 100644
index 00000000000..600079e64a9
--- /dev/null
+++ b/src/mongo/base/CPPLINT.cfg
@@ -0,0 +1,6 @@
+# Stop cpplint from looking for other config above this level
+set noparent
+
+# `cpplint.py` has trouble with macros defining classes in complex ways.
+# So we disable it entirely for the shim header
+exclude_files=shim\.h
diff --git a/src/mongo/base/init.h b/src/mongo/base/init.h
index 1d6595592aa..50a8eee3529 100644
--- a/src/mongo/base/init.h
+++ b/src/mongo/base/init.h
@@ -117,15 +117,15 @@
* of the function to declare would be options.
*/
#define MONGO_INITIALIZER_GENERAL(NAME, PREREQUISITES, DEPENDENTS) \
- ::mongo::Status _MONGO_INITIALIZER_FUNCTION_NAME(NAME)(::mongo::InitializerContext*); \
+ ::mongo::Status MONGO_INITIALIZER_FUNCTION_NAME_(NAME)(::mongo::InitializerContext*); \
namespace { \
::mongo::GlobalInitializerRegisterer _mongoInitializerRegisterer_##NAME( \
std::string(#NAME), \
MONGO_MAKE_STRING_VECTOR PREREQUISITES, \
MONGO_MAKE_STRING_VECTOR DEPENDENTS, \
- mongo::InitializerFunction(_MONGO_INITIALIZER_FUNCTION_NAME(NAME))); \
+ mongo::InitializerFunction(MONGO_INITIALIZER_FUNCTION_NAME_(NAME))); \
} \
- ::mongo::Status _MONGO_INITIALIZER_FUNCTION_NAME(NAME)
+ ::mongo::Status MONGO_INITIALIZER_FUNCTION_NAME_(NAME)
/**
* Macro to define an initializer group.
@@ -143,4 +143,4 @@
* Macro to produce a name for a mongo initializer function for an initializer operation
* named "NAME".
*/
-#define _MONGO_INITIALIZER_FUNCTION_NAME(NAME) _mongoInitializerFunction_##NAME
+#define MONGO_INITIALIZER_FUNCTION_NAME_(NAME) _mongoInitializerFunction_##NAME
diff --git a/src/mongo/base/shim.cpp b/src/mongo/base/shim.cpp
new file mode 100644
index 00000000000..b5834086563
--- /dev/null
+++ b/src/mongo/base/shim.cpp
@@ -0,0 +1,36 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#include "mongo/base/shim.h"
+
+
+namespace mongo {
+namespace {
+MONGO_INITIALIZER_GROUP(ShimHooks, MONGO_NO_PREREQUISITES, ("default"));
+} // namespace
+} // namespace mongo
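The empty `ShimHooks` initializer group above is an ordering anchor: every `MONGO_REGISTER_SHIM` registration names `"ShimHooks"` as a dependent (via `MONGO_SHIM_DEPENDENTS` in `shim.h` below), so all shim implementations are installed before anything sequenced after the group runs. A hedged sketch of startup code relying on that ordering; the initializer name `UsesShims` is hypothetical and not part of this commit:
```
// Hypothetical initializer that may safely call shimmed functions: it lists the
// ShimHooks group as a prerequisite, so it runs only after every
// MONGO_REGISTER_SHIM registration has installed its implementation.
MONGO_INITIALIZER_GENERAL(UsesShims, ("ShimHooks"), MONGO_NO_DEPENDENTS)
(::mongo::InitializerContext*) {
    // ... call shim-backed functions here ...
    return ::mongo::Status::OK();
}
```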
diff --git a/src/mongo/base/shim.h b/src/mongo/base/shim.h
new file mode 100644
index 00000000000..49a21dd81a4
--- /dev/null
+++ b/src/mongo/base/shim.h
@@ -0,0 +1,257 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#pragma once
+
+#include <functional>
+
+#include "mongo/base/init.h"
+#include "mongo/config.h"
+
+/**
+ * The `SHIM` mechanism allows for the creation of "weak-symbol-like" functions which can have their
+ * actual implementation injected in the final binary without creating a link dependency upon any
+ * actual implementation. One uses it like this:
+ *
+ * In a header:
+ * ```
+ * class MyClass {
+ * public:
+ * static MONGO_DECLARE_SHIM((int)->std::string) helloWorldFunction;
+ * };
+ * ```
+ *
+ * In the corresponding C++ file (which is a link dependency):
+ * ```
+ * MONGO_DEFINE_SHIM(MyClass::helloWorldFunction);
+ * ```
+ *
+ * And in any number of implementation files:
+ * ```
+ * MONGO_REGISTER_SHIM(MyClass::helloWorldFunction)(int value)->std::string {
+ * if (value == 42) {
+ * return "Hello World";
+ * } else {
+ * return "No way!";
+ * }
+ * }
+ * ```
+ *
+ * This can be useful for making auto-registering and auto-constructing mock and release class
+ * factories, among other useful things
+ */
+
+namespace mongo {
+template <typename T>
+struct PrivateCall;
+
+/**
+ * When declaring shim functions that should be private, they really need to be public; however,
+ * this type can be used as a parameter to permit the function to only be called by the type
+ * specified in the template parameter.
+ */
+template <typename T>
+struct PrivateTo {
+private:
+ friend PrivateCall<T>;
+
+ PrivateTo() = default;
+};
+
+/**
+ * When calling shim functions that should be private, you pass an immediately created instance of
+ * the type `PrivateCall< T >`, where `T` is the type that `PrivateTo` requires as a template
+ * parameter.
+ */
+template <typename T>
+struct PrivateCall {
+private:
+ friend T;
+ PrivateCall() {}
+
+public:
+ operator PrivateTo<T>() {
+ return {};
+ }
+};
+} // namespace mongo
+
+namespace shim_detail {
+/**
+ * This type, `storage`, is used as a workaround for needing C++17 `inline` variables. The template
+ * static member is effectively `inline` already.
+ */
+template <typename T, typename tag = void>
+struct storage {
+ static T data;
+};
+
+template <typename T, typename tag>
+T storage<T, tag>::data = {};
+} // namespace shim_detail
+
+#define MONGO_SHIM_DEPENDENTS ("ShimHooks")
+
+namespace mongo {
+#ifdef MONGO_CONFIG_CHECK_SHIM_DEPENDENCIES
+const bool checkShimsViaTUHook = true;
+#define MONGO_SHIM_TU_HOOK(name) \
+ name {}
+#else
+const bool checkShimsViaTUHook = false;
+#define MONGO_SHIM_TU_HOOK(name)
+#endif
+} // namespace mongo
+
+/**
+ * Declare a shimmable function with signature `SHIM_SIGNATURE`. Declare such constructs in a C++
+ * header as static members of a class.
+ */
+#define MONGO_DECLARE_SHIM(/*SHIM_SIGNATURE*/...) MONGO_DECLARE_SHIM_1(__LINE__, __VA_ARGS__)
+#define MONGO_DECLARE_SHIM_1(LN, ...) MONGO_DECLARE_SHIM_2(LN, __VA_ARGS__)
+#define MONGO_DECLARE_SHIM_2(LN, ...) \
+ const struct ShimBasis_##LN { \
+ ShimBasis_##LN() = default; \
+ struct MongoShimImplGuts { \
+ template <bool required = mongo::checkShimsViaTUHook> \
+ struct AbiCheckType { \
+ AbiCheckType() = default; \
+ }; \
+ using AbiCheck = AbiCheckType<>; \
+ struct LibTUHookTypeBase { \
+ LibTUHookTypeBase(); \
+ }; \
+ template <bool required = mongo::checkShimsViaTUHook> \
+ struct LibTUHookType : LibTUHookTypeBase {}; \
+ using LibTUHook = LibTUHookType<>; \
+ struct ImplTUHookTypeBase { \
+ ImplTUHookTypeBase(); \
+ }; \
+ template <bool required = mongo::checkShimsViaTUHook> \
+ struct ImplTUHookType : ImplTUHookTypeBase {}; \
+ using ImplTUHook = ImplTUHookType<>; \
+ \
+ static auto functionTypeHelper __VA_ARGS__; \
+ /* Workaround for Microsoft -- by taking the address of this function pointer, we \
+ * avoid the problems that their compiler has with default * arguments in deduced \
+ * typedefs. */ \
+ using function_type_pointer = decltype(&MongoShimImplGuts::functionTypeHelper); \
+ using function_type = std::remove_pointer_t<function_type_pointer>; \
+ MongoShimImplGuts* abi(const AbiCheck* const) { \
+ return this; \
+ } \
+ MongoShimImplGuts* lib(const LibTUHook* const) { \
+ MONGO_SHIM_TU_HOOK(LibTUHook); \
+ return this; \
+ } \
+ MongoShimImplGuts* impl(const ImplTUHook* const) { \
+ MONGO_SHIM_TU_HOOK(ImplTUHook); \
+ return this; \
+ } \
+ virtual auto implementation __VA_ARGS__ = 0; \
+ \
+ using tag = \
+ std::tuple<MongoShimImplGuts::function_type, AbiCheck, LibTUHook, ImplTUHook>; \
+ }; \
+ \
+ using storage = shim_detail::storage<MongoShimImplGuts*, MongoShimImplGuts::tag>; \
+ \
+ /* TODO: When the dependency graph is fixed, add the `impl()->` call to the call chain */ \
+ template <typename... Args> \
+ auto operator()(Args&&... args) const \
+ noexcept(noexcept(storage::data->abi(nullptr)->lib(nullptr)->implementation( \
+ std::forward<Args>(args)...))) \
+ -> decltype(storage::data->abi(nullptr)->lib(nullptr)->implementation( \
+ std::forward<Args>(args)...)) { \
+ return storage::data->abi(nullptr)->lib(nullptr)->implementation( \
+ std::forward<Args>(args)...); \
+ } \
+ }
+
+/**
+ * Define a shimmable function with name `SHIM_NAME`, returning a value of type `RETURN_TYPE`, with
+ * any arguments. This shim definition macro should go in the associated C++ file to the header
+ * where a SHIM was defined. This macro does not emit a function definition, only the customization
+ * point's machinery.
+ */
+#define MONGO_DEFINE_SHIM(/*SHIM_NAME*/...) MONGO_DEFINE_SHIM_1(__LINE__, __VA_ARGS__)
+#define MONGO_DEFINE_SHIM_1(LN, ...) MONGO_DEFINE_SHIM_2(LN, __VA_ARGS__)
+#define MONGO_DEFINE_SHIM_2(LN, ...) \
+ namespace { \
+ namespace shim_namespace##LN { \
+ using ShimType = decltype(__VA_ARGS__); \
+ } /*namespace shim_namespace*/ \
+ } /*namespace*/ \
+ shim_namespace##LN::ShimType::MongoShimImplGuts::LibTUHookTypeBase::LibTUHookTypeBase() = \
+ default; \
+ shim_namespace##LN::ShimType __VA_ARGS__{};
+
+#define MONGO_SHIM_EVIL_STRINGIFY_(args) #args
+
+
+/**
+ * Define an implementation of a shimmable function with name `SHIM_NAME`. The compiler will check
+ * supplied parameters for correctness. This shim registration macro should go in the associated
+ * C++ implementation file to the header where a SHIM was defined. Such a file would be a mock
+ * implementation or a real implementation, for example
+ */
+#define MONGO_REGISTER_SHIM(/*SHIM_NAME*/...) MONGO_REGISTER_SHIM_1(__LINE__, __VA_ARGS__)
+#define MONGO_REGISTER_SHIM_1(LN, ...) MONGO_REGISTER_SHIM_2(LN, __VA_ARGS__)
+#define MONGO_REGISTER_SHIM_2(LN, ...) \
+ namespace { \
+ namespace shim_namespace##LN { \
+ using ShimType = decltype(__VA_ARGS__); \
+ \
+ class Implementation final : public ShimType::MongoShimImplGuts { \
+ /* Some compilers don't work well with the trailing `override` in this kind of \
+ * function declaration. */ \
+ ShimType::MongoShimImplGuts::function_type implementation; /* override */ \
+ }; \
+ \
+ ::mongo::Status createInitializerRegistration(::mongo::InitializerContext* const) { \
+ static Implementation impl; \
+ ShimType::storage::data = &impl; \
+ return Status::OK(); \
+ } \
+ \
+ const ::mongo::GlobalInitializerRegisterer registrationHook{ \
+ std::string(MONGO_SHIM_EVIL_STRINGIFY_((__VA_ARGS__))), \
+ {}, \
+ {MONGO_SHIM_DEPENDENTS}, \
+ mongo::InitializerFunction(createInitializerRegistration)}; \
+ } /*namespace shim_namespace*/ \
+ } /*namespace*/ \
+ \
+ shim_namespace##LN::ShimType::MongoShimImplGuts::ImplTUHookTypeBase::ImplTUHookTypeBase() = \
+ default; \
+ \
+ auto shim_namespace##LN::Implementation::implementation /* After this point someone just \
+ writes the signature's arguments \
+ and return value (using arrow \
+ notation). Then they write the \
+ body. */
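The `PrivateTo`/`PrivateCall` pair declared in this header lets a shim stay technically public (so the macros can reference it) while restricting who may call it. A minimal sketch, assuming a hypothetical class `Widget`; the pattern mirrors how `Collection::makeImpl` and `Database::makeImpl` are declared later in this commit:
```
class Widget {
public:
    // Public member so the shim machinery can reference it, but the trailing
    // PrivateTo<Widget> parameter means only Widget can produce the token.
    static MONGO_DECLARE_SHIM((int value, PrivateTo<Widget>)->int) helper;

    static int useHelper(int v) {
        // Widget is a friend of PrivateCall<Widget>, so it alone can construct
        // the token, which then converts implicitly to PrivateTo<Widget>.
        return helper(v, PrivateCall<Widget>{});
    }
};

// Code outside Widget cannot call Widget::helper(...): constructing a
// PrivateCall<Widget> (or a PrivateTo<Widget>) there fails to compile.
```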
diff --git a/src/mongo/client/embedded/embedded.cpp b/src/mongo/client/embedded/embedded.cpp
index a77b8fcecd7..580843414ea 100644
--- a/src/mongo/client/embedded/embedded.cpp
+++ b/src/mongo/client/embedded/embedded.cpp
@@ -147,7 +147,7 @@ void shutdown(ServiceContext* srvContext) {
{
UninterruptibleLockGuard noInterrupt(shutdownOpCtx->lockState());
Lock::GlobalLock lk(shutdownOpCtx.get(), MODE_X);
- dbHolder().closeAll(shutdownOpCtx.get(), "shutdown");
+ DatabaseHolder::getDatabaseHolder().closeAll(shutdownOpCtx.get(), "shutdown");
// Shut down the background periodic task runner
if (auto runner = serviceContext->getPeriodicRunner()) {
diff --git a/src/mongo/client/embedded/index_create_impl_embedded.cpp b/src/mongo/client/embedded/index_create_impl_embedded.cpp
index de56784d039..a58560f3945 100644
--- a/src/mongo/client/embedded/index_create_impl_embedded.cpp
+++ b/src/mongo/client/embedded/index_create_impl_embedded.cpp
@@ -44,14 +44,12 @@ class MultiIndexBlockImplEmbedded : public MultiIndexBlockImpl {
return false;
}
};
+} // namespace
-MONGO_INITIALIZER(InitializeMultiIndexBlockFactory)(InitializerContext* const) {
- MultiIndexBlock::registerFactory(
- [](OperationContext* const opCtx, Collection* const collection) {
- return stdx::make_unique<MultiIndexBlockImplEmbedded>(opCtx, collection);
- });
- return Status::OK();
+MONGO_REGISTER_SHIM(MultiIndexBlock::makeImpl)
+(OperationContext* const opCtx, Collection* const collection, PrivateTo<MultiIndexBlock>)
+ ->std::unique_ptr<MultiIndexBlock::Impl> {
+ return std::make_unique<MultiIndexBlockImplEmbedded>(opCtx, collection);
}
-} // namespace
} // namespace mongo
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index c2f57ca82a4..5ed9e0ad896 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -130,6 +130,7 @@ env.Library(
"database.cpp",
],
LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
],
)
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index 684862b6a9f..f97cb254d72 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -56,7 +56,7 @@ void closeCatalog(OperationContext* opCtx) {
// Close all databases.
log() << "closeCatalog: closing all databases";
constexpr auto reason = "closing databases for closeCatalog";
- dbHolder().closeAll(opCtx, reason);
+ DatabaseHolder::getDatabaseHolder().closeAll(opCtx, reason);
// Close the storage engine's catalog.
log() << "closeCatalog: closing storage engine catalog";
@@ -145,7 +145,7 @@ void openCatalog(OperationContext* opCtx) {
storageEngine->listDatabases(&databasesToOpen);
for (auto&& dbName : databasesToOpen) {
LOG(1) << "openCatalog: dbholder reopening database " << dbName;
- auto db = dbHolder().openDb(opCtx, dbName);
+ auto db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName);
invariant(db, str::stream() << "failed to reopen database " << dbName);
std::list<std::string> collections;
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 13bb493e5a6..915980a34d3 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -70,56 +70,14 @@ namespace mongo {
// Emit the vtable in this TU
Collection::Impl::~Impl() = default;
-namespace {
-stdx::function<Collection::factory_function_type> factory;
-} // namespace
+MONGO_DEFINE_SHIM(Collection::makeImpl);
-void Collection::registerFactory(decltype(factory) newFactory) {
- factory = std::move(newFactory);
-}
+MONGO_DEFINE_SHIM(Collection::parseValidationLevel);
-auto Collection::makeImpl(Collection* _this,
- OperationContext* const opCtx,
- const StringData fullNS,
- OptionalCollectionUUID uuid,
- CollectionCatalogEntry* const details,
- RecordStore* const recordStore,
- DatabaseCatalogEntry* const dbce) -> std::unique_ptr<Impl> {
- return factory(_this, opCtx, fullNS, uuid, details, recordStore, dbce);
-}
+MONGO_DEFINE_SHIM(Collection::parseValidationAction);
void Collection::TUHook::hook() noexcept {}
-
-namespace {
-stdx::function<decltype(Collection::parseValidationLevel)> parseValidationLevelImpl;
-} // namespace
-
-void Collection::registerParseValidationLevelImpl(
- stdx::function<decltype(parseValidationLevel)> impl) {
- parseValidationLevelImpl = std::move(impl);
-}
-
-auto Collection::parseValidationLevel(const StringData data) -> StatusWith<ValidationLevel> {
- return parseValidationLevelImpl(data);
-}
-
-namespace {
-stdx::function<decltype(Collection::parseValidationAction)> parseValidationActionImpl;
-} // namespace
-
-void Collection::registerParseValidationActionImpl(
- stdx::function<decltype(parseValidationAction)> impl) {
- parseValidationActionImpl = std::move(impl);
-}
-
-auto Collection::parseValidationAction(const StringData data) -> StatusWith<ValidationAction> {
- return parseValidationActionImpl(data);
-}
-} // namespace mongo
-
-
-namespace mongo {
std::string CompactOptions::toString() const {
std::stringstream ss;
ss << "paddingMode: ";
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 042c7a3a0c5..9c41f8478f3 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -330,19 +330,16 @@ public:
virtual const CollatorInterface* getDefaultCollator() const = 0;
};
-private:
- static std::unique_ptr<Impl> makeImpl(Collection* _this,
- OperationContext* opCtx,
- StringData fullNS,
- OptionalCollectionUUID uuid,
- CollectionCatalogEntry* details,
- RecordStore* recordStore,
- DatabaseCatalogEntry* dbce);
-
public:
- using factory_function_type = decltype(makeImpl);
-
- static void registerFactory(stdx::function<factory_function_type> factory);
+ static MONGO_DECLARE_SHIM((Collection * _this,
+ OperationContext* opCtx,
+ StringData fullNS,
+ OptionalCollectionUUID uuid,
+ CollectionCatalogEntry* details,
+ RecordStore* recordStore,
+ DatabaseCatalogEntry* dbce,
+ PrivateTo<Collection>)
+ ->std::unique_ptr<Impl>) makeImpl;
explicit inline Collection(OperationContext* const opCtx,
const StringData fullNS,
@@ -350,7 +347,8 @@ public:
CollectionCatalogEntry* const details, // does not own
RecordStore* const recordStore, // does not own
DatabaseCatalogEntry* const dbce) // does not own
- : _pimpl(makeImpl(this, opCtx, fullNS, uuid, details, recordStore, dbce)) {
+ : _pimpl(makeImpl(
+ this, opCtx, fullNS, uuid, details, recordStore, dbce, PrivateCall<Collection>{})) {
this->_impl().init(opCtx);
}
@@ -627,13 +625,8 @@ public:
opCtx, validator, allowedFeatures, maxFeatureCompatibilityVersion);
}
- static StatusWith<ValidationLevel> parseValidationLevel(StringData);
- static StatusWith<ValidationAction> parseValidationAction(StringData);
-
- static void registerParseValidationLevelImpl(
- stdx::function<decltype(parseValidationLevel)> impl);
- static void registerParseValidationActionImpl(
- stdx::function<decltype(parseValidationAction)> impl);
+ static MONGO_DECLARE_SHIM((StringData)->StatusWith<ValidationLevel>) parseValidationLevel;
+ static MONGO_DECLARE_SHIM((StringData)->StatusWith<ValidationAction>) parseValidationAction;
/**
* Sets the validator for this collection.
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index 74909e46f0c..f08d5be410e 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -76,34 +76,30 @@
namespace mongo {
-namespace {
-MONGO_INITIALIZER(InitializeCollectionFactory)(InitializerContext* const) {
- Collection::registerFactory(
- [](Collection* const _this,
- OperationContext* const opCtx,
- const StringData fullNS,
- OptionalCollectionUUID uuid,
- CollectionCatalogEntry* const details,
- RecordStore* const recordStore,
- DatabaseCatalogEntry* const dbce) -> std::unique_ptr<Collection::Impl> {
- return stdx::make_unique<CollectionImpl>(
- _this, opCtx, fullNS, uuid, details, recordStore, dbce);
- });
- return Status::OK();
+MONGO_REGISTER_SHIM(Collection::makeImpl)
+(Collection* const _this,
+ OperationContext* const opCtx,
+ const StringData fullNS,
+ OptionalCollectionUUID uuid,
+ CollectionCatalogEntry* const details,
+ RecordStore* const recordStore,
+ DatabaseCatalogEntry* const dbce,
+ PrivateTo<Collection>)
+ ->std::unique_ptr<Collection::Impl> {
+ return std::make_unique<CollectionImpl>(_this, opCtx, fullNS, uuid, details, recordStore, dbce);
}
-MONGO_INITIALIZER(InitializeParseValidationLevelImpl)(InitializerContext* const) {
- Collection::registerParseValidationLevelImpl(
- [](const StringData data) { return CollectionImpl::parseValidationLevel(data); });
- return Status::OK();
+MONGO_REGISTER_SHIM(Collection::parseValidationLevel)
+(const StringData data)->StatusWith<Collection::ValidationLevel> {
+ return CollectionImpl::parseValidationLevel(data);
}
-MONGO_INITIALIZER(InitializeParseValidationActionImpl)(InitializerContext* const) {
- Collection::registerParseValidationActionImpl(
- [](const StringData data) { return CollectionImpl::parseValidationAction(data); });
- return Status::OK();
+MONGO_REGISTER_SHIM(Collection::parseValidationAction)
+(const StringData data)->StatusWith<Collection::ValidationAction> {
+ return CollectionImpl::parseValidationAction(data);
}
+namespace {
// Used below to fail during inserts.
MONGO_FP_DECLARE(failCollectionInserts);
diff --git a/src/mongo/db/catalog/collection_info_cache.cpp b/src/mongo/db/catalog/collection_info_cache.cpp
index 4bd1e4715f7..0a36bb07448 100644
--- a/src/mongo/db/catalog/collection_info_cache.cpp
+++ b/src/mongo/db/catalog/collection_info_cache.cpp
@@ -33,20 +33,9 @@
#include "mongo/db/catalog/collection_info_cache.h"
namespace mongo {
-namespace {
-stdx::function<CollectionInfoCache::factory_function_type> factory;
-} // namespace
-
CollectionInfoCache::Impl::~Impl() = default;
-void CollectionInfoCache::registerFactory(decltype(factory) newFactory) {
- factory = std::move(newFactory);
-}
-
-auto CollectionInfoCache::makeImpl(Collection* const collection, const NamespaceString& ns)
- -> std::unique_ptr<Impl> {
- return factory(collection, ns);
-}
+MONGO_DEFINE_SHIM(CollectionInfoCache::makeImpl);
void CollectionInfoCache::TUHook::hook() noexcept {}
} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_info_cache.h b/src/mongo/db/catalog/collection_info_cache.h
index 9bc07e80c61..6fb0fa5278e 100644
--- a/src/mongo/db/catalog/collection_info_cache.h
+++ b/src/mongo/db/catalog/collection_info_cache.h
@@ -28,6 +28,7 @@
#pragma once
+#include "mongo/base/shim.h"
#include "mongo/db/collection_index_usage_tracker.h"
#include "mongo/db/query/plan_cache.h"
#include "mongo/db/query/query_settings.h"
@@ -69,16 +70,15 @@ public:
const std::set<std::string>& indexesUsed) = 0;
};
-private:
- static std::unique_ptr<Impl> makeImpl(Collection* collection, const NamespaceString& ns);
public:
- using factory_function_type = decltype(makeImpl);
-
- static void registerFactory(stdx::function<factory_function_type> factory);
+ static MONGO_DECLARE_SHIM((Collection * collection,
+ const NamespaceString& ns,
+ PrivateTo<CollectionInfoCache>)
+ ->std::unique_ptr<Impl>) makeImpl;
explicit inline CollectionInfoCache(Collection* const collection, const NamespaceString& ns)
- : _pimpl(makeImpl(collection, ns)) {}
+ : _pimpl(makeImpl(collection, ns, PrivateCall<CollectionInfoCache>{})) {}
inline ~CollectionInfoCache() = default;
diff --git a/src/mongo/db/catalog/collection_info_cache_impl.cpp b/src/mongo/db/catalog/collection_info_cache_impl.cpp
index b3366f70ae2..d23da8c5a81 100644
--- a/src/mongo/db/catalog/collection_info_cache_impl.cpp
+++ b/src/mongo/db/catalog/collection_info_cache_impl.cpp
@@ -49,15 +49,11 @@
#include "mongo/util/log.h"
namespace mongo {
-namespace {
-MONGO_INITIALIZER(InitializeCollectionInfoCacheFactory)(InitializerContext* const) {
- CollectionInfoCache::registerFactory(
- [](Collection* const collection, const NamespaceString& ns) {
- return stdx::make_unique<CollectionInfoCacheImpl>(collection, ns);
- });
- return Status::OK();
+MONGO_REGISTER_SHIM(CollectionInfoCache::makeImpl)
+(Collection* const collection, const NamespaceString& ns, PrivateTo<CollectionInfoCache>)
+ ->std::unique_ptr<CollectionInfoCache::Impl> {
+ return std::make_unique<CollectionInfoCacheImpl>(collection, ns);
}
-} // namespace
CollectionInfoCacheImpl::CollectionInfoCacheImpl(Collection* collection, const NamespaceString& ns)
: _collection(collection),
diff --git a/src/mongo/db/catalog/collection_info_cache_impl.h b/src/mongo/db/catalog/collection_info_cache_impl.h
index 58be19ed045..38e03f68a7b 100644
--- a/src/mongo/db/catalog/collection_info_cache_impl.h
+++ b/src/mongo/db/catalog/collection_info_cache_impl.h
@@ -30,6 +30,7 @@
#include "mongo/db/catalog/collection_info_cache.h"
+#include "mongo/base/shim.h"
#include "mongo/db/collection_index_usage_tracker.h"
#include "mongo/db/query/plan_cache.h"
#include "mongo/db/query/query_settings.h"
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 950420fa787..f8258d36f07 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -101,8 +101,8 @@ Status createCollection(OperationContext* opCtx,
// Create collection.
const bool createDefaultIndexes = true;
- status =
- userCreateNS(opCtx, ctx.db(), nss.ns(), options, kind, createDefaultIndexes, idIndex);
+ status = Database::userCreateNS(
+ opCtx, ctx.db(), nss.ns(), options, kind, createDefaultIndexes, idIndex);
if (!status.isOK()) {
return status;
@@ -137,7 +137,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
auto newCmd = cmdObj;
auto* const serviceContext = opCtx->getServiceContext();
- auto* const db = dbHolder().get(opCtx, dbName);
+ auto* const db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName);
// If a UUID is given, see if we need to rename a collection out of the way, and whether the
// collection already exists under a different name. If so, rename it into place. As this is
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 527c9e847ab..a9963a2dc7d 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -36,66 +36,13 @@
namespace mongo {
Database::Impl::~Impl() = default;
-namespace {
-stdx::function<Database::factory_function_type> factory;
-} // namespace
-
-void Database::registerFactory(decltype(factory) newFactory) {
- factory = std::move(newFactory);
-}
-
-auto Database::makeImpl(Database* const this_,
- OperationContext* const opCtx,
- const StringData name,
- DatabaseCatalogEntry* const dbEntry) -> std::unique_ptr<Impl> {
- return factory(this_, opCtx, name, dbEntry);
-}
+MONGO_DEFINE_SHIM(Database::makeImpl);
void Database::TUHook::hook() noexcept {}
-namespace {
-stdx::function<decltype(Database::dropDatabase)> dropDatabaseImpl;
-}
-
-void Database::dropDatabase(OperationContext* const opCtx, Database* const db) {
- return dropDatabaseImpl(opCtx, db);
-}
+MONGO_DEFINE_SHIM(Database::dropDatabase);
-void Database::registerDropDatabaseImpl(stdx::function<decltype(dropDatabase)> impl) {
- dropDatabaseImpl = std::move(impl);
-}
+MONGO_DEFINE_SHIM(Database::userCreateNS);
-namespace {
-stdx::function<decltype(userCreateNS)> userCreateNSImpl;
-stdx::function<decltype(dropAllDatabasesExceptLocal)> dropAllDatabasesExceptLocalImpl;
-} // namespace
+MONGO_DEFINE_SHIM(Database::dropAllDatabasesExceptLocal);
} // namespace mongo
-
-auto mongo::userCreateNS(OperationContext* const opCtx,
- Database* const db,
- const StringData ns,
- const BSONObj options,
- const CollectionOptions::ParseKind parseKind,
- const bool createDefaultIndexes,
- const BSONObj& idIndex) -> Status {
- return userCreateNSImpl(opCtx, db, ns, options, parseKind, createDefaultIndexes, idIndex);
-}
-
-void mongo::registerUserCreateNSImpl(stdx::function<decltype(userCreateNS)> impl) {
- userCreateNSImpl = std::move(impl);
-}
-
-void mongo::dropAllDatabasesExceptLocal(OperationContext* const opCtx) {
- return dropAllDatabasesExceptLocalImpl(opCtx);
-}
-
-/**
- * Registers an implementation of `dropAllDatabaseExceptLocal` for use by library clients.
- * This is necessary to allow `catalog/database` to be a vtable edge.
- * @param impl Implementation of `dropAllDatabaseExceptLocal` to install.
- * @note This call is not thread safe.
- */
-void mongo::registerDropAllDatabasesExceptLocalImpl(
- stdx::function<decltype(dropAllDatabasesExceptLocal)> impl) {
- dropAllDatabasesExceptLocalImpl = std::move(impl);
-}
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index e5fab444357..2a5e281c1b5 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -31,6 +31,8 @@
#include <memory>
#include <string>
+#include "mongo/base/shim.h"
+#include "mongo/base/status.h"
#include "mongo/base/string_data.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/db/catalog/collection.h"
@@ -45,6 +47,7 @@
#include "mongo/util/string_map.h"
namespace mongo {
+
/**
* Represents a logical database containing Collections.
*
@@ -123,16 +126,34 @@ public:
virtual const CollectionMap& collections() const = 0;
};
-private:
- static std::unique_ptr<Impl> makeImpl(Database* _this,
- OperationContext* opCtx,
- StringData name,
- DatabaseCatalogEntry* dbEntry);
-
public:
- using factory_function_type = decltype(makeImpl);
+ static MONGO_DECLARE_SHIM((OperationContext * opCtx)->void) dropAllDatabasesExceptLocal;
- static void registerFactory(stdx::function<factory_function_type> factory);
+ /**
+ * Creates the namespace 'ns' in the database 'db' according to 'options'. If
+ * 'createDefaultIndexes'
+ * is true, creates the _id index for the collection (and the system indexes, in the case of
+ * system
+ * collections). Creates the collection's _id index according to 'idIndex', if it is non-empty.
+ * When
+ * 'idIndex' is empty, creates the default _id index.
+ */
+ static MONGO_DECLARE_SHIM(
+ (OperationContext * opCtx,
+ Database* db,
+ StringData ns,
+ BSONObj options,
+ CollectionOptions::ParseKind parseKind = CollectionOptions::parseForCommand,
+ bool createDefaultIndexes = true,
+ const BSONObj& idIndex = BSONObj())
+ ->Status) userCreateNS;
+
+ static MONGO_DECLARE_SHIM((Database * this_,
+ OperationContext* opCtx,
+ StringData name,
+ DatabaseCatalogEntry*,
+ PrivateTo<Database>)
+ ->std::unique_ptr<Impl>) makeImpl;
/**
* Iterating over a Database yields Collection* pointers.
@@ -182,7 +203,7 @@ public:
explicit inline Database(OperationContext* const opCtx,
const StringData name,
DatabaseCatalogEntry* const dbEntry)
- : _pimpl(makeImpl(this, opCtx, name, dbEntry)) {
+ : _pimpl(makeImpl(this, opCtx, name, dbEntry, PrivateCall<Database>{})) {
this->_impl().init(opCtx);
}
@@ -335,17 +356,7 @@ public:
*
* Must be called with the specified database locked in X mode.
*/
- static void dropDatabase(OperationContext* opCtx, Database* db);
-
- /**
- * Registers an implementation of `Database::dropDatabase` for use by library clients.
- * This is necessary to allow `catalog/database` to be a vtable edge.
- * @param impl Implementation of `dropDatabase` to install.
- * @note This call is not thread safe.
- */
- static void registerDropDatabaseImpl(stdx::function<decltype(dropDatabase)> impl);
-
- // static Status validateDBName( StringData dbname );
+ static MONGO_DECLARE_SHIM((OperationContext * opCtx, Database* db)->void) dropDatabase;
inline const NamespaceString& getSystemIndexesName() const {
return this->_impl().getSystemIndexesName();
@@ -395,37 +406,4 @@ private:
std::unique_ptr<Impl> _pimpl;
};
-
-void dropAllDatabasesExceptLocal(OperationContext* opCtx);
-
-/**
- * Registers an implementation of `dropAllDatabaseExceptLocal` for use by library clients.
- * This is necessary to allow `catalog/database` to be a vtable edge.
- * @param impl Implementation of `dropAllDatabaseExceptLocal` to install.
- * @note This call is not thread safe.
- */
-void registerDropAllDatabasesExceptLocalImpl(
- stdx::function<decltype(dropAllDatabasesExceptLocal)> impl);
-
-/**
- * Creates the namespace 'ns' in the database 'db' according to 'options'. If 'createDefaultIndexes'
- * is true, creates the _id index for the collection (and the system indexes, in the case of system
- * collections). Creates the collection's _id index according to 'idIndex', if it is non-empty. When
- * 'idIndex' is empty, creates the default _id index.
- */
-Status userCreateNS(OperationContext* opCtx,
- Database* db,
- StringData ns,
- BSONObj options,
- CollectionOptions::ParseKind parseKind = CollectionOptions::parseForCommand,
- bool createDefaultIndexes = true,
- const BSONObj& idIndex = BSONObj());
-
-/**
- * Registers an implementation of `userCreateNS` for use by library clients.
- * This is necessary to allow `catalog/database` to be a vtable edge.
- * @param impl Implementation of `userCreateNS` to install.
- * @note This call is not thread safe.
- */
-void registerUserCreateNSImpl(stdx::function<decltype(userCreateNS)> impl);
} // namespace mongo
diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp
index 0b0c4336662..b40577d797f 100644
--- a/src/mongo/db/catalog/database_holder.cpp
+++ b/src/mongo/db/catalog/database_holder.cpp
@@ -36,32 +36,9 @@ namespace mongo {
DatabaseHolder::Impl::~Impl() = default;
-namespace {
-stdx::function<DatabaseHolder::factory_function_type> factory;
-} // namespace
-
-void DatabaseHolder::registerFactory(decltype(factory) newFactory) {
- factory = std::move(newFactory);
-}
-
-auto DatabaseHolder::makeImpl() -> std::unique_ptr<Impl> {
- return factory();
-}
-
void DatabaseHolder::TUHook::hook() noexcept {}
-namespace {
-stdx::function<decltype(dbHolder)> dbHolderImpl;
-} // namespace
-} // namespace mongo
-
-// The `mongo::` prefix is necessary to placate MSVC -- it is unable to properly identify anonymous
-// nested namespace members in `decltype` expressions when defining functions using scope-resolution
-// syntax.
-void mongo::registerDbHolderImpl(decltype(mongo::dbHolderImpl) impl) {
- dbHolderImpl = std::move(impl);
-}
+MONGO_DEFINE_SHIM(DatabaseHolder::makeImpl);
+MONGO_DEFINE_SHIM(DatabaseHolder::getDatabaseHolder);
-auto mongo::dbHolder() -> DatabaseHolder& {
- return dbHolderImpl();
-}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h
index 0edde9cfccf..3af319ab963 100644
--- a/src/mongo/db/catalog/database_holder.h
+++ b/src/mongo/db/catalog/database_holder.h
@@ -31,6 +31,7 @@
#include <set>
#include <string>
+#include "mongo/base/shim.h"
#include "mongo/base/string_data.h"
#include "mongo/db/namespace_string.h"
#include "mongo/stdx/functional.h"
@@ -62,17 +63,14 @@ public:
virtual std::set<std::string> getNamesWithConflictingCasing(StringData name) = 0;
};
-private:
- static std::unique_ptr<Impl> makeImpl();
-
public:
- using factory_function_type = decltype(makeImpl);
+ static MONGO_DECLARE_SHIM(()->DatabaseHolder&) getDatabaseHolder;
- static void registerFactory(stdx::function<factory_function_type> factory);
+ static MONGO_DECLARE_SHIM((PrivateTo<DatabaseHolder>)->std::unique_ptr<Impl>) makeImpl;
inline ~DatabaseHolder() = default;
- inline explicit DatabaseHolder() : _pimpl(makeImpl()) {}
+ inline explicit DatabaseHolder() : _pimpl(makeImpl(PrivateCall<DatabaseHolder>{})) {}
/**
* Retrieves an already opened database or returns NULL. Must be called with the database
@@ -147,7 +145,4 @@ private:
std::unique_ptr<Impl> _pimpl;
};
-
-extern DatabaseHolder& dbHolder();
-extern void registerDbHolderImpl(stdx::function<decltype(dbHolder)> impl);
} // namespace mongo
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index d5b5560df67..df70ef25c27 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -50,32 +50,29 @@
namespace mongo {
namespace {
-
-DatabaseHolder* _dbHolder = nullptr;
-
-DatabaseHolder& dbHolderImpl() {
- return *_dbHolder;
-}
+std::unique_ptr<DatabaseHolder> dbHolderStorage;
GlobalInitializerRegisterer dbHolderImplInitializer("InitializeDbHolderimpl",
- {"InitializeDatabaseHolderFactory"},
[](InitializerContext* const) {
- _dbHolder = new DatabaseHolder();
- registerDbHolderImpl(dbHolderImpl);
+ dbHolderStorage =
+ std::make_unique<DatabaseHolder>();
return Status::OK();
},
[](DeinitializerContext* const) {
- delete _dbHolder;
- _dbHolder = nullptr;
+ dbHolderStorage = nullptr;
return Status::OK();
});
+} // namespace
-MONGO_INITIALIZER(InitializeDatabaseHolderFactory)(InitializerContext* const) {
- DatabaseHolder::registerFactory([] { return stdx::make_unique<DatabaseHolderImpl>(); });
- return Status::OK();
+MONGO_REGISTER_SHIM(DatabaseHolder::getDatabaseHolder)
+()->DatabaseHolder& {
+ return *dbHolderStorage;
}
-} // namespace
+MONGO_REGISTER_SHIM(DatabaseHolder::makeImpl)
+(PrivateTo<DatabaseHolder>)->std::unique_ptr<DatabaseHolder::Impl> {
+ return std::make_unique<DatabaseHolderImpl>();
+}
using std::set;
using std::size_t;
diff --git a/src/mongo/db/catalog/database_holder_mock.cpp b/src/mongo/db/catalog/database_holder_mock.cpp
index 694091f8afc..561713fb555 100644
--- a/src/mongo/db/catalog/database_holder_mock.cpp
+++ b/src/mongo/db/catalog/database_holder_mock.cpp
@@ -33,23 +33,15 @@
#include "mongo/stdx/memory.h"
namespace mongo {
-namespace {
-DatabaseHolder& dbHolderImpl() {
+MONGO_REGISTER_SHIM(DatabaseHolder::getDatabaseHolder)()->DatabaseHolder& {
static DatabaseHolder _dbHolder;
return _dbHolder;
}
-MONGO_INITIALIZER_WITH_PREREQUISITES(InitializeDbHolderimpl, ("InitializeDatabaseHolderFactory"))
-(InitializerContext* const) {
- registerDbHolderImpl(dbHolderImpl);
- return Status::OK();
+MONGO_REGISTER_SHIM(DatabaseHolder::makeImpl)
+(PrivateTo<DatabaseHolder>)->std::unique_ptr<DatabaseHolder::Impl> {
+ return stdx::make_unique<DatabaseHolderMock>();
}
-MONGO_INITIALIZER(InitializeDatabaseHolderFactory)(InitializerContext* const) {
- DatabaseHolder::registerFactory([] { return stdx::make_unique<DatabaseHolderMock>(); });
- return Status::OK();
-}
-
-} // namespace
} // namespace mongo
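Taken together with `database_holder_impl.cpp` above, this file shows the intended link-time selection pattern: the same shim has one `MONGO_REGISTER_SHIM` definition per alternative, and whichever translation unit is linked into a binary supplies the implementation. Side by side, both excerpts taken directly from the files in this commit:
```
// database_holder_impl.cpp -- linked into server binaries:
MONGO_REGISTER_SHIM(DatabaseHolder::makeImpl)
(PrivateTo<DatabaseHolder>)->std::unique_ptr<DatabaseHolder::Impl> {
    return std::make_unique<DatabaseHolderImpl>();
}

// database_holder_mock.cpp -- linked into test binaries that want the mock:
MONGO_REGISTER_SHIM(DatabaseHolder::makeImpl)
(PrivateTo<DatabaseHolder>)->std::unique_ptr<DatabaseHolder::Impl> {
    return stdx::make_unique<DatabaseHolderMock>();
}
```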
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index fca3b112818..4d195d3cf58 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -78,16 +78,17 @@
#include "mongo/util/log.h"
namespace mongo {
-namespace {
-MONGO_INITIALIZER(InitializeDatabaseFactory)(InitializerContext* const) {
- Database::registerFactory([](Database* const this_,
- OperationContext* const opCtx,
- const StringData name,
- DatabaseCatalogEntry* const dbEntry) {
- return stdx::make_unique<DatabaseImpl>(this_, opCtx, name, dbEntry);
- });
- return Status::OK();
+MONGO_REGISTER_SHIM(Database::makeImpl)
+(Database* const this_,
+ OperationContext* const opCtx,
+ const StringData name,
+ DatabaseCatalogEntry* const dbEntry,
+ PrivateTo<Database>)
+ ->std::unique_ptr<Database::Impl> {
+ return stdx::make_unique<DatabaseImpl>(this_, opCtx, name, dbEntry);
}
+
+namespace {
MONGO_FP_DECLARE(hangBeforeLoggingCreateCollection);
} // namespace
@@ -865,7 +866,7 @@ void DatabaseImpl::dropDatabase(OperationContext* opCtx, Database* db) {
Top::get(serviceContext).collectionDropped(coll->ns().ns(), true);
}
- dbHolder().close(opCtx, name, "database dropped");
+ DatabaseHolder::getDatabaseHolder().close(opCtx, name, "database dropped");
auto const storageEngine = serviceContext->getGlobalStorageEngine();
writeConflictRetry(opCtx, "dropDatabase", name, [&] {
@@ -895,7 +896,7 @@ StatusWith<NamespaceString> DatabaseImpl::makeUniqueCollectionNamespace(
if (!_uniqueCollectionNamespacePseudoRandom) {
_uniqueCollectionNamespacePseudoRandom =
- stdx::make_unique<PseudoRandom>(Date_t::now().asInt64());
+ std::make_unique<PseudoRandom>(Date_t::now().asInt64());
}
const auto charsToChooseFrom =
@@ -935,60 +936,19 @@ StatusWith<NamespaceString> DatabaseImpl::makeUniqueCollectionNamespace(
<< " attempts due to namespace conflicts with existing collections.");
}
-namespace {
-MONGO_INITIALIZER(InitializeDropDatabaseImpl)(InitializerContext* const) {
- Database::registerDropDatabaseImpl(DatabaseImpl::dropDatabase);
- return Status::OK();
-}
-MONGO_INITIALIZER(InitializeUserCreateNSImpl)(InitializerContext* const) {
- registerUserCreateNSImpl(userCreateNSImpl);
- return Status::OK();
-}
-
-MONGO_INITIALIZER(InitializeDropAllDatabasesExceptLocalImpl)(InitializerContext* const) {
- registerDropAllDatabasesExceptLocalImpl(dropAllDatabasesExceptLocalImpl);
- return Status::OK();
+MONGO_REGISTER_SHIM(Database::dropDatabase)(OperationContext* opCtx, Database* db)->void {
+ return DatabaseImpl::dropDatabase(opCtx, db);
}
-} // namespace
-} // namespace mongo
-
-void mongo::dropAllDatabasesExceptLocalImpl(OperationContext* opCtx) {
- Lock::GlobalWrite lk(opCtx);
-
- vector<string> n;
- StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
- storageEngine->listDatabases(&n);
-
- if (n.size() == 0)
- return;
- log() << "dropAllDatabasesExceptLocal " << n.size();
-
- repl::ReplicationCoordinator::get(opCtx)->dropAllSnapshots();
-
- for (const auto& dbName : n) {
- if (dbName != "local") {
- writeConflictRetry(opCtx, "dropAllDatabasesExceptLocal", dbName, [&opCtx, &dbName] {
- Database* db = dbHolder().get(opCtx, dbName);
- // This is needed since dropDatabase can't be rolled back.
- // This is safe be replaced by "invariant(db);dropDatabase(opCtx, db);" once fixed
- if (db == nullptr) {
- log() << "database disappeared after listDatabases but before drop: " << dbName;
- } else {
- DatabaseImpl::dropDatabase(opCtx, db);
- }
- });
- }
- }
-}
-
-auto mongo::userCreateNSImpl(OperationContext* opCtx,
- Database* db,
- StringData ns,
- BSONObj options,
- CollectionOptions::ParseKind parseKind,
- bool createDefaultIndexes,
- const BSONObj& idIndex) -> Status {
+MONGO_REGISTER_SHIM(Database::userCreateNS)
+(OperationContext* opCtx,
+ Database* db,
+ StringData ns,
+ BSONObj options,
+ CollectionOptions::ParseKind parseKind,
+ bool createDefaultIndexes,
+ const BSONObj& idIndex)
+ ->Status {
invariant(db);
LOG(1) << "create collection " << ns << ' ' << options;
@@ -1087,3 +1047,34 @@ auto mongo::userCreateNSImpl(OperationContext* opCtx,
return Status::OK();
}
+
+MONGO_REGISTER_SHIM(Database::dropAllDatabasesExceptLocal)(OperationContext* opCtx)->void {
+ Lock::GlobalWrite lk(opCtx);
+
+ vector<string> n;
+ StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+ storageEngine->listDatabases(&n);
+
+ if (n.size() == 0)
+ return;
+ log() << "dropAllDatabasesExceptLocal " << n.size();
+
+ repl::ReplicationCoordinator::get(opCtx)->dropAllSnapshots();
+
+ for (const auto& dbName : n) {
+ if (dbName != "local") {
+ writeConflictRetry(opCtx, "dropAllDatabasesExceptLocal", dbName, [&opCtx, &dbName] {
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName);
+
+ // This is needed since dropDatabase can't be rolled back.
+ // This is safe be replaced by "invariant(db);dropDatabase(opCtx, db);" once fixed
+ if (db == nullptr) {
+ log() << "database disappeared after listDatabases but before drop: " << dbName;
+ } else {
+ DatabaseImpl::dropDatabase(opCtx, db);
+ }
+ });
+ }
+ }
+}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/database_impl.h b/src/mongo/db/catalog/database_impl.h
index e2102e3154b..8a341037778 100644
--- a/src/mongo/db/catalog/database_impl.h
+++ b/src/mongo/db/catalog/database_impl.h
@@ -309,18 +309,4 @@ private:
void dropAllDatabasesExceptLocalImpl(OperationContext* opCtx);
-/**
- * Creates the namespace 'ns' in the database 'db' according to 'options'. If 'createDefaultIndexes'
- * is true, creates the _id index for the collection (and the system indexes, in the case of system
- * collections). Creates the collection's _id index according to 'idIndex', if it is non-empty. When
- * 'idIndex' is empty, creates the default _id index.
- */
-Status userCreateNSImpl(OperationContext* opCtx,
- Database* db,
- StringData ns,
- BSONObj options,
- CollectionOptions::ParseKind parseKind = CollectionOptions::parseForCommand,
- bool createDefaultIndexes = true,
- const BSONObj& idIndex = BSONObj());
-
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 4d9288e841d..7a2a28a241a 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -37,19 +37,7 @@
namespace mongo {
IndexCatalog::Impl::~Impl() = default;
-namespace {
-IndexCatalog::factory_function_type factory;
-} // namespace
-
-void IndexCatalog::registerFactory(decltype(factory) newFactory) {
- factory = std::move(newFactory);
-}
-
-auto IndexCatalog::makeImpl(IndexCatalog* const this_,
- Collection* const collection,
- const int maxNumIndexesAllowed) -> std::unique_ptr<Impl> {
- return factory(this_, collection, maxNumIndexesAllowed);
-}
+MONGO_DEFINE_SHIM(IndexCatalog::makeImpl);
void IndexCatalog::TUHook::hook() noexcept {}
@@ -63,38 +51,11 @@ IndexCatalogEntry* IndexCatalog::_setupInMemoryStructures(
IndexCatalog::IndexIterator::Impl::~Impl() = default;
-namespace {
-IndexCatalog::IndexIterator::factory_function_type iteratorFactory;
-} // namespace
-
-void IndexCatalog::IndexIterator::registerFactory(decltype(iteratorFactory) newFactory) {
- iteratorFactory = std::move(newFactory);
-}
-
-auto IndexCatalog::IndexIterator::makeImpl(OperationContext* const opCtx,
- const IndexCatalog* const cat,
- const bool includeUnfinishedIndexes)
- -> std::unique_ptr<Impl> {
- return iteratorFactory(opCtx, cat, includeUnfinishedIndexes);
-}
+MONGO_DEFINE_SHIM(IndexCatalog::IndexIterator::makeImpl);
void IndexCatalog::IndexIterator::TUHook::hook() noexcept {}
-namespace {
-stdx::function<decltype(IndexCatalog::fixIndexKey)> fixIndexKeyImpl;
-} // namespace
-
-void IndexCatalog::registerFixIndexKeyImpl(decltype(fixIndexKeyImpl) impl) {
- fixIndexKeyImpl = std::move(impl);
-}
-
-BSONObj IndexCatalog::fixIndexKey(const BSONObj& key) {
- return fixIndexKeyImpl(key);
-}
-
-namespace {
-stdx::function<decltype(IndexCatalog::prepareInsertDeleteOptions)> prepareInsertDeleteOptionsImpl;
-} // namespace
+MONGO_DEFINE_SHIM(IndexCatalog::fixIndexKey);
std::string::size_type IndexCatalog::getLongestIndexNameLength(OperationContext* opCtx) const {
IndexCatalog::IndexIterator it = getIndexIterator(opCtx, true);
@@ -107,14 +68,5 @@ std::string::size_type IndexCatalog::getLongestIndexNameLength(OperationContext*
return longestIndexNameLength;
}
-void IndexCatalog::prepareInsertDeleteOptions(OperationContext* const opCtx,
- const IndexDescriptor* const desc,
- InsertDeleteOptions* const options) {
- return prepareInsertDeleteOptionsImpl(opCtx, desc, options);
-}
-
-void IndexCatalog::registerPrepareInsertDeleteOptionsImpl(
- stdx::function<decltype(prepareInsertDeleteOptions)> impl) {
- prepareInsertDeleteOptionsImpl = std::move(impl);
-}
+MONGO_DEFINE_SHIM(IndexCatalog::prepareInsertDeleteOptions);
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 5e5b17281e2..bb9902f91cd 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -32,6 +32,7 @@
#include <vector>
#include "mongo/base/clonable_ptr.h"
+#include "mongo/base/shim.h"
#include "mongo/db/catalog/index_catalog_entry.h"
#include "mongo/db/index/multikey_paths.h"
#include "mongo/db/jsobj.h"
@@ -76,20 +77,20 @@ public:
virtual IndexCatalogEntry* catalogEntry(const IndexDescriptor* desc) = 0;
};
- private:
- static std::unique_ptr<Impl> makeImpl(OperationContext* opCtx,
- const IndexCatalog* cat,
- bool includeUnfinishedIndexes);
+ static MONGO_DECLARE_SHIM((OperationContext * opCtx,
+ const IndexCatalog* cat,
+ bool includeUnfinishedIndexes,
+ PrivateTo<IndexIterator>)
+ ->std::unique_ptr<Impl>) makeImpl;
+ private:
explicit inline IndexIterator(OperationContext* const opCtx,
const IndexCatalog* const cat,
const bool includeUnfinishedIndexes)
- : _pimpl(makeImpl(opCtx, cat, includeUnfinishedIndexes)) {}
+ : _pimpl(makeImpl(opCtx, cat, includeUnfinishedIndexes, PrivateCall<IndexIterator>{})) {
+ }
public:
- using factory_function_type = stdx::function<decltype(makeImpl)>;
- static void registerFactory(factory_function_type factory);
-
inline ~IndexIterator() = default;
inline IndexIterator(const IndexIterator& copy) = default;
@@ -255,19 +256,17 @@ public:
friend IndexCatalog;
};
-private:
- static std::unique_ptr<Impl> makeImpl(IndexCatalog* this_,
- Collection* collection,
- int maxNumIndexesAllowed);
-
public:
- using factory_function_type = stdx::function<decltype(makeImpl)>;
- static void registerFactory(factory_function_type factory);
+ static MONGO_DECLARE_SHIM((IndexCatalog * this_,
+ Collection* collection,
+ int maxNumIndexesAllowed,
+ PrivateTo<IndexCatalog>)
+ ->std::unique_ptr<Impl>) makeImpl;
inline ~IndexCatalog() = default;
explicit inline IndexCatalog(Collection* const collection, const int maxNumIndexesAllowed)
- : _pimpl(makeImpl(this, collection, maxNumIndexesAllowed)) {}
+ : _pimpl(makeImpl(this, collection, maxNumIndexesAllowed, PrivateCall<IndexCatalog>{})) {}
inline IndexCatalog(IndexCatalog&&) = delete;
inline IndexCatalog& operator=(IndexCatalog&&) = delete;
@@ -538,18 +537,15 @@ public:
// public static helpers
- static BSONObj fixIndexKey(const BSONObj& key);
- static void registerFixIndexKeyImpl(stdx::function<decltype(fixIndexKey)> impl);
+ static MONGO_DECLARE_SHIM((const BSONObj& key)->BSONObj) fixIndexKey;
/**
* Fills out 'options' in order to indicate whether to allow dups or relax
* index constraints, as needed by replication.
*/
- static void prepareInsertDeleteOptions(OperationContext* opCtx,
- const IndexDescriptor* desc,
- InsertDeleteOptions* options);
- static void registerPrepareInsertDeleteOptionsImpl(
- stdx::function<decltype(prepareInsertDeleteOptions)> impl);
+ static MONGO_DECLARE_SHIM(
+ (OperationContext * opCtx, const IndexDescriptor* desc, InsertDeleteOptions* options)->void)
+ prepareInsertDeleteOptions;
private:
inline const Collection* _getCollection() const {
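Two details of this header are easy to miss. First, the trailing PrivateTo<IndexCatalog> parameter on makeImpl acts as a passkey: callers must pass a PrivateCall<IndexCatalog>{} token, which presumably only IndexCatalog and its friends can construct, so the factory stays effectively private even though the shim member itself is now public. Second, a declared shim is called like any static function; a hedged usage sketch for prepareInsertDeleteOptions, assuming an OperationContext* opCtx and an IndexDescriptor* descriptor already in scope:

    InsertDeleteOptions options;
    IndexCatalog::prepareInsertDeleteOptions(opCtx, descriptor, &options);
    // Per the comment above, 'options' now records whether duplicates are allowed or
    // index constraints relaxed; the behavior comes from whichever MONGO_REGISTER_SHIM
    // body was linked in (index_catalog_impl.cpp in the server build).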
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp
index 94a05fde7ac..1972c2237d4 100644
--- a/src/mongo/db/catalog/index_catalog_entry.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry.cpp
@@ -38,22 +38,7 @@
namespace mongo {
IndexCatalogEntry::Impl::~Impl() = default;
-namespace {
-stdx::function<IndexCatalogEntry::factory_function_type> factory;
-} // namespace
-
-void IndexCatalogEntry::registerFactory(decltype(factory) newFactory) {
- factory = std::move(newFactory);
-}
-
-auto IndexCatalogEntry::makeImpl(IndexCatalogEntry* const this_,
- OperationContext* const opCtx,
- const StringData ns,
- CollectionCatalogEntry* const collection,
- std::unique_ptr<IndexDescriptor> descriptor,
- CollectionInfoCache* const infoCache) -> std::unique_ptr<Impl> {
- return factory(this_, opCtx, ns, collection, std::move(descriptor), infoCache);
-}
+MONGO_DEFINE_SHIM(IndexCatalogEntry::makeImpl);
void IndexCatalogEntry::TUHook::hook() noexcept {}
@@ -62,7 +47,13 @@ IndexCatalogEntry::IndexCatalogEntry(OperationContext* opCtx,
CollectionCatalogEntry* collection,
std::unique_ptr<IndexDescriptor> descriptor,
CollectionInfoCache* infoCache)
- : _pimpl(makeImpl(this, opCtx, ns, collection, std::move(descriptor), infoCache)) {}
+ : _pimpl(makeImpl(this,
+ opCtx,
+ ns,
+ collection,
+ std::move(descriptor),
+ infoCache,
+ PrivateCall<IndexCatalogEntry>{})) {}
void IndexCatalogEntry::init(std::unique_ptr<IndexAccessMethod> accessMethod) {
return this->_impl().init(std::move(accessMethod));
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index fecfd197bb1..20d748c9846 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -32,6 +32,7 @@
#include <string>
#include "mongo/base/owned_pointer_vector.h"
+#include "mongo/base/shim.h"
#include "mongo/bson/ordering.h"
#include "mongo/bson/timestamp.h"
#include "mongo/db/index/multikey_paths.h"
@@ -102,18 +103,15 @@ public:
virtual void setMinimumVisibleSnapshot(Timestamp name) = 0;
};
-private:
- static std::unique_ptr<Impl> makeImpl(IndexCatalogEntry* this_,
- OperationContext* opCtx,
- StringData ns,
- CollectionCatalogEntry* collection,
- std::unique_ptr<IndexDescriptor> descriptor,
- CollectionInfoCache* infoCache);
-
public:
- using factory_function_type = decltype(makeImpl);
-
- static void registerFactory(stdx::function<factory_function_type> factory);
+ static MONGO_DECLARE_SHIM((IndexCatalogEntry * this_,
+ OperationContext* opCtx,
+ StringData ns,
+ CollectionCatalogEntry* collection,
+ std::unique_ptr<IndexDescriptor> descriptor,
+ CollectionInfoCache* infoCache,
+ PrivateTo<IndexCatalogEntry>)
+ ->std::unique_ptr<Impl>) makeImpl;
explicit IndexCatalogEntry(
OperationContext* opCtx,
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index 5e717b97e71..1ef395b85b4 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -53,20 +53,18 @@
#include "mongo/util/scopeguard.h"
namespace mongo {
-namespace {
-MONGO_INITIALIZER(InitializeIndexCatalogEntryFactory)(InitializerContext* const) {
- IndexCatalogEntry::registerFactory([](IndexCatalogEntry* const this_,
- OperationContext* const opCtx,
- const StringData ns,
- CollectionCatalogEntry* const collection,
- std::unique_ptr<IndexDescriptor> descriptor,
- CollectionInfoCache* const infoCache) {
- return stdx::make_unique<IndexCatalogEntryImpl>(
- this_, opCtx, ns, collection, std::move(descriptor), infoCache);
- });
- return Status::OK();
+MONGO_REGISTER_SHIM(IndexCatalogEntry::makeImpl)
+(IndexCatalogEntry* const this_,
+ OperationContext* const opCtx,
+ const StringData ns,
+ CollectionCatalogEntry* const collection,
+ std::unique_ptr<IndexDescriptor> descriptor,
+ CollectionInfoCache* const infoCache,
+ PrivateTo<IndexCatalogEntry>)
+ ->std::unique_ptr<IndexCatalogEntry::Impl> {
+ return std::make_unique<IndexCatalogEntryImpl>(
+ this_, opCtx, ns, collection, std::move(descriptor), infoCache);
}
-} // namespace
using std::string;
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index e19773b34fd..76bb7aa4b8a 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -70,38 +70,33 @@
#include "mongo/util/represent_as.h"
namespace mongo {
-namespace {
-MONGO_INITIALIZER(InitializeIndexCatalogFactory)(InitializerContext* const) {
- IndexCatalog::registerFactory([](
- IndexCatalog* const this_, Collection* const collection, const int maxNumIndexesAllowed) {
- return stdx::make_unique<IndexCatalogImpl>(this_, collection, maxNumIndexesAllowed);
- });
- return Status::OK();
+MONGO_REGISTER_SHIM(IndexCatalog::makeImpl)
+(IndexCatalog* const this_,
+ Collection* const collection,
+ const int maxNumIndexesAllowed,
+ PrivateTo<IndexCatalog>)
+ ->std::unique_ptr<IndexCatalog::Impl> {
+ return std::make_unique<IndexCatalogImpl>(this_, collection, maxNumIndexesAllowed);
}
-MONGO_INITIALIZER(InitializeIndexCatalogIndexIteratorFactory)(InitializerContext* const) {
- IndexCatalog::IndexIterator::registerFactory([](OperationContext* const opCtx,
- const IndexCatalog* const cat,
- const bool includeUnfinishedIndexes) {
- return stdx::make_unique<IndexCatalogImpl::IndexIteratorImpl>(
- opCtx, cat, includeUnfinishedIndexes);
- });
- return Status::OK();
+MONGO_REGISTER_SHIM(IndexCatalog::IndexIterator::makeImpl)
+(OperationContext* const opCtx,
+ const IndexCatalog* const cat,
+ const bool includeUnfinishedIndexes,
+ PrivateTo<IndexCatalog::IndexIterator>)
+ ->std::unique_ptr<IndexCatalog::IndexIterator::Impl> {
+ return std::make_unique<IndexCatalogImpl::IndexIteratorImpl>(
+ opCtx, cat, includeUnfinishedIndexes);
}
-
-MONGO_INITIALIZER(InitializeFixIndexKeyImpl)(InitializerContext* const) {
- IndexCatalog::registerFixIndexKeyImpl(&IndexCatalogImpl::fixIndexKey);
- return Status::OK();
+MONGO_REGISTER_SHIM(IndexCatalog::fixIndexKey)(const BSONObj& key)->BSONObj {
+ return IndexCatalogImpl::fixIndexKey(key);
}
-MONGO_INITIALIZER(InitializePrepareInsertDeleteOptionsImpl)(InitializerContext* const) {
- IndexCatalog::registerPrepareInsertDeleteOptionsImpl(
- &IndexCatalogImpl::prepareInsertDeleteOptions);
- return Status::OK();
+MONGO_REGISTER_SHIM(IndexCatalog::prepareInsertDeleteOptions)
+(OperationContext* opCtx, const IndexDescriptor* desc, InsertDeleteOptions* options)->void {
+ return IndexCatalogImpl::prepareInsertDeleteOptions(opCtx, desc, options);
}
-} // namespace
-
using std::unique_ptr;
using std::endl;
using std::string;
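Each MONGO_REGISTER_SHIM block here replaces an entire MONGO_INITIALIZER that previously had to wrap the factory in a lambda and return Status::OK(); call sites are untouched by the migration. For example, a caller of the fixIndexKey shim still writes (sketch, using the usual BSON builder macro):

    // The shim object is directly callable; the MONGO_REGISTER_SHIM above routes
    // this call to IndexCatalogImpl::fixIndexKey.
    BSONObj fixed = IndexCatalog::fixIndexKey(BSON("a" << 1));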
diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp
index f1237212a1b..7caf6e1ed86 100644
--- a/src/mongo/db/catalog/index_consistency.cpp
+++ b/src/mongo/db/catalog/index_consistency.cpp
@@ -506,7 +506,7 @@ uint32_t IndexConsistency::_hashKeyString(const KeyString& ks, int indexNumber)
Status IndexConsistency::_throwExceptionIfError() {
- Database* database = dbHolder().get(_opCtx, _nss.db());
+ Database* database = DatabaseHolder::getDatabaseHolder().get(_opCtx, _nss.db());
// Ensure the database still exists.
if (!database) {
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 06f9676ca4b..1f4c169000c 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -57,19 +57,7 @@
namespace mongo {
MultiIndexBlock::Impl::~Impl() = default;
-namespace {
-stdx::function<MultiIndexBlock::factory_function_type> factory;
-} // namespace
-
-void MultiIndexBlock::registerFactory(decltype(factory) newFactory) {
- factory = std::move(newFactory);
-}
-
-auto MultiIndexBlock::makeImpl(OperationContext* const opCtx, Collection* const collection)
- -> std::unique_ptr<Impl> {
- return factory(opCtx, collection);
-}
-
+MONGO_DEFINE_SHIM(MultiIndexBlock::makeImpl);
void MultiIndexBlock::TUHook::hook() noexcept {}
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_create.h b/src/mongo/db/catalog/index_create.h
index 56c85609a1f..acdf744da9e 100644
--- a/src/mongo/db/catalog/index_create.h
+++ b/src/mongo/db/catalog/index_create.h
@@ -114,12 +114,11 @@ private:
return *this->_pimpl;
}
- static std::unique_ptr<Impl> makeImpl(OperationContext* opCtx, Collection* collection);
-
public:
- using factory_function_type = decltype(makeImpl);
-
- static void registerFactory(stdx::function<factory_function_type> factory);
+ static MONGO_DECLARE_SHIM((OperationContext * opCtx,
+ Collection* collection,
+ PrivateTo<MultiIndexBlock>)
+ ->std::unique_ptr<Impl>) makeImpl;
inline ~MultiIndexBlock() = default;
@@ -127,7 +126,7 @@ public:
* Neither pointer is owned.
*/
inline explicit MultiIndexBlock(OperationContext* const opCtx, Collection* const collection)
- : _pimpl(makeImpl(opCtx, collection)) {}
+ : _pimpl(makeImpl(opCtx, collection, PrivateCall<MultiIndexBlock>{})) {}
/**
* By default we ignore the 'background' flag in specs when building an index. If this is
diff --git a/src/mongo/db/catalog/index_create_impl_servers.cpp b/src/mongo/db/catalog/index_create_impl_servers.cpp
index 5fab9ebee48..541a19a0434 100644
--- a/src/mongo/db/catalog/index_create_impl_servers.cpp
+++ b/src/mongo/db/catalog/index_create_impl_servers.cpp
@@ -41,14 +41,12 @@ class MultiIndexBlockImplServers : public MultiIndexBlockImpl {
return spec["background"].trueValue();
}
};
+} // namespace
-MONGO_INITIALIZER(InitializeMultiIndexBlockFactory)(InitializerContext* const) {
- MultiIndexBlock::registerFactory(
- [](OperationContext* const opCtx, Collection* const collection) {
- return stdx::make_unique<MultiIndexBlockImplServers>(opCtx, collection);
- });
- return Status::OK();
+MONGO_REGISTER_SHIM(MultiIndexBlock::makeImpl)
+(OperationContext* const opCtx, Collection* const collection, PrivateTo<MultiIndexBlock>)
+ ->std::unique_ptr<MultiIndexBlock::Impl> {
+ return std::make_unique<MultiIndexBlockImplServers>(opCtx, collection);
}
-} // namespace
} // namespace mongo
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index fd5d2df9706..0d7722093e6 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -152,7 +152,7 @@ Status renameCollectionCommon(OperationContext* opCtx,
<< target.ns());
}
- Database* const sourceDB = dbHolder().get(opCtx, source.db());
+ Database* const sourceDB = DatabaseHolder::getDatabaseHolder().get(opCtx, source.db());
if (sourceDB) {
DatabaseShardingState::get(sourceDB).checkDbVersion(opCtx);
}
@@ -180,7 +180,7 @@ Status renameCollectionCommon(OperationContext* opCtx,
BackgroundOperation::assertNoBgOpInProgForNs(source.ns());
- Database* const targetDB = dbHolder().openDb(opCtx, target.db());
+ Database* const targetDB = DatabaseHolder::getDatabaseHolder().openDb(opCtx, target.db());
// Check if the target namespace exists and if dropTarget is true.
// Return a non-OK status if target exists and dropTarget is not true or if the collection
diff --git a/src/mongo/db/catalog/uuid_catalog.cpp b/src/mongo/db/catalog/uuid_catalog.cpp
index 3ec994d7ef0..0a50cadfa71 100644
--- a/src/mongo/db/catalog/uuid_catalog.cpp
+++ b/src/mongo/db/catalog/uuid_catalog.cpp
@@ -92,7 +92,7 @@ repl::OpTime UUIDCatalogObserver::onRenameCollection(OperationContext* opCtx,
if (!uuid)
return {};
auto getNewCollection = [opCtx, toCollection] {
- auto db = dbHolder().get(opCtx, toCollection.db());
+ auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, toCollection.db());
auto newColl = db->getCollection(opCtx, toCollection);
invariant(newColl);
return newColl;
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 94f7838e39c..116ca47dd6e 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -59,7 +59,7 @@ AutoGetDb::AutoGetDb(OperationContext* opCtx, StringData dbName, LockMode mode,
: _dbLock(opCtx, dbName, mode, deadline), _db([&] {
uassertLockTimeout(
str::stream() << "database " << dbName, mode, deadline, _dbLock.isLocked());
- return dbHolder().get(opCtx, dbName);
+ return DatabaseHolder::getDatabaseHolder().get(opCtx, dbName);
}()) {
if (_db) {
DatabaseShardingState::get(_db).checkDbVersion(opCtx);
@@ -179,7 +179,7 @@ AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* opCtx,
_autoDb.emplace(opCtx, dbName, MODE_X, deadline);
}
- _db = dbHolder().openDb(opCtx, dbName, &_justCreated);
+ _db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName, &_justCreated);
}
DatabaseShardingState::get(_db).checkDbVersion(opCtx);
diff --git a/src/mongo/db/catalog_raii.h b/src/mongo/db/catalog_raii.h
index 081e566e7de..1d0b1e9238f 100644
--- a/src/mongo/db/catalog_raii.h
+++ b/src/mongo/db/catalog_raii.h
@@ -40,7 +40,8 @@ namespace mongo {
/**
* RAII-style class, which acquires a lock on the specified database in the requested mode and
- * obtains a reference to the database. Used as a shortcut for calls to dbHolder().get().
+ * obtains a reference to the database. Used as a shortcut for calls to
+ * DatabaseHolder::getDatabaseHolder().get().
*
* Use this when you want to do a database-level operation, like read a list of all collections, or
* drop a collection.
@@ -152,9 +153,9 @@ private:
/**
* RAII-style class, which acquires a lock on the specified database in the requested mode and
 * obtains a reference to the database, creating it if non-existing. Used as a shortcut for
- * calls to dbHolder().openDb(), taking care of locking details. The requested mode must be
- * MODE_IX or MODE_X. If the database needs to be created, the lock will automatically be
- * reacquired as MODE_X.
+ * calls to DatabaseHolder::getDatabaseHolder().openDb(), taking care of locking details. The
+ * requested mode must be MODE_IX or MODE_X. If the database needs to be created, the lock will
+ * automatically be reacquired as MODE_X.
*
* Use this when you are about to perform a write, and want to create the database if it doesn't
* already exist.
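The two RAII helpers documented in this header wrap the DatabaseHolder calls that the rest of the patch spells out explicitly. A minimal usage sketch, assuming the getDb() accessors used elsewhere in this patch and assuming the deadline arguments are defaulted:

    {
        // Read path: lock the database in MODE_IS and look it up without creating it.
        AutoGetDb autoDb(opCtx, "test", MODE_IS);
        if (Database* db = autoDb.getDb()) {
            // database-level read work, e.g. enumerating collections
        }
    }  // database lock released here

    {
        // Write path: lock in MODE_IX and create the database if it is missing; per the
        // comment above, the lock is reacquired as MODE_X when creation is needed.
        AutoGetOrCreateDb autoDb(opCtx, "test", MODE_IX);
        Database* db = autoDb.getDb();
        // write work against 'db'
    }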
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 4c1971b8585..8af7e4f452d 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -1,6 +1,3 @@
-
-// cloner.cpp - copy a database (export/import basically)
-
/**
* Copyright (C) 2008 10gen Inc.
*
@@ -156,7 +153,7 @@ struct Cloner::Fun {
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, to_collection));
// Make sure database still exists after we resume from the temp release
- Database* db = dbHolder().openDb(opCtx, _dbName);
+ Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, _dbName);
bool createdCollection = false;
Collection* collection = NULL;
@@ -172,13 +169,14 @@ struct Cloner::Fun {
WriteUnitOfWork wunit(opCtx);
const bool createDefaultIndexes = true;
- Status s = userCreateNS(opCtx,
- db,
- to_collection.toString(),
- from_options,
- CollectionOptions::parseForCommand,
- createDefaultIndexes,
- fixIndexSpec(to_collection.db().toString(), from_id_index));
+ Status s = Database::userCreateNS(
+ opCtx,
+ db,
+ to_collection.toString(),
+ from_options,
+ CollectionOptions::parseForCommand,
+ createDefaultIndexes,
+ fixIndexSpec(to_collection.db().toString(), from_id_index));
verify(s.isOK());
wunit.commit();
collection = db->getCollection(opCtx, to_collection);
@@ -214,7 +212,7 @@ struct Cloner::Fun {
}
// TODO: SERVER-16598 abort if original db or collection is gone.
- db = dbHolder().get(opCtx, _dbName);
+ db = DatabaseHolder::getDatabaseHolder().get(opCtx, _dbName);
uassert(28593,
str::stream() << "Database " << _dbName << " dropped while cloning",
db != NULL);
@@ -372,7 +370,7 @@ void Cloner::copyIndexes(OperationContext* opCtx,
// We are under lock here again, so reload the database in case it may have disappeared
// during the temp release
- Database* db = dbHolder().openDb(opCtx, toDBName);
+ Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, toDBName);
Collection* collection = db->getCollection(opCtx, to_collection);
if (!collection) {
@@ -381,7 +379,7 @@ void Cloner::copyIndexes(OperationContext* opCtx,
WriteUnitOfWork wunit(opCtx);
const bool createDefaultIndexes = true;
- Status s = userCreateNS(
+ Status s = Database::userCreateNS(
opCtx,
db,
to_collection.toString(),
@@ -479,7 +477,7 @@ bool Cloner::copyCollection(OperationContext* opCtx,
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss));
- Database* db = dbHolder().openDb(opCtx, dbname);
+ Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbname);
if (shouldCreateCollection) {
bool result = writeConflictRetry(opCtx, "createCollection", ns, [&] {
@@ -487,7 +485,7 @@ bool Cloner::copyCollection(OperationContext* opCtx,
WriteUnitOfWork wunit(opCtx);
const bool createDefaultIndexes = true;
- Status status = userCreateNS(
+ Status status = Database::userCreateNS(
opCtx, db, ns, options, optionsParser, createDefaultIndexes, idIndexSpec);
if (!status.isOK()) {
errmsg = status.toString();
@@ -567,7 +565,7 @@ Status Cloner::createCollectionsForDb(
const std::vector<CreateCollectionParams>& createCollectionParams,
const std::string& dbName,
const CloneOptions& opts) {
- Database* db = dbHolder().openDb(opCtx, dbName);
+ Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName);
invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X));
auto collCount = 0;
@@ -639,13 +637,13 @@ Status Cloner::createCollectionsForDb(
auto options = optionsBuilder.obj();
Status createStatus =
- userCreateNS(opCtx,
- db,
- nss.ns(),
- options,
- CollectionOptions::parseForStorage,
- createDefaultIndexes,
- fixIndexSpec(nss.db().toString(), params.idIndexSpec));
+ Database::userCreateNS(opCtx,
+ db,
+ nss.ns(),
+ options,
+ CollectionOptions::parseForStorage,
+ createDefaultIndexes,
+ fixIndexSpec(nss.db().toString(), params.idIndexSpec));
if (!createStatus.isOK()) {
return createStatus;
}
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index eb3b197278f..bf1edf90884 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -269,9 +269,9 @@ public:
str::stream() << "Not primary while creating indexes in " << ns.ns()));
}
- Database* db = dbHolder().get(opCtx, ns.db());
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, ns.db());
if (!db) {
- db = dbHolder().openDb(opCtx, ns.db());
+ db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, ns.db());
}
DatabaseShardingState::get(db).checkDbVersion(opCtx);
@@ -393,7 +393,7 @@ public:
str::stream() << "Not primary while completing index build in " << dbname,
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns));
- Database* db = dbHolder().get(opCtx, ns.db());
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, ns.db());
if (db) {
DatabaseShardingState::get(db).checkDbVersion(opCtx);
}
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 34e2ed22f5b..ed753c7392a 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -210,7 +210,7 @@ public:
// Closing a database requires a global lock.
Lock::GlobalWrite lk(opCtx);
- auto db = dbHolder().get(opCtx, dbname);
+ auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbname);
if (db) {
if (db->isDropPending(opCtx)) {
return CommandHelpers::appendCommandStatus(
@@ -221,7 +221,8 @@ public:
}
} else {
// If the name doesn't make an exact match, check for a case insensitive match.
- std::set<std::string> otherCasing = dbHolder().getNamesWithConflictingCasing(dbname);
+ std::set<std::string> otherCasing =
+ DatabaseHolder::getDatabaseHolder().getNamesWithConflictingCasing(dbname);
if (otherCasing.empty()) {
// Database doesn't exist. Treat this as a success (historical behavior).
return true;
@@ -255,7 +256,7 @@ public:
opCtx, engine, dbname, preserveClonedFilesOnFailure, backupOriginalFiles);
// Open database before returning
- dbHolder().openDb(opCtx, dbname);
+ DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbname);
return CommandHelpers::appendCommandStatus(result, status);
}
} cmdRepairDatabase;
diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp
index a0b4be7ebd1..90ab6c70bfa 100644
--- a/src/mongo/db/commands/dbcommands_d.cpp
+++ b/src/mongo/db/commands/dbcommands_d.cpp
@@ -128,7 +128,7 @@ protected:
if (!db) {
// When setting the profiling level, create the database if it didn't already exist.
// When just reading the profiling level, we do not create the database.
- db = dbHolder().openDb(opCtx, dbName);
+ db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName);
}
uassertStatusOK(db->setProfilingLevel(opCtx, profilingLevel));
}
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index cc0902e9c4e..41c3e01c696 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -481,8 +481,8 @@ public:
if (!collection) {
uassertStatusOK(userAllowedCreateNS(nsString.db(), nsString.coll()));
WriteUnitOfWork wuow(opCtx);
- uassertStatusOK(
- userCreateNS(opCtx, autoDb->getDb(), nsString.ns(), BSONObj()));
+ uassertStatusOK(Database::userCreateNS(
+ opCtx, autoDb->getDb(), nsString.ns(), BSONObj()));
wuow.commit();
collection = autoDb->getDb()->getCollection(opCtx, nsString);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 476fd6b5d3e..c743a951ba5 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -393,7 +393,8 @@ void State::dropTempCollections() {
writeConflictRetry(_opCtx, "M/R dropTempCollections", _config.incLong.ns(), [this] {
Lock::DBLock lk(_opCtx, _config.incLong.db(), MODE_X);
- if (Database* db = dbHolder().get(_opCtx, _config.incLong.ns())) {
+ if (Database* db =
+ DatabaseHolder::getDatabaseHolder().get(_opCtx, _config.incLong.ns())) {
WriteUnitOfWork wunit(_opCtx);
uassertStatusOK(db->dropCollection(_opCtx, _config.incLong.ns()));
wunit.commit();
@@ -653,7 +654,7 @@ unsigned long long _collectionCount(OperationContext* opCtx,
// If the global write lock is held, we must avoid using AutoGetCollectionForReadCommand as it
// may lead to deadlock when waiting for a majority snapshot to be committed. See SERVER-24596.
if (callerHoldsGlobalLock) {
- Database* db = dbHolder().get(opCtx, nss.ns());
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.ns());
if (db) {
coll = db->getCollection(opCtx, nss);
}
diff --git a/src/mongo/db/commands/resize_oplog.cpp b/src/mongo/db/commands/resize_oplog.cpp
index d82c4321695..726cf043b5d 100644
--- a/src/mongo/db/commands/resize_oplog.cpp
+++ b/src/mongo/db/commands/resize_oplog.cpp
@@ -87,7 +87,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss("local", "oplog.rs");
Lock::GlobalWrite global(opCtx);
- Database* database = dbHolder().get(opCtx, nss.db());
+ Database* database = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db());
if (!database) {
return CommandHelpers::appendCommandStatus(
result, Status(ErrorCodes::NamespaceNotFound, "database local does not exist"));
diff --git a/src/mongo/db/commands/restart_catalog_command.cpp b/src/mongo/db/commands/restart_catalog_command.cpp
index e8a37330d48..fd954626f10 100644
--- a/src/mongo/db/commands/restart_catalog_command.cpp
+++ b/src/mongo/db/commands/restart_catalog_command.cpp
@@ -95,7 +95,7 @@ public:
std::vector<std::string> allDbs;
getGlobalServiceContext()->getGlobalStorageEngine()->listDatabases(&allDbs);
for (auto&& dbName : allDbs) {
- const auto db = dbHolder().get(opCtx, dbName);
+ const auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName);
if (db->isDropPending(opCtx)) {
return CommandHelpers::appendCommandStatus(
result,
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index dd1843e160c..5c4019e18d2 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -222,7 +222,7 @@ void logStartup(OperationContext* opCtx) {
if (!collection) {
BSONObj options = BSON("capped" << true << "size" << 10 * 1024 * 1024);
repl::UnreplicatedWritesBlock uwb(opCtx);
- uassertStatusOK(userCreateNS(opCtx, db, startupLogCollectionName.ns(), options));
+ uassertStatusOK(Database::userCreateNS(opCtx, db, startupLogCollectionName.ns(), options));
collection = db->getCollection(opCtx, startupLogCollectionName);
}
invariant(collection);
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index 36c61edb4c9..3ff4649584a 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -269,7 +269,8 @@ AutoGetCollectionForReadCommand::AutoGetCollectionForReadCommand(
}
OldClientContext::OldClientContext(OperationContext* opCtx, const std::string& ns, bool doVersion)
- : OldClientContext(opCtx, ns, doVersion, dbHolder().get(opCtx, ns), false) {}
+ : OldClientContext(
+ opCtx, ns, doVersion, DatabaseHolder::getDatabaseHolder().get(opCtx, ns), false) {}
OldClientContext::OldClientContext(
OperationContext* opCtx, const std::string& ns, bool doVersion, Database* db, bool justCreated)
@@ -277,7 +278,7 @@ OldClientContext::OldClientContext(
if (!_db) {
const auto dbName = nsToDatabaseSubstring(ns);
invariant(_opCtx->lockState()->isDbLockedForMode(dbName, MODE_X));
- _db = dbHolder().openDb(_opCtx, dbName, &_justCreated);
+ _db = DatabaseHolder::getDatabaseHolder().openDb(_opCtx, dbName, &_justCreated);
invariant(_db);
}
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index ab927cfc54b..d915fc6590b 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -98,7 +98,7 @@ void IndexBuilder::run() {
Lock::DBLock dlk(opCtx.get(), ns.db(), MODE_X);
OldClientContext ctx(opCtx.get(), ns.getSystemIndexesCollection());
- Database* db = dbHolder().get(opCtx.get(), ns.db().toString());
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx.get(), ns.db().toString());
Status status = _build(opCtx.get(), db, true, &dlk);
if (!status.isOK()) {
@@ -210,7 +210,7 @@ Status IndexBuilder::_build(OperationContext* opCtx,
if (allowBackgroundBuilding) {
dbLock->relockWithMode(MODE_X);
- Database* reloadDb = dbHolder().get(opCtx, ns.db());
+ Database* reloadDb = DatabaseHolder::getDatabaseHolder().get(opCtx, ns.db());
fassert(28553, reloadDb);
fassert(28554, reloadDb->getCollection(opCtx, ns));
}
diff --git a/src/mongo/db/keypattern.cpp b/src/mongo/db/keypattern.cpp
index ffc2a1f8e71..37c7f69631f 100644
--- a/src/mongo/db/keypattern.cpp
+++ b/src/mongo/db/keypattern.cpp
@@ -1,5 +1,3 @@
-// @file keypattern.cpp
-
/**
* Copyright (C) 2012 10gen Inc.
*
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index a92d5753d27..c572c9c65a1 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -660,7 +660,7 @@ void OpObserverImpl::onCollMod(OperationContext* opCtx,
// catalog are all present and equal, unless the collection is system.indexes or
// system.namespaces (see SERVER-29926, SERVER-30095).
invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X));
- Database* db = dbHolder().get(opCtx, nss.db());
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db());
// Some unit tests call the op observer on an unregistered Database.
if (!db) {
return;
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 7edfd3d09d4..c401349d2c5 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -212,7 +212,7 @@ void makeCollection(OperationContext* opCtx, const NamespaceString& ns) {
if (!db.getDb()->getCollection(opCtx, ns)) { // someone else may have beat us to it.
uassertStatusOK(userAllowedCreateNS(ns.db(), ns.coll()));
WriteUnitOfWork wuow(opCtx);
- uassertStatusOK(userCreateNS(opCtx, db.getDb(), ns.ns(), BSONObj()));
+ uassertStatusOK(Database::userCreateNS(opCtx, db.getDb(), ns.ns(), BSONObj()));
wuow.commit();
}
});
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 54d56cfb313..2deec30fd5f 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -446,7 +446,7 @@ std::shared_ptr<CappedInsertNotifier> PlanExecutor::getCappedInsertNotifier() {
// We can only wait if we have a collection; otherwise we should retry immediately when
// we hit EOF.
dassert(_opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IS));
- auto db = dbHolder().get(_opCtx, _nss.db());
+ auto db = DatabaseHolder::getDatabaseHolder().get(_opCtx, _nss.db());
invariant(db);
auto collection = db->getCollection(_opCtx, _nss);
invariant(collection);
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 93773d7fdc4..7c106ec1d62 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -266,14 +266,14 @@ Status repairDatabase(OperationContext* opCtx,
}
// Close the db and invalidate all current users and caches.
- dbHolder().close(opCtx, dbName, "database closed for repair");
+ DatabaseHolder::getDatabaseHolder().close(opCtx, dbName, "database closed for repair");
ON_BLOCK_EXIT([&dbName, &opCtx] {
try {
// Ensure that we don't trigger an exception when attempting to take locks.
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
// Open the db after everything finishes.
- auto db = dbHolder().openDb(opCtx, dbName);
+ auto db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName);
// Set the minimum snapshot for all Collections in this db. This ensures that readers
// using majority readConcern level can only use the collections after their repaired
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index 90f5872adc6..7a9a50e215e 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -78,11 +78,11 @@ Status restoreMissingFeatureCompatibilityVersionDocument(OperationContext* opCtx
// If the admin database, which contains the server configuration collection with the
// featureCompatibilityVersion document, does not exist, create it.
- Database* db = dbHolder().get(opCtx, fcvNss.db());
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, fcvNss.db());
if (!db) {
log() << "Re-creating admin database that was dropped.";
}
- db = dbHolder().openDb(opCtx, fcvNss.db());
+ db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, fcvNss.db());
invariant(db);
// If the server configuration collection, which contains the FCV document, does not exist, then
@@ -138,7 +138,7 @@ Status ensureAllCollectionsHaveUUIDs(OperationContext* opCtx,
bool isMmapV1 = opCtx->getServiceContext()->getGlobalStorageEngine()->isMmapV1();
std::vector<NamespaceString> nonReplicatedCollNSSsWithoutUUIDs;
for (const auto& dbName : dbNames) {
- Database* db = dbHolder().openDb(opCtx, dbName);
+ Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName);
invariant(db);
for (auto collectionIt = db->begin(); collectionIt != db->end(); ++collectionIt) {
Collection* coll = *collectionIt;
@@ -346,7 +346,7 @@ StatusWith<bool> repairDatabasesAndCheckVersion(OperationContext* opCtx) {
// Attempt to restore the featureCompatibilityVersion document if it is missing.
NamespaceString fcvNSS(NamespaceString::kServerConfigurationNamespace);
- Database* db = dbHolder().get(opCtx, fcvNSS.db());
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, fcvNSS.db());
Collection* versionColl;
BSONObj featureCompatibilityVersion;
if (!db || !(versionColl = db->getCollection(opCtx, fcvNSS)) ||
@@ -377,7 +377,7 @@ StatusWith<bool> repairDatabasesAndCheckVersion(OperationContext* opCtx) {
// it is fine to not open the "local" database and populate the catalog entries because we
// won't attempt to drop the temporary collections anyway.
Lock::DBLock dbLock(opCtx, kSystemReplSetCollection.db(), MODE_X);
- dbHolder().openDb(opCtx, kSystemReplSetCollection.db());
+ DatabaseHolder::getDatabaseHolder().openDb(opCtx, kSystemReplSetCollection.db());
}
const repl::ReplSettings& replSettings =
@@ -405,7 +405,7 @@ StatusWith<bool> repairDatabasesAndCheckVersion(OperationContext* opCtx) {
}
LOG(1) << " Recovering database: " << dbName;
- Database* db = dbHolder().openDb(opCtx, dbName);
+ Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName);
invariant(db);
// First thing after opening the database is to check for file compatibility,
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 3a732c407ec..81ebf365c62 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -137,7 +137,7 @@ Status _applyOps(OperationContext* opCtx,
invariant(opCtx->lockState()->isW());
invariant(*opType != 'c');
- auto db = dbHolder().get(opCtx, nss.ns());
+ auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.ns());
if (!db) {
// Retry in non-atomic mode, since MMAP cannot implicitly create a new database
// within an active WriteUnitOfWork.
@@ -339,7 +339,7 @@ Status _checkPrecondition(OperationContext* opCtx,
BSONObj realres = db.findOne(nss.ns(), preCondition["q"].Obj());
// Get collection default collation.
- Database* database = dbHolder().get(opCtx, nss.db());
+ Database* database = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db());
if (!database) {
return {ErrorCodes::NamespaceNotFound, "database in ns does not exist: " + nss.ns()};
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 6e083990f4e..2c6f4395590 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -227,7 +227,7 @@ void createIndexForApplyOps(OperationContext* opCtx,
IncrementOpsAppliedStatsFn incrementOpsAppliedStats,
OplogApplication::Mode mode) {
// Check if collection exists.
- Database* db = dbHolder().get(opCtx, indexNss.ns());
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, indexNss.ns());
auto indexCollection = db ? db->getCollection(opCtx, indexNss) : nullptr;
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to create index due to missing collection: " << indexNss.ns(),
@@ -1516,7 +1516,7 @@ Status applyCommand_inlock(OperationContext* opCtx,
return {ErrorCodes::InvalidNamespace, "invalid ns: " + std::string(nss.ns())};
}
{
- Database* db = dbHolder().get(opCtx, nss.ns());
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.ns());
if (db && !db->getCollection(opCtx, nss) && db->getViewCatalog()->lookup(opCtx, nss.ns())) {
return {ErrorCodes::CommandNotSupportedOnView,
str::stream() << "applyOps not supported on view:" << nss.ns()};
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 4095ca06ef5..d87cf4f73f8 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -394,7 +394,7 @@ Status ReplicationCoordinatorExternalStateImpl::runRepairOnLocalDB(OperationCont
Status status = repairDatabase(opCtx, engine, localDbName, false, false);
// Open database before returning
- dbHolder().openDb(opCtx, localDbName);
+ DatabaseHolder::getDatabaseHolder().openDb(opCtx, localDbName);
} catch (const DBException& ex) {
return ex.toStatus();
}
@@ -787,7 +787,7 @@ void ReplicationCoordinatorExternalStateImpl::_dropAllTempCollections(OperationC
if (*it == "local")
continue;
LOG(2) << "Removing temporary collections from " << *it;
- Database* db = dbHolder().get(opCtx, *it);
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, *it);
// Since we must be holding the global lock during this function, if listDatabases
// returned this dbname, we should be able to get a reference to it - it can't have
// been dropped.
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index ed65f5a0150..e6831304a46 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -857,7 +857,7 @@ void RollbackImpl::_resetDropPendingState(OperationContext* opCtx) {
opCtx->getServiceContext()->getGlobalStorageEngine()->listDatabases(&dbNames);
for (const auto& dbName : dbNames) {
Lock::DBLock dbLock(opCtx, dbName, MODE_X);
- Database* db = dbHolder().openDb(opCtx, dbName);
+ Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName);
checkForIdIndexesAndDropPendingCollections(opCtx, db);
}
}
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index 2e9c4f47889..b0d49b1473a 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -102,8 +102,8 @@ void RollbackTest::tearDown() {
SessionCatalog::get(_serviceContextMongoDTest.getServiceContext())->reset_forTest();
// We cannot unset the global replication coordinator because ServiceContextMongoD::tearDown()
- // calls dropAllDatabasesExceptLocal() which requires the replication coordinator to clear all
- // snapshots.
+ // calls Database::dropAllDatabasesExceptLocal() which requires the replication coordinator to
+ // clear all snapshots.
_serviceContextMongoDTest.tearDown();
// ServiceContextMongoD::tearDown() does not destroy service context so it is okay
@@ -179,7 +179,7 @@ Collection* RollbackTest::_createCollection(OperationContext* opCtx,
const CollectionOptions& options) {
Lock::DBLock dbLock(opCtx, nss.db(), MODE_X);
mongo::WriteUnitOfWork wuow(opCtx);
- auto db = dbHolder().openDb(opCtx, nss.db());
+ auto db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, nss.db());
ASSERT_TRUE(db);
db->dropCollection(opCtx, nss.ns()).transitional_ignore();
auto coll = db->createCollection(opCtx, nss.ns(), options);
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 3e9944988ba..eae8c70a468 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -844,7 +844,7 @@ void rollbackRenameCollection(OperationContext* opCtx, UUID uuid, RenameCollecti
log() << "Attempting to rename collection with UUID: " << uuid << ", from: " << info.renameFrom
<< ", to: " << info.renameTo;
Lock::DBLock dbLock(opCtx, dbName, MODE_X);
- auto db = dbHolder().openDb(opCtx, dbName);
+ auto db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName);
invariant(db);
auto status = renameCollectionForRollback(opCtx, info.renameTo, uuid);
@@ -1138,7 +1138,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
Lock::DBLock dbLock(opCtx, nss.db(), MODE_X);
- auto db = dbHolder().openDb(opCtx, nss.db().toString());
+ auto db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, nss.db().toString());
invariant(db);
Collection* collection = UUIDCatalog::get(opCtx).lookupCollectionByUUID(uuid);
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index b4858df9cba..93a47ec8044 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -357,7 +357,7 @@ int _testRollbackDelete(OperationContext* opCtx,
Lock::DBLock dbLock(opCtx, "test", MODE_S);
Lock::CollectionLock collLock(opCtx->lockState(), "test.t", MODE_S);
- auto db = dbHolder().get(opCtx, "test");
+ auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, "test");
ASSERT_TRUE(db);
auto collection = db->getCollection(opCtx, "test.t");
if (!collection) {
@@ -1777,7 +1777,7 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
_replicationProcess.get()));
{
Lock::DBLock dbLock(_opCtx.get(), "test", MODE_S);
- auto db = dbHolder().get(_opCtx.get(), "test");
+ auto db = DatabaseHolder::getDatabaseHolder().get(_opCtx.get(), "test");
ASSERT_TRUE(db);
ASSERT_FALSE(db->getCollection(_opCtx.get(), "test.t"));
}
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index e4a53344305..cd9dbcc1fd1 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -370,7 +370,7 @@ Status StorageInterfaceImpl::insertDocuments(OperationContext* opCtx,
}
Status StorageInterfaceImpl::dropReplicatedDatabases(OperationContext* opCtx) {
- dropAllDatabasesExceptLocal(opCtx);
+ Database::dropAllDatabasesExceptLocal(opCtx);
return Status::OK();
}
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 1dd997ce5a8..b70f3c56793 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -542,7 +542,7 @@ private:
CollectionProperties collProperties;
Lock::DBLock dbLock(opCtx, nsToDatabaseSubstring(ns), MODE_IS);
- auto db = dbHolder().get(opCtx, ns);
+ auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, ns);
if (!db) {
return collProperties;
}
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index 3740407529a..d995e4f61e2 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -193,7 +193,7 @@ auto createCollectionWithUuid(OperationContext* opCtx, const NamespaceString& ns
void createDatabase(OperationContext* opCtx, StringData dbName) {
Lock::GlobalWrite globalLock(opCtx);
bool justCreated;
- Database* db = dbHolder().openDb(opCtx, dbName, &justCreated);
+ Database* db = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName, &justCreated);
ASSERT_TRUE(db);
ASSERT_TRUE(justCreated);
}
@@ -991,7 +991,8 @@ TEST_F(SyncTailTest, MultiInitialSyncApplyIgnoresUpdateOperationIfDocumentIsMiss
{
Lock::GlobalWrite globalLock(_opCtx.get());
bool justCreated = false;
- Database* db = dbHolder().openDb(_opCtx.get(), nss.db(), &justCreated);
+ Database* db =
+ DatabaseHolder::getDatabaseHolder().openDb(_opCtx.get(), nss.db(), &justCreated);
ASSERT_TRUE(db);
ASSERT_TRUE(justCreated);
}
diff --git a/src/mongo/db/s/implicit_create_collection.cpp b/src/mongo/db/s/implicit_create_collection.cpp
index c15fc331d41..0ad6e536658 100644
--- a/src/mongo/db/s/implicit_create_collection.cpp
+++ b/src/mongo/db/s/implicit_create_collection.cpp
@@ -93,7 +93,7 @@ public:
// Take the DBLock and CollectionLock directly rather than using AutoGetCollection
// (which calls AutoGetDb) to avoid doing database and shard version checks.
Lock::DBLock dbLock(opCtx, _ns.db(), MODE_IS);
- const auto db = dbHolder().get(opCtx, _ns.db());
+ const auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, _ns.db());
if (db) {
Lock::CollectionLock collLock(opCtx->lockState(), _ns.ns(), MODE_IS);
if (db->getCollection(opCtx, _ns.ns())) {
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 06721587c68..b1545848bd5 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -677,13 +677,13 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx,
// options.
WriteUnitOfWork wuow(opCtx);
const bool createDefaultIndexes = true;
- Status status = userCreateNS(opCtx,
- db,
- _nss.ns(),
- donorOptions,
- CollectionOptions::parseForStorage,
- createDefaultIndexes,
- donorIdIndexSpec);
+ Status status = Database::userCreateNS(opCtx,
+ db,
+ _nss.ns(),
+ donorOptions,
+ CollectionOptions::parseForStorage,
+ createDefaultIndexes,
+ donorIdIndexSpec);
if (!status.isOK()) {
warning() << "failed to create collection [" << _nss << "] "
<< " with options " << donorOptions << ": " << redact(status);
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index be2d5ad9db5..90c776cae58 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -213,7 +213,7 @@ void forceDatabaseRefresh(OperationContext* opCtx, const StringData dbName) {
// Take the DBLock directly rather than using AutoGetDb, to prevent a recursive call
// into checkDbVersion().
Lock::DBLock dbLock(opCtx, dbName, MODE_IS);
- const auto db = dbHolder().get(opCtx, dbName);
+ const auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName);
if (!db) {
log() << "Database " << dbName
<< " has been dropped; not caching the refreshed databaseVersion";
@@ -234,7 +234,7 @@ void forceDatabaseRefresh(OperationContext* opCtx, const StringData dbName) {
// The cached version is older than the refreshed version; update the cached version.
Lock::DBLock dbLock(opCtx, dbName, MODE_X);
- const auto db = dbHolder().get(opCtx, dbName);
+ const auto db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName);
if (!db) {
log() << "Database " << dbName
<< " has been dropped; not caching the refreshed databaseVersion";
diff --git a/src/mongo/db/service_context_d_test_fixture.cpp b/src/mongo/db/service_context_d_test_fixture.cpp
index d42641381e9..df3811421d4 100644
--- a/src/mongo/db/service_context_d_test_fixture.cpp
+++ b/src/mongo/db/service_context_d_test_fixture.cpp
@@ -99,7 +99,7 @@ void ServiceContextMongoDTest::_doTest() {
}
void ServiceContextMongoDTest::_dropAllDBs(OperationContext* opCtx) {
- dropAllDatabasesExceptLocal(opCtx);
+ Database::dropAllDatabasesExceptLocal(opCtx);
Lock::GlobalWrite lk(opCtx);
AutoGetDb autoDBLocal(opCtx, "local", MODE_X);
@@ -111,10 +111,11 @@ void ServiceContextMongoDTest::_dropAllDBs(OperationContext* opCtx) {
});
}
- // dropAllDatabasesExceptLocal() does not close empty databases. However the holder still
- // allocates resources to track these empty databases. These resources not released by
- // dropAllDatabasesExceptLocal() will be leaked at exit unless we call DatabaseHolder::closeAll.
- dbHolder().closeAll(opCtx, "all databases dropped");
+ // Database::dropAllDatabasesExceptLocal() does not close empty databases. However the holder
+ // still allocates resources to track these empty databases. These resources not released by
+ // Database::dropAllDatabasesExceptLocal() will be leaked at exit unless we call
+ // DatabaseHolder::closeAll.
+ DatabaseHolder::getDatabaseHolder().closeAll(opCtx, "all databases dropped");
}
} // namespace mongo
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp
index 49cbc6d1762..700628c4331 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl_test.cpp
@@ -1,5 +1,3 @@
-// ephemeral_for_test_btree_impl_test.cpp
-
/**
* Copyright (C) 2014 MongoDB Inc.
*
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp
index 949cdc5f670..154daa7c9cd 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine_test.cpp
@@ -1,5 +1,3 @@
-// ephemeral_for_test_engine_test.cpp
-
/**
* Copyright (C) 2014 MongoDB Inc.
*
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp
index b52507c20df..1c5fff52e8b 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_init.cpp
@@ -1,5 +1,3 @@
-// ephemeral_for_test_init.cpp
-
/**
* Copyright (C) 2014 MongoDB Inc.
*
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp
index 29f97fd98d6..602311e4ce4 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store_test.cpp
@@ -1,5 +1,3 @@
-// ephemeral_for_test_record_store_test.cpp
-
/**
* Copyright (C) 2014 MongoDB Inc.
*
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index 4ca06132c0c..e0eebfaa73d 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -1,5 +1,3 @@
-// kv_engine_test_harness.cpp
-
/**
* Copyright (C) 2014 MongoDB Inc.
*
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.h b/src/mongo/db/storage/kv/kv_engine_test_harness.h
index 592e11b2a32..64edb26da44 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.h
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.h
@@ -1,5 +1,3 @@
-// kv_engine_test_harness.h
-
/**
* Copyright (C) 2014 MongoDB Inc.
*
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp
index 59bde328780..b49fd70ec26 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp
@@ -1,5 +1,3 @@
-// btree_interface_test.cpp
-
/**
* Copyright (C) 2014 MongoDB Inc.
*
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index 75ca4cda551..31440e91bc5 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -408,7 +408,7 @@ void MMAPV1DatabaseCatalogEntry::invalidateSystemCollectionRecord(
// violation, but at this point we're not going to add more MMAPv1 specific interfaces.
StringData dbName = systemCollectionNamespace.db();
invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X));
- Database* db = dbHolder().get(opCtx, dbName);
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, dbName);
Collection* systemCollection = db->getCollection(opCtx, systemCollectionNamespace);
systemCollection->getCursorManager()->invalidateDocument(opCtx, record, INVALIDATION_DELETION);
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
index dff4fab6f08..3a1e71fad40 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
@@ -1,5 +1,3 @@
-// mmap_v1_record_store_test.cpp
-
/**
* Copyright (C) 2014 MongoDB Inc.
*
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index 73114b71362..416ff14063e 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -312,7 +312,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* opCtx,
new RepairFileDeleter(opCtx, dbName, reservedPathString, reservedPath));
{
- Database* originalDatabase = dbHolder().openDb(opCtx, dbName);
+ Database* originalDatabase = DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName);
if (originalDatabase == NULL) {
return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair");
}
@@ -454,7 +454,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* opCtx,
repairFileDeleter->success();
// Close the database so we can rename/delete the original data files
- dbHolder().close(opCtx, dbName, "database closed for repair");
+ DatabaseHolder::getDatabaseHolder().close(opCtx, dbName, "database closed for repair");
if (backupOriginalFiles) {
_renameForBackup(dbName, reservedPath);
@@ -480,7 +480,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* opCtx,
}
// Reopen the database so it's discoverable
- dbHolder().openDb(opCtx, dbName);
+ DatabaseHolder::getDatabaseHolder().openDb(opCtx, dbName);
return Status::OK();
}
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.h b/src/mongo/db/storage/sorted_data_interface_test_harness.h
index e6f9443fd23..5966755397e 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.h
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.h
@@ -1,5 +1,3 @@
-// sorted_data_interface_test_harness.h
-
/**
* Copyright (C) 2014 MongoDB Inc.
*
@@ -136,4 +134,4 @@ inline void removeFromIndex(unowned_ptr<HarnessHelper> harness,
inline std::unique_ptr<SortedDataInterfaceHarnessHelper> newSortedDataInterfaceHarnessHelper() {
return dynamic_ptr_cast<SortedDataInterfaceHarnessHelper>(newHarnessHelper());
}
-}
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
index fbf55d41b3c..f5ac70627cc 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
@@ -1,5 +1,3 @@
-// wiredtiger_kv_engine_test.cpp
-
/**
* Copyright (C) 2014 MongoDB Inc.
*
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
index c42744ff769..50d0b2a2548 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_record_store_test.cpp
@@ -251,4 +251,4 @@ TEST(WiredTigerRecordStoreTest, PrefixedSeekingCursor) {
}
} // namespace
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
index 9650fe4a021..4041187ce36 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp
@@ -451,4 +451,4 @@ TEST_F(SizeStorerValidateTest, InvalidSizeStorerAtCreation) {
}
} // namespace
-} // mongo
+} // namespace mongo
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index 270bd1ce9c0..199c201f07c 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -54,7 +54,7 @@ namespace mongo {
void DurableViewCatalog::onExternalChange(OperationContext* opCtx, const NamespaceString& name) {
dassert(opCtx->lockState()->isDbLockedForMode(name.db(), MODE_IX));
- Database* db = dbHolder().get(opCtx, name.db());
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, name.db());
if (db) {
opCtx->recoveryUnit()->onCommit([db]() { db->getViewCatalog()->invalidate(); });
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 6aa07cb3414..e7723e1bfa4 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -1,6 +1,3 @@
-// namespacetests.cpp : namespace.{h,cpp} unit tests.
-//
-
/**
* Copyright (C) 2008-2014 MongoDB Inc.
*
@@ -550,7 +547,7 @@ public:
Lock::DBLock lk(&opCtx, dbName, MODE_X);
bool justCreated;
- Database* db = dbHolder().openDb(&opCtx, dbName, &justCreated);
+ Database* db = DatabaseHolder::getDatabaseHolder().openDb(&opCtx, dbName, &justCreated);
ASSERT(justCreated);
Collection* committedColl;
@@ -594,7 +591,7 @@ public:
Lock::DBLock lk(&opCtx, dbName, MODE_X);
bool justCreated;
- Database* db = dbHolder().openDb(&opCtx, dbName, &justCreated);
+ Database* db = DatabaseHolder::getDatabaseHolder().openDb(&opCtx, dbName, &justCreated);
ASSERT(justCreated);
{
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index d020971fecd..c626a272b9f 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -78,7 +78,7 @@ public:
void dropCollection() {
Lock::DBLock dbLock(&_opCtx, nss.db(), MODE_X);
- Database* database = dbHolder().get(&_opCtx, nss.db());
+ Database* database = DatabaseHolder::getDatabaseHolder().get(&_opCtx, nss.db());
if (!database) {
return;
}
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 9da211526f4..3dd7e253f55 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -1252,12 +1252,12 @@ public:
// a bit.
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT(userCreateNS(&_opCtx,
- ctx.db(),
- ns(),
- fromjson("{ capped : true, size : 2000, max: 10000 }"),
- CollectionOptions::parseForCommand,
- false)
+ ASSERT(Database::userCreateNS(&_opCtx,
+ ctx.db(),
+ ns(),
+ fromjson("{ capped : true, size : 2000, max: 10000 }"),
+ CollectionOptions::parseForCommand,
+ false)
.isOK());
wunit.commit();
}
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index 3d3523e4268..bade34cb7ea 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -54,7 +54,7 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
void dropDatabase(OperationContext* opCtx, const NamespaceString& nss) {
Lock::GlobalWrite globalWriteLock(opCtx);
- Database* db = dbHolder().get(opCtx, nss.db());
+ Database* db = DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db());
if (db) {
Database::dropDatabase(opCtx, db);
@@ -72,7 +72,7 @@ void createCollection(OperationContext* opCtx, const NamespaceString& nss) {
{
WriteUnitOfWork uow(opCtx);
ASSERT(!collectionExists(&ctx, nss.ns()));
- ASSERT_OK(userCreateNS(
+ ASSERT_OK(Database::userCreateNS(
opCtx, ctx.db(), nss.ns(), BSONObj(), CollectionOptions::parseForCommand, false));
ASSERT(collectionExists(&ctx, nss.ns()));
uow.commit();
@@ -85,17 +85,20 @@ Status renameCollection(OperationContext* opCtx,
return renameCollection(opCtx, source, target, {});
}
Status truncateCollection(OperationContext* opCtx, const NamespaceString& nss) {
- Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
+ Collection* coll =
+ DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
return coll->truncate(opCtx);
}
void insertRecord(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& data) {
- Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
+ Collection* coll =
+ DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
OpDebug* const nullOpDebug = nullptr;
ASSERT_OK(coll->insertDocument(opCtx, InsertStatement(data), nullOpDebug, false));
}
void assertOnlyRecord(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& data) {
- Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
+ Collection* coll =
+ DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
auto cursor = coll->getCursor(opCtx);
auto record = cursor->next();
@@ -105,15 +108,18 @@ void assertOnlyRecord(OperationContext* opCtx, const NamespaceString& nss, const
ASSERT(!cursor->next());
}
void assertEmpty(OperationContext* opCtx, const NamespaceString& nss) {
- Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
+ Collection* coll =
+ DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
ASSERT(!coll->getCursor(opCtx)->next());
}
bool indexExists(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
- Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
+ Collection* coll =
+ DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, true) != NULL;
}
bool indexReady(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
- Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
+ Collection* coll =
+ DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, false) != NULL;
}
size_t getNumIndexEntries(OperationContext* opCtx,
@@ -121,7 +127,8 @@ size_t getNumIndexEntries(OperationContext* opCtx,
const string& idxName) {
size_t numEntries = 0;
- Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
+ Collection* coll =
+ DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
IndexCatalog* catalog = coll->getIndexCatalog();
IndexDescriptor* desc = catalog->findIndexByName(opCtx, idxName, false);
@@ -137,7 +144,8 @@ size_t getNumIndexEntries(OperationContext* opCtx,
}
void dropIndex(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
- Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
+ Collection* coll =
+ DatabaseHolder::getDatabaseHolder().get(opCtx, nss.db())->getCollection(opCtx, nss);
IndexDescriptor* desc = coll->getIndexCatalog()->findIndexByName(opCtx, idxName);
ASSERT(desc);
ASSERT_OK(coll->getIndexCatalog()->dropIndex(opCtx, desc));
@@ -160,7 +168,7 @@ public:
WriteUnitOfWork uow(&opCtx);
ASSERT(!collectionExists(&ctx, ns));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
- ASSERT_OK(userCreateNS(
+ ASSERT_OK(Database::userCreateNS(
&opCtx, ctx.db(), ns, options, CollectionOptions::parseForCommand, defaultIndexes));
ASSERT(collectionExists(&ctx, ns));
if (!rollback) {
@@ -191,7 +199,7 @@ public:
WriteUnitOfWork uow(&opCtx);
ASSERT(!collectionExists(&ctx, ns));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
- ASSERT_OK(userCreateNS(
+ ASSERT_OK(Database::userCreateNS(
&opCtx, ctx.db(), ns, options, CollectionOptions::parseForCommand, defaultIndexes));
uow.commit();
}
@@ -236,12 +244,12 @@ public:
ASSERT(!collectionExists(&ctx, source.ns()));
ASSERT(!collectionExists(&ctx, target.ns()));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
- ASSERT_OK(userCreateNS(&opCtx,
- ctx.db(),
- source.ns(),
- options,
- CollectionOptions::parseForCommand,
- defaultIndexes));
+ ASSERT_OK(Database::userCreateNS(&opCtx,
+ ctx.db(),
+ source.ns(),
+ options,
+ CollectionOptions::parseForCommand,
+ defaultIndexes));
uow.commit();
}
ASSERT(collectionExists(&ctx, source.ns()));
@@ -293,18 +301,18 @@ public:
ASSERT(!collectionExists(&ctx, source.ns()));
ASSERT(!collectionExists(&ctx, target.ns()));
auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
- ASSERT_OK(userCreateNS(&opCtx,
- ctx.db(),
- source.ns(),
- options,
- CollectionOptions::parseForCommand,
- defaultIndexes));
- ASSERT_OK(userCreateNS(&opCtx,
- ctx.db(),
- target.ns(),
- options,
- CollectionOptions::parseForCommand,
- defaultIndexes));
+ ASSERT_OK(Database::userCreateNS(&opCtx,
+ ctx.db(),
+ source.ns(),
+ options,
+ CollectionOptions::parseForCommand,
+ defaultIndexes));
+ ASSERT_OK(Database::userCreateNS(&opCtx,
+ ctx.db(),
+ target.ns(),
+ options,
+ CollectionOptions::parseForCommand,
+ defaultIndexes));
insertRecord(&opCtx, source, sourceDoc);
insertRecord(&opCtx, target, targetDoc);
@@ -368,12 +376,12 @@ public:
{
WriteUnitOfWork uow(&opCtx);
ASSERT(!collectionExists(&ctx, nss.ns()));
- ASSERT_OK(userCreateNS(&opCtx,
- ctx.db(),
- nss.ns(),
- BSONObj(),
- CollectionOptions::parseForCommand,
- defaultIndexes));
+ ASSERT_OK(Database::userCreateNS(&opCtx,
+ ctx.db(),
+ nss.ns(),
+ BSONObj(),
+ CollectionOptions::parseForCommand,
+ defaultIndexes));
insertRecord(&opCtx, nss, oldDoc);
uow.commit();
}
@@ -392,12 +400,12 @@ public:
{},
DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops));
ASSERT(!collectionExists(&ctx, nss.ns()));
- ASSERT_OK(userCreateNS(&opCtx,
- ctx.db(),
- nss.ns(),
- BSONObj(),
- CollectionOptions::parseForCommand,
- defaultIndexes));
+ ASSERT_OK(Database::userCreateNS(&opCtx,
+ ctx.db(),
+ nss.ns(),
+ BSONObj(),
+ CollectionOptions::parseForCommand,
+ defaultIndexes));
ASSERT(collectionExists(&ctx, nss.ns()));
insertRecord(&opCtx, nss, newDoc);
assertOnlyRecord(&opCtx, nss, newDoc);
@@ -433,12 +441,12 @@ public:
{
WriteUnitOfWork uow(&opCtx);
- ASSERT_OK(userCreateNS(&opCtx,
- ctx.db(),
- nss.ns(),
- BSONObj(),
- CollectionOptions::parseForCommand,
- defaultIndexes));
+ ASSERT_OK(Database::userCreateNS(&opCtx,
+ ctx.db(),
+ nss.ns(),
+ BSONObj(),
+ CollectionOptions::parseForCommand,
+ defaultIndexes));
ASSERT(collectionExists(&ctx, nss.ns()));
insertRecord(&opCtx, nss, doc);
assertOnlyRecord(&opCtx, nss, doc);
@@ -479,12 +487,12 @@ public:
{
WriteUnitOfWork uow(&opCtx);
- ASSERT_OK(userCreateNS(&opCtx,
- ctx.db(),
- nss.ns(),
- BSONObj(),
- CollectionOptions::parseForCommand,
- defaultIndexes));
+ ASSERT_OK(Database::userCreateNS(&opCtx,
+ ctx.db(),
+ nss.ns(),
+ BSONObj(),
+ CollectionOptions::parseForCommand,
+ defaultIndexes));
ASSERT(collectionExists(&ctx, nss.ns()));
insertRecord(&opCtx, nss, doc);
assertOnlyRecord(&opCtx, nss, doc);
@@ -743,7 +751,7 @@ public:
{
WriteUnitOfWork uow(&opCtx);
ASSERT(!collectionExists(&ctx, nss.ns()));
- ASSERT_OK(userCreateNS(
+ ASSERT_OK(Database::userCreateNS(
&opCtx, ctx.db(), nss.ns(), BSONObj(), CollectionOptions::parseForCommand, false));
ASSERT(collectionExists(&ctx, nss.ns()));
Collection* coll = ctx.db()->getCollection(&opCtx, nss);