-rw-r--r-- src/mongo/db/SConscript | 25
-rw-r--r-- src/mongo/db/catalog/SConscript | 6
-rw-r--r-- src/mongo/db/catalog/capped_utils.cpp | 7
-rw-r--r-- src/mongo/db/catalog/collection.h | 26
-rw-r--r-- src/mongo/db/catalog/collection_impl.cpp | 216
-rw-r--r-- src/mongo/db/catalog/collection_impl.h | 32
-rw-r--r-- src/mongo/db/catalog/collection_mock.h | 15
-rw-r--r-- src/mongo/db/catalog/collection_test.cpp | 32
-rw-r--r-- src/mongo/db/catalog/collection_validation_test.cpp | 11
-rw-r--r-- src/mongo/db/catalog/collection_write_path.cpp | 224
-rw-r--r-- src/mongo/db/catalog/collection_write_path.h | 27
-rw-r--r-- src/mongo/db/catalog/create_collection_test.cpp | 14
-rw-r--r-- src/mongo/db/catalog/database_holder_impl.cpp | 5
-rw-r--r-- src/mongo/db/catalog/index_repair.cpp | 5
-rw-r--r-- src/mongo/db/catalog/rename_collection.cpp | 9
-rw-r--r-- src/mongo/db/catalog/rename_collection_test.cpp | 10
-rw-r--r-- src/mongo/db/catalog/throttle_cursor_test.cpp | 11
-rw-r--r-- src/mongo/db/catalog/validate_state_test.cpp | 12
-rw-r--r-- src/mongo/db/change_stream_change_collection_manager.cpp | 14
-rw-r--r-- src/mongo/db/change_stream_pre_images_collection_manager.cpp | 11
-rw-r--r-- src/mongo/db/cloner.cpp | 7
-rw-r--r-- src/mongo/db/commands/getmore_cmd.cpp | 7
-rw-r--r-- src/mongo/db/commands/test_commands.cpp | 14
-rw-r--r-- src/mongo/db/concurrency/SConscript | 1
-rw-r--r-- src/mongo/db/concurrency/deferred_writer.cpp | 7
-rw-r--r-- src/mongo/db/exec/upsert_stage.cpp | 12
-rw-r--r-- src/mongo/db/index_build_entry_helpers.cpp | 7
-rw-r--r-- src/mongo/db/index_builds_coordinator_test.cpp | 9
-rw-r--r-- src/mongo/db/introspect.cpp | 9
-rw-r--r-- src/mongo/db/mongod_main.cpp | 9
-rw-r--r-- src/mongo/db/ops/SConscript | 1
-rw-r--r-- src/mongo/db/ops/write_ops_exec.cpp | 8
-rw-r--r-- src/mongo/db/pipeline/document_source_group_test.cpp | 1
-rw-r--r-- src/mongo/db/pipeline/document_source_unwind_test.cpp | 1
-rw-r--r-- src/mongo/db/pipeline/expression_and_test.cpp | 1
-rw-r--r-- src/mongo/db/pipeline/expression_compare_test.cpp | 1
-rw-r--r-- src/mongo/db/pipeline/expression_field_path_test.cpp | 1
-rw-r--r-- src/mongo/db/pipeline/expression_nary_test.cpp | 1
-rw-r--r-- src/mongo/db/pipeline/expression_object_test.cpp | 1
-rw-r--r-- src/mongo/db/pipeline/process_interface/mongo_process_interface.h | 1
-rw-r--r-- src/mongo/db/query/SConscript | 1
-rw-r--r-- src/mongo/db/query/ce/SConscript | 1
-rw-r--r-- src/mongo/db/query/ce/stats_cache_loader_test.cpp | 12
-rw-r--r-- src/mongo/db/query/find.cpp | 1
-rw-r--r-- src/mongo/db/query/sbe_stage_builder_lookup_test.cpp | 24
-rw-r--r-- src/mongo/db/repl/SConscript | 5
-rw-r--r-- src/mongo/db/repl/collection_bulk_loader_impl.cpp | 4
-rw-r--r-- src/mongo/db/repl/mock_repl_coord_server_fixture.cpp | 16
-rw-r--r-- src/mongo/db/repl/oplog.cpp | 17
-rw-r--r-- src/mongo/db/repl/oplog_applier_impl.cpp | 8
-rw-r--r-- src/mongo/db/repl/rollback_test_fixture.cpp | 8
-rw-r--r-- src/mongo/db/repl/storage_interface_impl.cpp | 9
-rw-r--r-- src/mongo/db/repl/storage_interface_impl_test.cpp | 14
-rw-r--r-- src/mongo/db/repl/storage_timestamp_test.cpp | 22
-rw-r--r-- src/mongo/db/s/SConscript | 1
-rw-r--r-- src/mongo/db/s/balancer/balancer_chunk_selection_policy.h | 2
-rw-r--r-- src/mongo/db/s/collection_metadata_test.cpp | 1
-rw-r--r-- src/mongo/db/s/config/config_server_test_fixture.cpp | 1
-rw-r--r-- src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp | 5
-rw-r--r-- src/mongo/db/s/migration_destination_manager_legacy_commands.cpp | 3
-rw-r--r-- src/mongo/db/s/resharding/resharding_data_copy_util.cpp | 6
-rw-r--r-- src/mongo/db/s/resharding/resharding_data_replication_test.cpp | 5
-rw-r--r-- src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp | 3
-rw-r--r-- src/mongo/db/s/resharding/resharding_oplog_application.cpp | 23
-rw-r--r-- src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp | 8
-rw-r--r-- src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp | 15
-rw-r--r-- src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp | 8
-rw-r--r-- src/mongo/db/s/shard_filtering_metadata_refresh.h | 1
-rw-r--r-- src/mongo/db/s/shard_metadata_util.cpp | 1
-rw-r--r-- src/mongo/db/s/sharding_initialization_op_observer_test.cpp | 6
-rw-r--r-- src/mongo/db/startup_recovery.cpp | 10
-rw-r--r-- src/mongo/db/transaction/SConscript | 1
-rw-r--r-- src/mongo/db/transaction/transaction_participant.cpp | 8
-rw-r--r-- src/mongo/db/transaction/transaction_participant_test.cpp | 8
-rw-r--r-- src/mongo/db/views/SConscript | 1
-rw-r--r-- src/mongo/db/views/durable_view_catalog.cpp | 9
-rw-r--r-- src/mongo/dbtests/SConscript | 3
-rw-r--r-- src/mongo/dbtests/counttests.cpp | 12
-rw-r--r-- src/mongo/dbtests/dbhelper_tests.cpp | 16
-rw-r--r-- src/mongo/dbtests/dbtests.h | 1
-rw-r--r-- src/mongo/dbtests/deferred_writer.cpp | 4
-rw-r--r-- src/mongo/dbtests/indexupdatetests.cpp | 65
-rw-r--r-- src/mongo/dbtests/jsontests.cpp | 8
-rw-r--r-- src/mongo/dbtests/jstests.cpp | 150
-rw-r--r-- src/mongo/dbtests/multikey_paths_test.cpp | 31
-rw-r--r-- src/mongo/dbtests/pdfiletests.cpp | 18
-rw-r--r-- src/mongo/dbtests/plan_ranking.cpp | 41
-rw-r--r-- src/mongo/dbtests/query_stage_cached_plan.cpp | 11
-rw-r--r-- src/mongo/dbtests/query_stage_collscan.cpp | 67
-rw-r--r-- src/mongo/dbtests/query_stage_count.cpp | 21
-rw-r--r-- src/mongo/dbtests/query_stage_ixscan.cpp | 12
-rw-r--r-- src/mongo/dbtests/query_stage_near.cpp | 31
-rw-r--r-- src/mongo/dbtests/querytests.cpp | 13
-rw-r--r-- src/mongo/dbtests/repltests.cpp | 27
-rw-r--r-- src/mongo/dbtests/rollbacktests.cpp | 60
-rw-r--r-- src/mongo/dbtests/validate_tests.cpp | 464
-rw-r--r-- src/mongo/s/catalog/sharding_catalog_client_impl.cpp | 70
-rw-r--r-- src/mongo/s/catalog/sharding_catalog_client_test.cpp | 3
-rw-r--r-- src/mongo/s/chunk.h | 2
-rw-r--r-- src/mongo/s/chunk_test.cpp | 4
-rw-r--r-- src/mongo/s/chunk_version_test.cpp | 1
-rw-r--r-- src/mongo/s/shard_util.h | 1
102 files changed, 1061 insertions, 1154 deletions
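
The bulk of this change moves the document-insert path out of the Collection interface and into free functions in the collection_internal namespace, declared in src/mongo/db/catalog/collection_write_path.h and built as the new catalog/collection_crud SCons library. Call sites change shape roughly as in the sketch below (illustrative, not copied from any single hunk):

    // Before: insert as a member function on Collection.
    Status s = collection->insertDocument(
        opCtx, InsertStatement(doc), opDebug, fromMigrate);

    // After: insert as a free function taking the collection handle.
    #include "mongo/db/catalog/collection_write_path.h"
    Status s = collection_internal::insertDocument(
        opCtx, collection, InsertStatement(doc), opDebug, fromMigrate);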
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index e3cc201d6c9..ccd8ef57ea8 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -556,6 +556,7 @@ env.Library(
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
'$BUILD_DIR/mongo/db/catalog/clustered_collection_options',
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/change_stream_pre_images_collection_manager',
'$BUILD_DIR/mongo/db/concurrency/exception_util',
'$BUILD_DIR/mongo/db/dbhelpers',
@@ -574,7 +575,7 @@ env.Library(
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
'$BUILD_DIR/mongo/db/catalog/clustered_collection_options',
- '$BUILD_DIR/mongo/db/catalog/collection',
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/catalog_raii',
'$BUILD_DIR/mongo/db/change_stream_options_manager',
'$BUILD_DIR/mongo/db/concurrency/exception_util',
@@ -1180,6 +1181,7 @@ env.Library(
],
LIBDEPS_PRIVATE=[
"$BUILD_DIR/mongo/base",
+ "$BUILD_DIR/mongo/db/catalog/collection_crud",
"$BUILD_DIR/mongo/db/catalog/commit_quorum_options",
"$BUILD_DIR/mongo/db/catalog/index_build_entry_idl",
"$BUILD_DIR/mongo/db/catalog/local_oplog_info",
@@ -1194,10 +1196,10 @@ env.Library(
)
env.Library(
- target="cloner",
+ target='cloner',
source=[
- "cloner.cpp",
- "cloner.idl",
+ 'cloner.cpp',
+ 'cloner.idl',
],
LIBDEPS=[
'$BUILD_DIR/mongo/base',
@@ -1208,13 +1210,14 @@ env.Library(
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/client/authentication',
- '$BUILD_DIR/mongo/db/concurrency/exception_util',
- '$BUILD_DIR/mongo/db/ops/write_ops',
- '$BUILD_DIR/mongo/db/s/sharding_api_d',
+ 'catalog/collection_crud',
'catalog/database_holder',
'commands/list_collections_filter',
+ 'concurrency/exception_util',
'index_builds_coordinator_interface',
'multitenancy',
+ 'ops/write_ops',
+ 's/sharding_api_d',
],
)
@@ -1235,6 +1238,7 @@ env.Library(
"introspect.cpp",
],
LIBDEPS_PRIVATE=[
+ "$BUILD_DIR/mongo/db/catalog/collection_crud",
"$BUILD_DIR/mongo/db/catalog/collection_options",
"$BUILD_DIR/mongo/db/concurrency/exception_util",
"$BUILD_DIR/mongo/db/stats/resource_consumption_metrics",
@@ -1511,6 +1515,7 @@ env.Library(
'update/update_driver',
],
LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/catalog/database_holder',
'$BUILD_DIR/mongo/db/catalog/local_oplog_info',
'$BUILD_DIR/mongo/db/commands/server_status_core',
@@ -1526,12 +1531,13 @@ env.Library(
)
env.Library(
- target="startup_recovery",
+ target='startup_recovery',
source=[
- "startup_recovery.cpp",
+ 'startup_recovery.cpp',
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/catalog/database_holder',
'$BUILD_DIR/mongo/db/catalog/multi_index_block',
'$BUILD_DIR/mongo/db/commands/mongod_fcv',
@@ -2394,6 +2400,7 @@ env.Library(
# please add that library as a private libdep of
# mongod_initializers.
'$BUILD_DIR/mongo/client/clientdriver_minimal',
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/change_stream_change_collection_manager',
'$BUILD_DIR/mongo/db/change_stream_options_manager',
'$BUILD_DIR/mongo/db/change_streams_cluster_parameter',
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index d0d5332d9c0..b982222cb12 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -126,6 +126,7 @@ env.Library(
'$BUILD_DIR/mongo/db/curop',
'$BUILD_DIR/mongo/db/index/index_access_method',
'$BUILD_DIR/mongo/db/storage/key_string',
+ 'collection_crud',
'validate_state',
],
)
@@ -229,12 +230,15 @@ env.Library(
'collection_write_path.cpp',
],
LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/db/concurrency/lock_manager',
+ '$BUILD_DIR/mongo/db/curop',
'$BUILD_DIR/mongo/db/record_id_helpers',
'$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
'$BUILD_DIR/mongo/db/storage/record_store_base',
'$BUILD_DIR/mongo/db/storage/write_unit_of_work',
'$BUILD_DIR/mongo/util/fail_point',
'collection',
+ 'document_validation',
'local_oplog_info',
],
)
@@ -528,6 +532,7 @@ env.Library(
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/base',
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/concurrency/exception_util',
'$BUILD_DIR/mongo/db/db_raii',
'$BUILD_DIR/mongo/db/index/index_access_method',
@@ -673,6 +678,7 @@ if wiredtiger:
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/auth/authmocks',
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/commands/test_commands_enabled',
'$BUILD_DIR/mongo/db/concurrency/lock_manager',
'$BUILD_DIR/mongo/db/db_raii',
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 29260b921e3..459698aa67c 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -31,6 +31,7 @@
#include "mongo/base/error_codes.h"
#include "mongo/db/catalog/collection_catalog.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/drop_collection.h"
@@ -54,7 +55,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand
-
namespace mongo {
Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionName) {
@@ -230,7 +230,6 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
}
WriteUnitOfWork wunit(opCtx);
- OpDebug* const nullOpDebug = nullptr;
InsertStatement insertStmt(objToClone);
@@ -245,8 +244,8 @@ void cloneCollectionAsCapped(OperationContext* opCtx,
insertStmt.oplogSlot = oplogSlots.front();
}
- uassertStatusOK(toCollection->insertDocument(
- opCtx, InsertStatement(objToClone), nullOpDebug, true));
+ uassertStatusOK(collection_internal::insertDocument(
+ opCtx, toCollection, InsertStatement(objToClone), nullptr /* OpDebug */, true));
wunit.commit();
// Go to the next document
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 084d3341b26..e5bf833ad20 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -29,7 +29,6 @@
#pragma once
-#include <cstdint>
#include <functional>
#include <memory>
#include <string>
@@ -38,7 +37,6 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
#include "mongo/base/string_data.h"
-#include "mongo/bson/mutable/damage_vector.h"
#include "mongo/bson/timestamp.h"
#include "mongo/db/catalog/collection_operation_source.h"
#include "mongo/db/catalog/collection_options.h"
@@ -335,30 +333,6 @@ public:
StoreDeletedDoc storeDeletedDoc = StoreDeletedDoc::Off,
CheckRecordId checkRecordId = CheckRecordId::Off) const = 0;
- /*
- * Inserts all documents inside one WUOW.
- * Caller should ensure vector is appropriately sized for this.
- * If any errors occur (including WCE), caller should retry documents individually.
- *
- * 'opDebug' Optional argument. When not null, will be used to record operation statistics.
- */
- virtual Status insertDocuments(OperationContext* opCtx,
- std::vector<InsertStatement>::const_iterator begin,
- std::vector<InsertStatement>::const_iterator end,
- OpDebug* opDebug,
- bool fromMigrate = false) const = 0;
-
- /**
- * this does NOT modify the doc before inserting
- * i.e. will not add an _id field for documents that are missing it
- *
- * 'opDebug' Optional argument. When not null, will be used to record operation statistics.
- */
- virtual Status insertDocument(OperationContext* opCtx,
- const InsertStatement& doc,
- OpDebug* opDebug,
- bool fromMigrate = false) const = 0;
-
/**
* Updates the document @ oldLocation with newDoc.
*
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index 4919aa63f34..a66ba8a7063 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -29,24 +29,15 @@
#include "mongo/db/catalog/collection_impl.h"
-#include "mongo/base/counter.h"
-#include "mongo/base/init.h"
#include "mongo/bson/ordering.h"
#include "mongo/bson/simple_bsonelement_comparator.h"
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/crypto/fle_crypto.h"
-#include "mongo/db/catalog/capped_collection_maintenance.h"
-#include "mongo/db/catalog/collection_catalog.h"
-#include "mongo/db/catalog/collection_options.h"
-#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/index_catalog_impl.h"
-#include "mongo/db/catalog/index_consistency.h"
#include "mongo/db/catalog/index_key_validate.h"
#include "mongo/db/catalog/local_oplog_info.h"
#include "mongo/db/catalog/uncommitted_multikey.h"
-#include "mongo/db/clientcursor.h"
-#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/curop.h"
@@ -60,30 +51,20 @@
#include "mongo/db/matcher/implicit_validator.h"
#include "mongo/db/op_observer/op_observer.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/ops/update_request.h"
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/query/collation/collator_interface.h"
#include "mongo/db/query/collection_query_info.h"
#include "mongo/db/query/internal_plans.h"
-#include "mongo/db/record_id_helpers.h"
#include "mongo/db/repl/oplog.h"
-#include "mongo/db/repl/repl_server_parameters_gen.h"
-#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/db/server_options.h"
-#include "mongo/db/server_recovery.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/durable_catalog.h"
-#include "mongo/db/storage/key_string.h"
-#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/storage_parameters_gen.h"
#include "mongo/db/timeseries/timeseries_constants.h"
#include "mongo/db/timeseries/timeseries_extended_range.h"
#include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h"
#include "mongo/db/transaction/transaction_participant.h"
#include "mongo/db/ttl_collection_cache.h"
-#include "mongo/db/update/update_driver.h"
#include "mongo/logv2/log.h"
-#include "mongo/rpc/object_check.h"
#include "mongo/util/fail_point.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage
@@ -91,23 +72,10 @@
namespace mongo {
namespace {
-// Used to pause after inserting collection data and calling the opObservers. Inserts to
-// replicated collections that are not part of a multi-statement transaction will have generated
-// their OpTime and oplog entry. Supports parameters to limit pause by namespace and by _id
-// of first data item in an insert (must be of type string):
-// data: {
-// collectionNS: <fully-qualified collection namespace>,
-// first_id: <string>
-// }
-MONGO_FAIL_POINT_DEFINE(hangAfterCollectionInserts);
-
// This fail point allows collections to be given malformed validator. A malformed validator
// will not (and cannot) be enforced but it will be persisted.
MONGO_FAIL_POINT_DEFINE(allowSettingMalformedCollectionValidators);
-// This fail point introduces corruption to documents during insert.
-MONGO_FAIL_POINT_DEFINE(corruptDocumentOnInsert);
-
MONGO_FAIL_POINT_DEFINE(skipCappedDeletes);
// Uses the collator factory to convert the BSON representation of a collator to a
@@ -695,190 +663,6 @@ Collection::Validator CollectionImpl::parseValidator(
return Collection::Validator{validator, std::move(expCtx), std::move(combinedMatchExpr)};
}
-Status CollectionImpl::insertDocuments(OperationContext* opCtx,
- const std::vector<InsertStatement>::const_iterator begin,
- const std::vector<InsertStatement>::const_iterator end,
- OpDebug* opDebug,
- bool fromMigrate) const {
- auto status = collection_internal::checkFailCollectionInsertsFailPoint(
- _ns, (begin != end ? begin->doc : BSONObj()));
- if (!status.isOK()) {
- return status;
- }
-
- // Should really be done in the collection object at creation and updated on index create.
- const bool hasIdIndex = _indexCatalog->findIdIndex(opCtx);
-
- for (auto it = begin; it != end; it++) {
- if (hasIdIndex && it->doc["_id"].eoo()) {
- return Status(ErrorCodes::InternalError,
- str::stream()
- << "Collection::insertDocument got document without _id for ns:"
- << _ns.toString());
- }
-
- auto status = checkValidationAndParseResult(opCtx, it->doc);
- if (!status.isOK()) {
- return status;
- }
-
- auto& validationSettings = DocumentValidationSettings::get(opCtx);
-
- if (getCollectionOptions().encryptedFieldConfig &&
- !validationSettings.isSchemaValidationDisabled() &&
- !validationSettings.isSafeContentValidationDisabled() &&
- it->doc.hasField(kSafeContent)) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "Cannot insert a document with field name " << kSafeContent);
- }
- }
-
- const SnapshotId sid = opCtx->recoveryUnit()->getSnapshotId();
-
- status = _insertDocuments(opCtx, begin, end, opDebug, fromMigrate);
- if (!status.isOK()) {
- return status;
- }
- invariant(sid == opCtx->recoveryUnit()->getSnapshotId());
-
- opCtx->recoveryUnit()->onCommit(
- [this](boost::optional<Timestamp>) { getRecordStore()->notifyCappedWaitersIfNeeded(); });
-
- hangAfterCollectionInserts.executeIf(
- [&](const BSONObj& data) {
- const auto& firstIdElem = data["first_id"];
- std::string whenFirst;
- if (firstIdElem) {
- whenFirst += " when first _id is ";
- whenFirst += firstIdElem.str();
- }
- LOGV2(20289,
- "hangAfterCollectionInserts fail point enabled. Blocking "
- "until fail point is disabled.",
- "ns"_attr = _ns,
- "whenFirst"_attr = whenFirst);
- hangAfterCollectionInserts.pauseWhileSet(opCtx);
- },
- [&](const BSONObj& data) {
- const auto& collElem = data["collectionNS"];
- const auto& firstIdElem = data["first_id"];
- // If the failpoint specifies no collection or matches the existing one, hang.
- return (!collElem || _ns.ns() == collElem.str()) &&
- (!firstIdElem ||
- (begin != end && firstIdElem.type() == mongo::String &&
- begin->doc["_id"].str() == firstIdElem.str()));
- });
-
- return Status::OK();
-}
-
-Status CollectionImpl::insertDocument(OperationContext* opCtx,
- const InsertStatement& docToInsert,
- OpDebug* opDebug,
- bool fromMigrate) const {
- std::vector<InsertStatement> docs;
- docs.push_back(docToInsert);
- return insertDocuments(opCtx, docs.begin(), docs.end(), opDebug, fromMigrate);
-}
-
-Status CollectionImpl::_insertDocuments(OperationContext* opCtx,
- const std::vector<InsertStatement>::const_iterator begin,
- const std::vector<InsertStatement>::const_iterator end,
- OpDebug* opDebug,
- bool fromMigrate) const {
- dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_IX));
-
- const size_t count = std::distance(begin, end);
- if (isCapped() && _indexCatalog->haveAnyIndexes() && count > 1) {
- // We require that inserts to indexed capped collections be done one-at-a-time to avoid the
- // possibility that a later document causes an earlier document to be deleted before it can
- // be indexed.
- // TODO SERVER-21512 It would be better to handle this here by just doing single inserts.
- return {ErrorCodes::OperationCannotBeBatched,
- "Can't batch inserts into indexed capped collections"};
- }
-
- if (needsCappedLock()) {
- Lock::ResourceLock heldUntilEndOfWUOW{
- opCtx->lockState(), ResourceId(RESOURCE_METADATA, _ns), MODE_X};
- }
-
- std::vector<Record> records;
- records.reserve(count);
- std::vector<Timestamp> timestamps;
- timestamps.reserve(count);
-
- for (auto it = begin; it != end; it++) {
- const auto& doc = it->doc;
-
- RecordId recordId;
- if (isClustered()) {
- invariant(_shared->_recordStore->keyFormat() == KeyFormat::String);
- recordId = uassertStatusOK(record_id_helpers::keyForDoc(
- doc, getClusteredInfo()->getIndexSpec(), getDefaultCollator()));
- }
-
- if (MONGO_unlikely(corruptDocumentOnInsert.shouldFail())) {
- // Insert a truncated record that is half the expected size of the source document.
- records.emplace_back(
- Record{std::move(recordId), RecordData(doc.objdata(), doc.objsize() / 2)});
- timestamps.emplace_back(it->oplogSlot.getTimestamp());
- continue;
- }
-
- records.emplace_back(Record{std::move(recordId), RecordData(doc.objdata(), doc.objsize())});
- timestamps.emplace_back(it->oplogSlot.getTimestamp());
- }
-
- Status status = _shared->_recordStore->insertRecords(opCtx, &records, timestamps);
- if (!status.isOK())
- return status;
-
- std::vector<BsonRecord> bsonRecords;
- bsonRecords.reserve(count);
- int recordIndex = 0;
- for (auto it = begin; it != end; it++) {
- RecordId loc = records[recordIndex++].id;
- if (_shared->_recordStore->keyFormat() == KeyFormat::Long) {
- invariant(RecordId::minLong() < loc);
- invariant(loc < RecordId::maxLong());
- }
-
- BsonRecord bsonRecord = {
- std::move(loc), Timestamp(it->oplogSlot.getTimestamp()), &(it->doc)};
- bsonRecords.emplace_back(std::move(bsonRecord));
- }
-
- int64_t keysInserted = 0;
- status = _indexCatalog->indexRecords(
- opCtx, {this, CollectionPtr::NoYieldTag{}}, bsonRecords, &keysInserted);
- if (!status.isOK()) {
- return status;
- }
-
- if (opDebug) {
- opDebug->additiveMetrics.incrementKeysInserted(keysInserted);
- // 'opDebug' may be deleted at rollback time in case of multi-document transaction.
- if (!opCtx->inMultiDocumentTransaction()) {
- opCtx->recoveryUnit()->onRollback([opDebug, keysInserted]() {
- opDebug->additiveMetrics.incrementKeysInserted(-keysInserted);
- });
- }
- }
-
- if (!ns().isImplicitlyReplicated()) {
- opCtx->getServiceContext()->getOpObserver()->onInserts(
- opCtx, ns(), uuid(), begin, end, fromMigrate);
- }
-
- // TODO (SERVER-67900): Get rid of the CollectionPtr constructor
- collection_internal::cappedDeleteUntilBelowConfiguredMaximum(
- opCtx, CollectionPtr(this, CollectionPtr::NoYieldTag()), records.begin()->id);
-
- return Status::OK();
-}
-
bool CollectionImpl::needsCappedLock() const {
return _shared->_needCappedLock;
}
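
The roughly 190 lines removed from CollectionImpl above reappear almost verbatim in collection_write_path.cpp below; the substantive difference is that private member access is rewritten through the public Collection API. A few representative mappings, paraphrased from the two hunks:

    // CollectionImpl member access       ->  public API used by the free functions
    _ns                                   ->  collection->ns()
    _indexCatalog->findIdIndex(opCtx)     ->  collection->getIndexCatalog()->findIdIndex(opCtx)
    _shared->_recordStore->insertRecords  ->  collection->getRecordStore()->insertRecords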
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index 7da531c5edb..c93fff30855 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -29,10 +29,8 @@
#pragma once
-#include "mongo/bson/timestamp.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/index_catalog.h"
-#include "mongo/db/concurrency/d_concurrency.h"
namespace mongo {
@@ -181,30 +179,6 @@ public:
Collection::StoreDeletedDoc storeDeletedDoc = Collection::StoreDeletedDoc::Off,
CheckRecordId checkRecordId = CheckRecordId::Off) const final;
- /*
- * Inserts all documents inside one WUOW.
- * Caller should ensure vector is appropriately sized for this.
- * If any errors occur (including WCE), caller should retry documents individually.
- *
- * 'opDebug' Optional argument. When not null, will be used to record operation statistics.
- */
- Status insertDocuments(OperationContext* opCtx,
- std::vector<InsertStatement>::const_iterator begin,
- std::vector<InsertStatement>::const_iterator end,
- OpDebug* opDebug,
- bool fromMigrate = false) const final;
-
- /**
- * this does NOT modify the doc before inserting
- * i.e. will not add an _id field for documents that are missing it
- *
- * 'opDebug' Optional argument. When not null, will be used to record operation statistics.
- */
- Status insertDocument(OperationContext* opCtx,
- const InsertStatement& doc,
- OpDebug* opDebug,
- bool fromMigrate = false) const final;
-
/**
* Updates the document @ oldLocation with newDoc.
*
@@ -459,12 +433,6 @@ public:
bool isCappedAndNeedsDelete(OperationContext* opCtx) const final;
private:
- Status _insertDocuments(OperationContext* opCtx,
- std::vector<InsertStatement>::const_iterator begin,
- std::vector<InsertStatement>::const_iterator end,
- OpDebug* opDebug,
- bool fromMigrate) const;
-
/**
* Writes metadata to the DurableCatalog. Func should have the function signature
* 'void(BSONCollectionCatalogEntry::MetaData&)'
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index 3844f4dcac3..52e2b5bcf92 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -151,21 +151,6 @@ public:
MONGO_UNREACHABLE;
}
- Status insertDocuments(OperationContext* opCtx,
- std::vector<InsertStatement>::const_iterator begin,
- std::vector<InsertStatement>::const_iterator end,
- OpDebug* opDebug,
- bool fromMigrate) const {
- MONGO_UNREACHABLE;
- }
-
- Status insertDocument(OperationContext* opCtx,
- const InsertStatement& doc,
- OpDebug* opDebug,
- bool fromMigrate) const {
- MONGO_UNREACHABLE;
- }
-
RecordId updateDocument(OperationContext* opCtx,
const RecordId& oldLocation,
const Snapshotted<BSONObj>& oldDoc,
diff --git a/src/mongo/db/catalog/collection_test.cpp b/src/mongo/db/catalog/collection_test.cpp
index 1d3ec05595a..e05bec3c790 100644
--- a/src/mongo/db/catalog/collection_test.cpp
+++ b/src/mongo/db/catalog/collection_test.cpp
@@ -27,16 +27,12 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include <memory>
-
#include "mongo/bson/oid.h"
#include "mongo/db/catalog/capped_utils.h"
#include "mongo/db/catalog/catalog_test_fixture.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_mock.h"
#include "mongo/db/catalog/collection_validation.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/stdx/thread.h"
@@ -44,16 +40,15 @@
#include "mongo/util/assert_util.h"
#include "mongo/util/fail_point.h"
+namespace mongo {
+namespace {
+
#define ASSERT_ID_EQ(EXPR, ID) \
[](boost::optional<Record> record, RecordId id) { \
ASSERT(record); \
ASSERT_EQ(record->id, id); \
}((EXPR), (ID));
-namespace {
-
-using namespace mongo;
-
class CollectionTest : public CatalogTestFixture {
protected:
void makeCapped(NamespaceString nss, long long cappedSize = 8192);
@@ -555,7 +550,8 @@ TEST_F(CatalogTestFixture, CappedDeleteRecord) {
{
WriteUnitOfWork wuow(operationContext());
- ASSERT_OK(coll->insertDocument(operationContext(), InsertStatement(firstDoc), nullptr));
+ ASSERT_OK(collection_internal::insertDocument(
+ operationContext(), coll, InsertStatement(firstDoc), nullptr));
wuow.commit();
}
@@ -564,7 +560,8 @@ TEST_F(CatalogTestFixture, CappedDeleteRecord) {
// Inserting the second document will remove the first one.
{
WriteUnitOfWork wuow(operationContext());
- ASSERT_OK(coll->insertDocument(operationContext(), InsertStatement(secondDoc), nullptr));
+ ASSERT_OK(collection_internal::insertDocument(
+ operationContext(), coll, InsertStatement(secondDoc), nullptr));
wuow.commit();
}
@@ -599,7 +596,8 @@ TEST_F(CatalogTestFixture, CappedDeleteMultipleRecords) {
WriteUnitOfWork wuow(operationContext());
for (int i = 0; i < nToInsertFirst; i++) {
BSONObj doc = BSON("_id" << i);
- ASSERT_OK(coll->insertDocument(operationContext(), InsertStatement(doc), nullptr));
+ ASSERT_OK(collection_internal::insertDocument(
+ operationContext(), coll, InsertStatement(doc), nullptr));
}
wuow.commit();
}
@@ -610,7 +608,8 @@ TEST_F(CatalogTestFixture, CappedDeleteMultipleRecords) {
WriteUnitOfWork wuow(operationContext());
for (int i = nToInsertFirst; i < nToInsertFirst + nToInsertSecond; i++) {
BSONObj doc = BSON("_id" << i);
- ASSERT_OK(coll->insertDocument(operationContext(), InsertStatement(doc), nullptr));
+ ASSERT_OK(collection_internal::insertDocument(
+ operationContext(), coll, InsertStatement(doc), nullptr));
}
wuow.commit();
}
@@ -790,7 +789,8 @@ TEST_F(CollectionTest, CappedCursorRollover) {
WriteUnitOfWork wuow(operationContext());
for (int i = 0; i < numToInsertFirst; ++i) {
const BSONObj doc = BSON("_id" << i);
- ASSERT_OK(coll->insertDocument(operationContext(), InsertStatement(doc), nullptr));
+ ASSERT_OK(collection_internal::insertDocument(
+ operationContext(), coll, InsertStatement(doc), nullptr));
}
wuow.commit();
}
@@ -808,7 +808,8 @@ TEST_F(CollectionTest, CappedCursorRollover) {
WriteUnitOfWork wuow(operationContext());
for (int i = numToInsertFirst; i < numToInsertFirst + 10; ++i) {
const BSONObj doc = BSON("_id" << i);
- ASSERT_OK(coll->insertDocument(operationContext(), InsertStatement(doc), nullptr));
+ ASSERT_OK(collection_internal::insertDocument(
+ operationContext(), coll, InsertStatement(doc), nullptr));
}
wuow.commit();
}
@@ -855,3 +856,4 @@ TEST_F(CatalogTestFixture, CappedCursorYieldFirst) {
}
} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_validation_test.cpp b/src/mongo/db/catalog/collection_validation_test.cpp
index e3b5a1a9742..f0cfa7842c8 100644
--- a/src/mongo/db/catalog/collection_validation_test.cpp
+++ b/src/mongo/db/catalog/collection_validation_test.cpp
@@ -27,21 +27,19 @@
* it in the license file.
*/
-#include "mongo/db/catalog/collection_validation.h"
-
#include "mongo/bson/util/builder.h"
#include "mongo/db/catalog/catalog_test_fixture.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_validation.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/index/index_access_method.h"
-#include "mongo/db/operation_context.h" // for UnreplicatedWritesBlock
+#include "mongo/db/operation_context.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/bufreader.h"
#include "mongo/util/fail_point.h"
namespace mongo {
-
namespace {
const NamespaceString kNss = NamespaceString("test.t");
@@ -176,7 +174,8 @@ int insertDataRange(OperationContext* opCtx, int startIDNum, int endIDNum) {
{
WriteUnitOfWork wuow(opCtx);
- ASSERT_OK(coll->insertDocuments(opCtx, inserts.begin(), inserts.end(), nullptr, false));
+ ASSERT_OK(collection_internal::insertDocuments(
+ opCtx, *coll, inserts.begin(), inserts.end(), nullptr, false));
wuow.commit();
}
return endIDNum - startIDNum;
diff --git a/src/mongo/db/catalog/collection_write_path.cpp b/src/mongo/db/catalog/collection_write_path.cpp
index 84a2cdebbb1..4d3aeba3899 100644
--- a/src/mongo/db/catalog/collection_write_path.cpp
+++ b/src/mongo/db/catalog/collection_write_path.cpp
@@ -29,7 +29,9 @@
#include "mongo/db/catalog/collection_write_path.h"
+#include "mongo/crypto/fle_crypto.h"
#include "mongo/db/catalog/capped_collection_maintenance.h"
+#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/local_oplog_info.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/op_observer/op_observer.h"
@@ -53,6 +55,129 @@ MONGO_FAIL_POINT_DEFINE(failAfterBulkLoadDocInsert);
// }
MONGO_FAIL_POINT_DEFINE(failCollectionInserts);
+// Used to pause after inserting collection data and calling the opObservers. Inserts to
+// replicated collections that are not part of a multi-statement transaction will have generated
+// their OpTime and oplog entry. Supports parameters to limit pause by namespace and by _id
+// of first data item in an insert (must be of type string):
+// data: {
+// collectionNS: <fully-qualified collection namespace>,
+// first_id: <string>
+// }
+MONGO_FAIL_POINT_DEFINE(hangAfterCollectionInserts);
+
+// This fail point introduces corruption to documents during insert.
+MONGO_FAIL_POINT_DEFINE(corruptDocumentOnInsert);
+
+Status insertDocumentsImpl(OperationContext* opCtx,
+ const CollectionPtr& collection,
+ const std::vector<InsertStatement>::const_iterator begin,
+ const std::vector<InsertStatement>::const_iterator end,
+ OpDebug* opDebug,
+ bool fromMigrate) {
+ const auto& nss = collection->ns();
+ const auto& uuid = collection->uuid();
+
+ dassert(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX));
+
+ const size_t count = std::distance(begin, end);
+
+ if (collection->isCapped() && collection->getIndexCatalog()->haveAnyIndexes() && count > 1) {
+ // We require that inserts to indexed capped collections be done one-at-a-time to avoid the
+ // possibility that a later document causes an earlier document to be deleted before it can
+ // be indexed.
+ // TODO SERVER-21512 It would be better to handle this here by just doing single inserts.
+ return {ErrorCodes::OperationCannotBeBatched,
+ "Can't batch inserts into indexed capped collections"};
+ }
+
+ if (collection->needsCappedLock()) {
+ // X-lock the metadata resource for this replicated, non-clustered capped collection until
+ // the end of the WUOW. Non-clustered capped collections require writes to be serialized on
+ // the secondary in order to guarantee insertion order (SERVER-21483); this exclusive access
+ // to the metadata resource prevents the primary from executing with more concurrency than
+ // secondaries - thus helping secondaries keep up - and protects '_cappedFirstRecord'. See
+ // SERVER-21646. On the other hand, capped clustered collections with a monotonically
+ // increasing cluster key natively guarantee preservation of the insertion order, and don't
+ // need serialisation. We allow concurrent inserts for clustered capped collections.
+ Lock::ResourceLock heldUntilEndOfWUOW{
+ opCtx->lockState(), ResourceId(RESOURCE_METADATA, nss.ns()), MODE_X};
+ }
+
+ std::vector<Record> records;
+ records.reserve(count);
+ std::vector<Timestamp> timestamps;
+ timestamps.reserve(count);
+
+ for (auto it = begin; it != end; it++) {
+ const auto& doc = it->doc;
+
+ RecordId recordId;
+ if (collection->isClustered()) {
+ invariant(collection->getRecordStore()->keyFormat() == KeyFormat::String);
+ recordId = uassertStatusOK(
+ record_id_helpers::keyForDoc(doc,
+ collection->getClusteredInfo()->getIndexSpec(),
+ collection->getDefaultCollator()));
+ }
+
+ if (MONGO_unlikely(corruptDocumentOnInsert.shouldFail())) {
+ // Insert a truncated record that is half the expected size of the source document.
+ records.emplace_back(
+ Record{std::move(recordId), RecordData(doc.objdata(), doc.objsize() / 2)});
+ timestamps.emplace_back(it->oplogSlot.getTimestamp());
+ continue;
+ }
+
+ records.emplace_back(Record{std::move(recordId), RecordData(doc.objdata(), doc.objsize())});
+ timestamps.emplace_back(it->oplogSlot.getTimestamp());
+ }
+
+ Status status = collection->getRecordStore()->insertRecords(opCtx, &records, timestamps);
+ if (!status.isOK())
+ return status;
+
+ std::vector<BsonRecord> bsonRecords;
+ bsonRecords.reserve(count);
+ int recordIndex = 0;
+ for (auto it = begin; it != end; it++) {
+ RecordId loc = records[recordIndex++].id;
+ if (collection->getRecordStore()->keyFormat() == KeyFormat::Long) {
+ invariant(RecordId::minLong() < loc);
+ invariant(loc < RecordId::maxLong());
+ }
+
+ BsonRecord bsonRecord = {
+ std::move(loc), Timestamp(it->oplogSlot.getTimestamp()), &(it->doc)};
+ bsonRecords.emplace_back(std::move(bsonRecord));
+ }
+
+ int64_t keysInserted = 0;
+ status =
+ collection->getIndexCatalog()->indexRecords(opCtx, collection, bsonRecords, &keysInserted);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ if (opDebug) {
+ opDebug->additiveMetrics.incrementKeysInserted(keysInserted);
+ // 'opDebug' may be deleted at rollback time in case of multi-document transaction.
+ if (!opCtx->inMultiDocumentTransaction()) {
+ opCtx->recoveryUnit()->onRollback([opDebug, keysInserted]() {
+ opDebug->additiveMetrics.incrementKeysInserted(-keysInserted);
+ });
+ }
+ }
+
+ if (!nss.isImplicitlyReplicated()) {
+ opCtx->getServiceContext()->getOpObserver()->onInserts(
+ opCtx, nss, uuid, begin, end, fromMigrate);
+ }
+
+ cappedDeleteUntilBelowConfiguredMaximum(opCtx, collection, records.begin()->id);
+
+ return Status::OK();
+}
+
} // namespace
Status insertDocumentForBulkLoader(OperationContext* opCtx,
@@ -62,7 +187,7 @@ Status insertDocumentForBulkLoader(OperationContext* opCtx,
const auto& nss = collection->ns();
const auto& uuid = collection->uuid();
- auto status = collection_internal::checkFailCollectionInsertsFailPoint(nss, doc);
+ auto status = checkFailCollectionInsertsFailPoint(nss, doc);
if (!status.isOK()) {
return status;
}
@@ -112,7 +237,7 @@ Status insertDocumentForBulkLoader(OperationContext* opCtx,
opCtx->getServiceContext()->getOpObserver()->onInserts(
opCtx, nss, uuid, inserts.begin(), inserts.end(), false);
- collection_internal::cappedDeleteUntilBelowConfiguredMaximum(opCtx, collection, loc.getValue());
+ cappedDeleteUntilBelowConfiguredMaximum(opCtx, collection, loc.getValue());
// Capture the recordStore here instead of the CollectionPtr object itself, because the record
// store's lifetime is controlled by the collection IX lock held on the write paths, whereas the
@@ -125,6 +250,101 @@ Status insertDocumentForBulkLoader(OperationContext* opCtx,
return loc.getStatus();
}
+Status insertDocuments(OperationContext* opCtx,
+ const CollectionPtr& collection,
+ std::vector<InsertStatement>::const_iterator begin,
+ std::vector<InsertStatement>::const_iterator end,
+ OpDebug* opDebug,
+ bool fromMigrate) {
+ const auto& nss = collection->ns();
+
+ auto status = checkFailCollectionInsertsFailPoint(nss, (begin != end ? begin->doc : BSONObj()));
+ if (!status.isOK()) {
+ return status;
+ }
+
+ // Should really be done in the collection object at creation and updated on index create.
+ const bool hasIdIndex = collection->getIndexCatalog()->findIdIndex(opCtx);
+
+ for (auto it = begin; it != end; it++) {
+ if (hasIdIndex && it->doc["_id"].eoo()) {
+ return Status(ErrorCodes::InternalError,
+ str::stream()
+ << "Collection::insertDocument got document without _id for ns:"
+ << nss.toString());
+ }
+
+ auto status = collection->checkValidationAndParseResult(opCtx, it->doc);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ auto& validationSettings = DocumentValidationSettings::get(opCtx);
+
+ if (collection->getCollectionOptions().encryptedFieldConfig &&
+ !validationSettings.isSchemaValidationDisabled() &&
+ !validationSettings.isSafeContentValidationDisabled() &&
+ it->doc.hasField(kSafeContent)) {
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Cannot insert a document with field name " << kSafeContent);
+ }
+ }
+
+ const SnapshotId sid = opCtx->recoveryUnit()->getSnapshotId();
+
+ status = insertDocumentsImpl(opCtx, collection, begin, end, opDebug, fromMigrate);
+ if (!status.isOK()) {
+ return status;
+ }
+ invariant(sid == opCtx->recoveryUnit()->getSnapshotId());
+
+ // Capture the recordStore here instead of the CollectionPtr object itself, because the record
+ // store's lifetime is controlled by the collection IX lock held on the write paths, whereas the
+ // CollectionPtr is just a front to the collection and its lifetime is shorter
+ opCtx->recoveryUnit()->onCommit(
+ [recordStore = collection->getRecordStore()](boost::optional<Timestamp>) {
+ recordStore->notifyCappedWaitersIfNeeded();
+ });
+
+ hangAfterCollectionInserts.executeIf(
+ [&](const BSONObj& data) {
+ const auto& firstIdElem = data["first_id"];
+ std::string whenFirst;
+ if (firstIdElem) {
+ whenFirst += " when first _id is ";
+ whenFirst += firstIdElem.str();
+ }
+ LOGV2(20289,
+ "hangAfterCollectionInserts fail point enabled. Blocking "
+ "until fail point is disabled.",
+ "ns"_attr = nss,
+ "whenFirst"_attr = whenFirst);
+ hangAfterCollectionInserts.pauseWhileSet(opCtx);
+ },
+ [&](const BSONObj& data) {
+ const auto& collElem = data["collectionNS"];
+ const auto& firstIdElem = data["first_id"];
+ // If the failpoint specifies no collection or matches the existing one, hang.
+ return (!collElem || nss.ns() == collElem.str()) &&
+ (!firstIdElem ||
+ (begin != end && firstIdElem.type() == mongo::String &&
+ begin->doc["_id"].str() == firstIdElem.str()));
+ });
+
+ return Status::OK();
+}
+
+Status insertDocument(OperationContext* opCtx,
+ const CollectionPtr& collection,
+ const InsertStatement& doc,
+ OpDebug* opDebug,
+ bool fromMigrate) {
+ std::vector<InsertStatement> docs;
+ docs.push_back(doc);
+ return insertDocuments(opCtx, collection, docs.begin(), docs.end(), opDebug, fromMigrate);
+}
+
Status checkFailCollectionInsertsFailPoint(const NamespaceString& ns, const BSONObj& firstDoc) {
Status s = Status::OK();
failCollectionInserts.executeIf(
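
The hangAfterCollectionInserts and corruptDocumentOnInsert fail points move here along with the insert path. A sketch of how a unit test might exercise the relocated hang point, assuming the stock FailPointEnableBlock helper and hypothetical namespace/_id values; the data fields are the ones documented in the fail point's comment:

    // Hang inserts into "test.foo" whose first _id is the string "x".
    FailPointEnableBlock fp("hangAfterCollectionInserts",
                            BSON("collectionNS"
                                 << "test.foo"
                                 << "first_id"
                                 << "x"));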
diff --git a/src/mongo/db/catalog/collection_write_path.h b/src/mongo/db/catalog/collection_write_path.h
index 5039a48e8ee..084c04893ab 100644
--- a/src/mongo/db/catalog/collection_write_path.h
+++ b/src/mongo/db/catalog/collection_write_path.h
@@ -32,6 +32,7 @@
#include "mongo/base/status.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/curop.h"
#include "mongo/db/namespace_string.h"
namespace mongo {
@@ -52,6 +53,32 @@ Status insertDocumentForBulkLoader(OperationContext* opCtx,
const OnRecordInsertedFn& onRecordInserted);
/**
+ * Inserts all documents inside one WUOW.
+ * Caller should ensure vector is appropriately sized for this.
+ * If any errors occur (including WCE), caller should retry documents individually.
+ *
+ * 'opDebug' Optional argument. When not null, will be used to record operation statistics.
+ */
+Status insertDocuments(OperationContext* opCtx,
+ const CollectionPtr& collection,
+ std::vector<InsertStatement>::const_iterator begin,
+ std::vector<InsertStatement>::const_iterator end,
+ OpDebug* opDebug,
+ bool fromMigrate = false);
+
+/**
+ * Does NOT modify the doc before inserting (i.e. will not add an _id field for documents that are
+ * missing it)
+ *
+ * 'opDebug' Optional argument. When not null, will be used to record operation statistics.
+ */
+Status insertDocument(OperationContext* opCtx,
+ const CollectionPtr& collection,
+ const InsertStatement& doc,
+ OpDebug* opDebug,
+ bool fromMigrate = false);
+
+/**
* Checks the 'failCollectionInserts' fail point at the beginning of an insert operation to see if
* the insert should fail. Returns Status::OK if The function should proceed with the insertion.
* Otherwise, the function should fail and return early with the error Status.
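
The new declarations leave transaction scoping to the caller: as the call-site hunks throughout this diff show, inserts run inside a caller-provided WriteUnitOfWork. A minimal caller sketch, with a hypothetical coll handle and the collection lock assumed held in MODE_IX:

    WriteUnitOfWork wuow(opCtx);
    uassertStatusOK(collection_internal::insertDocument(
        opCtx, coll, InsertStatement(BSON("_id" << 1)), nullptr /* opDebug */));
    wuow.commit();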
diff --git a/src/mongo/db/catalog/create_collection_test.cpp b/src/mongo/db/catalog/create_collection_test.cpp
index 8d711c44dc0..0be0942c375 100644
--- a/src/mongo/db/catalog/create_collection_test.cpp
+++ b/src/mongo/db/catalog/create_collection_test.cpp
@@ -27,27 +27,22 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include <memory>
-
#include "mongo/db/catalog/collection_catalog.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/uuid.h"
+namespace mongo {
namespace {
-using namespace mongo;
-
class CreateCollectionTest : public ServiceContextMongoDTest {
private:
void setUp() override;
@@ -288,9 +283,10 @@ TEST_F(CreateCollectionTest, ValidationDisabledForTemporaryReshardingCollection)
// Ensure a document that violates validator criteria can be inserted into the temporary
// resharding collection.
auto insertObj = fromjson("{'_id':2, a:1}");
- auto status =
- collection->insertDocument(opCtx.get(), InsertStatement(insertObj), nullptr, false);
+ auto status = collection_internal::insertDocument(
+ opCtx.get(), *collection, InsertStatement(insertObj), nullptr, false);
ASSERT_OK(status);
}
} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index f267c109b32..712b10c46fd 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -27,14 +27,10 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/catalog/database_holder_impl.h"
#include "mongo/db/audit.h"
#include "mongo/db/catalog/collection_catalog.h"
-#include "mongo/db/catalog/collection_impl.h"
#include "mongo/db/catalog/database_impl.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/index_builds_coordinator.h"
@@ -47,7 +43,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage
-
namespace mongo {
Database* DatabaseHolderImpl::getDb(OperationContext* opCtx, const DatabaseName& dbName) const {
diff --git a/src/mongo/db/catalog/index_repair.cpp b/src/mongo/db/catalog/index_repair.cpp
index a62d8d81604..8b36157188b 100644
--- a/src/mongo/db/catalog/index_repair.cpp
+++ b/src/mongo/db/catalog/index_repair.cpp
@@ -29,6 +29,7 @@
#include "mongo/db/catalog/index_repair.h"
#include "mongo/base/status_with.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/validate_state.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/concurrency/exception_util.h"
@@ -88,8 +89,8 @@ StatusWith<int> moveRecordToLostAndFound(OperationContext* opCtx,
}
// Write document to lost_and_found collection and delete from original collection.
- Status status =
- localCollection->insertDocument(opCtx, InsertStatement(doc.value()), nullptr);
+ Status status = collection_internal::insertDocument(
+ opCtx, localCollection, InsertStatement(doc.value()), nullptr);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 05c2bc09a4f..01baedf90c1 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/catalog/catalog_helper.h"
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/catalog/collection_uuid_mismatch.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/drop_collection.h"
@@ -698,8 +699,12 @@ Status renameBetweenDBs(OperationContext* opCtx,
}
OpDebug* const opDebug = nullptr;
- auto status = autoTmpColl->insertDocuments(
- opCtx, stmts.begin(), stmts.end(), opDebug, false /* fromMigrate */);
+ auto status = collection_internal::insertDocuments(opCtx,
+ *autoTmpColl,
+ stmts.begin(),
+ stmts.end(),
+ opDebug,
+ false /* fromMigrate */);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index f28c5c25eb5..7c95e382c85 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include <memory>
#include <set>
#include <string>
@@ -36,6 +34,7 @@
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/catalog/collection_options.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/rename_collection.h"
@@ -62,10 +61,9 @@
#include "mongo/util/assert_util.h"
#include "mongo/util/str.h"
+namespace mongo {
namespace {
-using namespace mongo;
-
/**
* Mock OpObserver that tracks dropped collections and databases.
* Since this class is used exclusively to test dropDatabase(), we will also check the drop-pending
@@ -499,7 +497,8 @@ void _insertDocument(OperationContext* opCtx, const NamespaceString& nss, const
WriteUnitOfWork wuow(opCtx);
OpDebug* const opDebug = nullptr;
- ASSERT_OK(collection->insertDocument(opCtx, InsertStatement(doc), opDebug));
+ ASSERT_OK(
+ collection_internal::insertDocument(opCtx, *collection, InsertStatement(doc), opDebug));
wuow.commit();
});
}
@@ -1148,3 +1147,4 @@ TEST_F(RenameCollectionTest, FailRenameCollectionToSystemJavascript) {
}
} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/catalog/throttle_cursor_test.cpp b/src/mongo/db/catalog/throttle_cursor_test.cpp
index 02999c2a739..33cd3e9a0b7 100644
--- a/src/mongo/db/catalog/throttle_cursor_test.cpp
+++ b/src/mongo/db/catalog/throttle_cursor_test.cpp
@@ -27,14 +27,11 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/catalog/throttle_cursor.h"
-
#include "mongo/db/catalog/catalog_test_fixture.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/index_catalog_entry.h"
+#include "mongo/db/catalog/throttle_cursor.h"
#include "mongo/db/catalog/validate_gen.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/index/index_access_method.h"
@@ -93,8 +90,8 @@ void ThrottleCursorTest::setUp() {
for (int i = 0; i < 10; i++) {
WriteUnitOfWork wuow(operationContext());
- ASSERT_OK(collection->insertDocument(
- operationContext(), InsertStatement(BSON("_id" << i)), nullOpDebug));
+ ASSERT_OK(collection_internal::insertDocument(
+ operationContext(), *collection, InsertStatement(BSON("_id" << i)), nullOpDebug));
wuow.commit();
}
diff --git a/src/mongo/db/catalog/validate_state_test.cpp b/src/mongo/db/catalog/validate_state_test.cpp
index fa381198b05..a3ccd77ff58 100644
--- a/src/mongo/db/catalog/validate_state_test.cpp
+++ b/src/mongo/db/catalog/validate_state_test.cpp
@@ -27,13 +27,9 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/catalog/validate_state.h"
-
#include "mongo/db/catalog/catalog_test_fixture.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
+#include "mongo/db/catalog/validate_state.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index_builds_coordinator.h"
#include "mongo/db/op_observer/op_observer_impl.h"
@@ -87,8 +83,8 @@ void ValidateStateTest::createCollectionAndPopulateIt(OperationContext* opCtx,
OpDebug* const nullOpDebug = nullptr;
for (int i = 0; i < 10; i++) {
WriteUnitOfWork wuow(opCtx);
- ASSERT_OK(
- collection->insertDocument(opCtx, InsertStatement(BSON("_id" << i)), nullOpDebug));
+ ASSERT_OK(collection_internal::insertDocument(
+ opCtx, *collection, InsertStatement(BSON("_id" << i)), nullOpDebug));
wuow.commit();
}
}
diff --git a/src/mongo/db/change_stream_change_collection_manager.cpp b/src/mongo/db/change_stream_change_collection_manager.cpp
index 9dbf6937b49..0afcb527b9e 100644
--- a/src/mongo/db/change_stream_change_collection_manager.cpp
+++ b/src/mongo/db/change_stream_change_collection_manager.cpp
@@ -29,12 +29,11 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
-#include "mongo/platform/basic.h"
-
#include "mongo/db/change_stream_change_collection_manager.h"
#include "mongo/db/catalog/clustered_collection_util.h"
#include "mongo/db/catalog/coll_mod.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/catalog/drop_collection.h"
#include "mongo/db/catalog_raii.h"
@@ -105,11 +104,12 @@ public:
// Writes to the change collection should not be replicated.
repl::UnreplicatedWritesBlock unReplBlock(opCtx);
- Status status = tenantChangeCollection->insertDocuments(opCtx,
- insertStatements.begin(),
- insertStatements.end(),
- opDebug,
- false /* fromMigrate */);
+ Status status = collection_internal::insertDocuments(opCtx,
+ *tenantChangeCollection,
+ insertStatements.begin(),
+ insertStatements.end(),
+ opDebug,
+ false /* fromMigrate */);
if (!status.isOK()) {
return Status(status.code(),
str::stream()
diff --git a/src/mongo/db/change_stream_pre_images_collection_manager.cpp b/src/mongo/db/change_stream_pre_images_collection_manager.cpp
index a83aa9645f6..3b82e7a7c46 100644
--- a/src/mongo/db/change_stream_pre_images_collection_manager.cpp
+++ b/src/mongo/db/change_stream_pre_images_collection_manager.cpp
@@ -27,13 +27,11 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/change_stream_pre_images_collection_manager.h"
#include "mongo/base/error_codes.h"
#include "mongo/db/catalog/clustered_collection_util.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/catalog/drop_collection.h"
#include "mongo/db/catalog_raii.h"
@@ -162,8 +160,11 @@ void ChangeStreamPreImagesCollectionManager::insertPreImage(OperationContext* op
"The change stream pre-images collection is not present",
changeStreamPreImagesCollection);
- const auto insertionStatus = changeStreamPreImagesCollection->insertDocument(
- opCtx, InsertStatement{preImage.toBSON()}, &CurOp::get(opCtx)->debug());
+ const auto insertionStatus =
+ collection_internal::insertDocument(opCtx,
+ changeStreamPreImagesCollection,
+ InsertStatement{preImage.toBSON()},
+ &CurOp::get(opCtx)->debug());
tassert(5868601,
str::stream() << "Attempted to insert a duplicate document into the pre-images "
"collection. Pre-image id: "
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 9806d0aa44c..d1ec41e1295 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -39,8 +39,8 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/bson/util/builder.h"
#include "mongo/client/authenticate.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_options.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/index_catalog.h"
@@ -194,9 +194,8 @@ struct Cloner::BatchHandler {
WriteUnitOfWork wunit(opCtx);
BSONObj doc = tmp;
- OpDebug* const nullOpDebug = nullptr;
- Status status =
- collection->insertDocument(opCtx, InsertStatement(doc), nullOpDebug, true);
+ Status status = collection_internal::insertDocument(
+ opCtx, collection, InsertStatement(doc), nullptr /* OpDebug */, true);
if (!status.isOK() && status.code() != ErrorCodes::DuplicateKey) {
LOGV2_ERROR(20424,
"error: exception cloning object",
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 2bc6b7d9471..c7750690cb5 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -27,11 +27,7 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <fmt/format.h>
-
#include <memory>
#include <string>
@@ -66,16 +62,13 @@
#include "mongo/db/stats/top.h"
#include "mongo/logv2/log.h"
#include "mongo/rpc/rewrite_state_change_errors.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/scopeguard.h"
#include "mongo/util/time_support.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
-
namespace mongo {
-
namespace {
using namespace fmt::literals;
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index d40430c7143..4b3e63cc9dc 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -34,7 +34,7 @@
#include "mongo/base/init.h"
#include "mongo/db/catalog/capped_collection_maintenance.h"
#include "mongo/db/catalog/capped_utils.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/test_commands_enabled.h"
@@ -50,11 +50,10 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand
namespace mongo {
-
namespace {
+
const NamespaceString kDurableHistoryTestNss("mdb_testing.pinned_timestamp");
const std::string kTestingDurableHistoryPinName = "_testing";
-} // namespace
using repl::UnreplicatedWritesBlock;
using std::endl;
@@ -111,7 +110,8 @@ public:
}
}
OpDebug* const nullOpDebug = nullptr;
- Status status = collection->insertDocument(opCtx, InsertStatement(obj), nullOpDebug, false);
+ Status status = collection_internal::insertDocument(
+ opCtx, collection, InsertStatement(obj), nullOpDebug, false);
if (status.isOK()) {
wunit.commit();
}
@@ -283,8 +283,9 @@ public:
uassertStatusOK(opCtx->getServiceContext()->getStorageEngine()->pinOldestTimestamp(
opCtx, kTestingDurableHistoryPinName, requestedPinTs, round));
- uassertStatusOK(autoColl->insertDocument(
+ uassertStatusOK(collection_internal::insertDocument(
opCtx,
+ *autoColl,
InsertStatement(fixDocumentForInsert(opCtx, BSON("pinTs" << pinTs)).getValue()),
nullptr));
wuow.commit();
@@ -297,6 +298,8 @@ public:
MONGO_REGISTER_TEST_COMMAND(DurableHistoryReplicatedTestCmd);
+} // namespace
+
std::string TestingDurableHistoryPin::getName() {
return kTestingDurableHistoryPinName;
}
@@ -322,5 +325,4 @@ boost::optional<Timestamp> TestingDurableHistoryPin::calculatePin(OperationConte
return ret;
}
-
} // namespace mongo
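Note the *autoColl argument above: call sites holding an AutoGetCollection now dereference it to obtain the collection handle the free function expects, instead of invoking a member through operator->. A sketch of that pattern, with nss and doc as placeholders:

    AutoGetCollection autoColl(opCtx, nss, MODE_IX);
    WriteUnitOfWork wuow(opCtx);
    // *autoColl yields the collection handle taken by the free function
    // (an assumption based on the call sites in this diff, not on the header).
    uassertStatusOK(collection_internal::insertDocument(
        opCtx, *autoColl, InsertStatement(doc), nullptr /* OpDebug */));
    wuow.commit();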
diff --git a/src/mongo/db/concurrency/SConscript b/src/mongo/db/concurrency/SConscript
index 26403ec4a90..c5a91b12387 100644
--- a/src/mongo/db/concurrency/SConscript
+++ b/src/mongo/db/concurrency/SConscript
@@ -15,6 +15,7 @@ env.Library(
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
+ "$BUILD_DIR/mongo/db/catalog/collection_crud",
'$BUILD_DIR/mongo/db/concurrency/exception_util',
'$BUILD_DIR/mongo/db/db_raii',
'$BUILD_DIR/mongo/util/concurrency/thread_pool',
diff --git a/src/mongo/db/concurrency/deferred_writer.cpp b/src/mongo/db/concurrency/deferred_writer.cpp
index a0866cb1ccb..1c0005d8fa7 100644
--- a/src/mongo/db/concurrency/deferred_writer.cpp
+++ b/src/mongo/db/concurrency/deferred_writer.cpp
@@ -27,8 +27,9 @@
* it in the license file.
*/
-
#include "mongo/db/concurrency/deferred_writer.h"
+
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/exception_util.h"
@@ -40,7 +41,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite
-
namespace mongo {
namespace {
@@ -116,7 +116,8 @@ Status DeferredWriter::_worker(InsertStatement stmt) noexcept try {
Status status = writeConflictRetry(opCtx, "deferred insert", _nss.ns(), [&] {
WriteUnitOfWork wuow(opCtx);
- Status status = collection->insertDocument(opCtx, stmt, nullptr, false);
+ Status status =
+ collection_internal::insertDocument(opCtx, collection, stmt, nullptr, false);
if (!status.isOK()) {
return status;
}
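DeferredWriter keeps its writeConflictRetry wrapper; only the innermost line changes in this commit. Condensed from the hunk above, the retry shape looks like this (the lambda re-runs whenever the storage engine raises a write conflict):

    Status result = writeConflictRetry(opCtx, "deferred insert", nss.ns(), [&] {
        WriteUnitOfWork wuow(opCtx);
        Status status =
            collection_internal::insertDocument(opCtx, collection, stmt, nullptr, false);
        if (!status.isOK()) {
            return status;
        }
        wuow.commit();
        return Status::OK();
    });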
diff --git a/src/mongo/db/exec/upsert_stage.cpp b/src/mongo/db/exec/upsert_stage.cpp
index a8afd618fd1..5e48a76b826 100644
--- a/src/mongo/db/exec/upsert_stage.cpp
+++ b/src/mongo/db/exec/upsert_stage.cpp
@@ -29,6 +29,7 @@
#include "mongo/db/exec/upsert_stage.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/local_oplog_info.h"
#include "mongo/db/concurrency/exception_util.h"
@@ -180,11 +181,12 @@ void UpsertStage::_performInsert(BSONObj newDocument) {
insertStmt.oplogSlot = oplogSlots.front();
}
- uassertStatusOK(collection()->insertDocument(opCtx(),
- insertStmt,
- _params.opDebug,
- _params.request->source() ==
- OperationSource::kFromMigrate));
+ uassertStatusOK(collection_internal::insertDocument(opCtx(),
+ collection(),
+ insertStmt,
+ _params.opDebug,
+ _params.request->source() ==
+ OperationSource::kFromMigrate));
// Technically, we should save/restore state here, but since we are going to return
// immediately after, it would just be wasted work.
diff --git a/src/mongo/db/index_build_entry_helpers.cpp b/src/mongo/db/index_build_entry_helpers.cpp
index e5f83f1918b..537eb6916d1 100644
--- a/src/mongo/db/index_build_entry_helpers.cpp
+++ b/src/mongo/db/index_build_entry_helpers.cpp
@@ -27,11 +27,9 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/index_build_entry_helpers.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/commit_quorum_options.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/index_build_entry_gen.h"
@@ -236,8 +234,9 @@ Status addIndexBuildEntry(OperationContext* opCtx, const IndexBuildEntry& indexB
// documents out-of-order into the oplog.
auto oplogInfo = LocalOplogInfo::get(opCtx);
auto oplogSlot = oplogInfo->getNextOpTimes(opCtx, 1U)[0];
- Status status = collection->insertDocument(
+ Status status = collection_internal::insertDocument(
opCtx,
+ *collection,
InsertStatement(kUninitializedStmtId, indexBuildEntry.toBSON(), oplogSlot),
nullptr);
diff --git a/src/mongo/db/index_builds_coordinator_test.cpp b/src/mongo/db/index_builds_coordinator_test.cpp
index eb4e7a7ad20..461174709ad 100644
--- a/src/mongo/db/index_builds_coordinator_test.cpp
+++ b/src/mongo/db/index_builds_coordinator_test.cpp
@@ -27,12 +27,11 @@
* it in the license file.
*/
-#include "mongo/db/index_builds_coordinator.h"
-
#include "mongo/db/catalog/catalog_test_fixture.h"
+#include "mongo/db/catalog/collection_write_path.h"
+#include "mongo/db/index_builds_coordinator.h"
namespace mongo {
-
namespace {
class IndexBuildsCoordinatorTest : public CatalogTestFixture {
@@ -57,8 +56,8 @@ void IndexBuildsCoordinatorTest::createCollectionWithDuplicateDocs(OperationCont
OpDebug* const nullOpDebug = nullptr;
for (int i = 0; i < 10; i++) {
WriteUnitOfWork wuow(opCtx);
- ASSERT_OK(collection->insertDocument(
- opCtx, InsertStatement(BSON("_id" << i << "a" << 1)), nullOpDebug));
+ ASSERT_OK(collection_internal::insertDocument(
+ opCtx, *collection, InsertStatement(BSON("_id" << i << "a" << 1)), nullOpDebug));
wuow.commit();
}
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 6d4e92bd4da..2851471ae6b 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -27,15 +27,12 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/introspect.h"
#include "mongo/bson/util/builder.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/curop.h"
@@ -48,7 +45,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
-
namespace mongo {
using std::endl;
@@ -147,7 +143,8 @@ void profile(OperationContext* opCtx, NetworkOp op) {
invariant(!opCtx->shouldParticipateInFlowControl());
WriteUnitOfWork wuow(opCtx);
OpDebug* const nullOpDebug = nullptr;
- uassertStatusOK(coll->insertDocument(opCtx, InsertStatement(p), nullOpDebug, false));
+ uassertStatusOK(collection_internal::insertDocument(
+ opCtx, coll, InsertStatement(p), nullOpDebug, false));
wuow.commit();
} catch (const AssertionException& assertionEx) {
LOGV2_WARNING(20703,
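profile() deliberately swallows insert failures so that writing a profile entry can never fail the operation being profiled. The shape, condensed from the hunk above:

    try {
        WriteUnitOfWork wuow(opCtx);
        uassertStatusOK(collection_internal::insertDocument(
            opCtx, coll, InsertStatement(p), nullptr /* OpDebug */, false));
        wuow.commit();
    } catch (const AssertionException&) {
        // Logged via LOGV2_WARNING(20703, ...) and swallowed; the message text
        // falls outside this hunk.
    }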
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 80fe723c81e..976a10d10f0 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/mongod_main.h"
#include <boost/filesystem/operations.hpp>
@@ -51,9 +48,9 @@
#include "mongo/db/auth/auth_op_observer.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/sasl_options.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/catalog/collection_impl.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder_impl.h"
@@ -295,8 +292,8 @@ void logStartup(OperationContext* opCtx) {
}
invariant(collection);
- OpDebug* const nullOpDebug = nullptr;
- uassertStatusOK(collection->insertDocument(opCtx, InsertStatement(o), nullOpDebug, false));
+ uassertStatusOK(collection_internal::insertDocument(
+ opCtx, collection, InsertStatement(o), nullptr /* OpDebug */, false));
wunit.commit();
}
diff --git a/src/mongo/db/ops/SConscript b/src/mongo/db/ops/SConscript
index d9ff91cd2fc..47f551b863b 100644
--- a/src/mongo/db/ops/SConscript
+++ b/src/mongo/db/ops/SConscript
@@ -38,6 +38,7 @@ env.Library(
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/catalog/collection_options',
'$BUILD_DIR/mongo/db/catalog_raii',
'$BUILD_DIR/mongo/db/concurrency/exception_util',
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 2cac79f8cbe..14a94a14e89 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/catalog/collection_operation_source.h"
#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/catalog/collection_uuid_mismatch.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog_raii.h"
@@ -385,8 +386,8 @@ void insertDocuments(OperationContext* opCtx,
return !collElem || collection->ns().ns() == collElem.str();
});
- uassertStatusOK(
- collection->insertDocuments(opCtx, begin, end, &CurOp::get(opCtx)->debug(), fromMigrate));
+ uassertStatusOK(collection_internal::insertDocuments(
+ opCtx, collection, begin, end, &CurOp::get(opCtx)->debug(), fromMigrate));
wuow.commit();
}
@@ -1407,7 +1408,8 @@ Status performAtomicTimeseriesWrites(
}
if (!insertOps.empty()) {
- auto status = coll->insertDocuments(opCtx, inserts.begin(), inserts.end(), &curOp->debug());
+ auto status = collection_internal::insertDocuments(
+ opCtx, *coll, inserts.begin(), inserts.end(), &curOp->debug());
if (!status.isOK()) {
return status;
}
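Both hunks above use the batched insertDocuments() variant, which takes an iterator range of InsertStatements instead of a single statement. A sketch of a caller, with the signature inferred from these call sites rather than from the header:

    std::vector<InsertStatement> batch;
    batch.emplace_back(doc1);  // doc1, doc2: placeholder BSONObj values
    batch.emplace_back(doc2);

    WriteUnitOfWork wuow(opCtx);
    uassertStatusOK(collection_internal::insertDocuments(opCtx,
                                                         collection,
                                                         batch.begin(),
                                                         batch.end(),
                                                         &CurOp::get(opCtx)->debug(),
                                                         false /* fromMigrate */));
    wuow.commit();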
diff --git a/src/mongo/db/pipeline/document_source_group_test.cpp b/src/mongo/db/pipeline/document_source_group_test.cpp
index d3ed11f2031..8867356bc89 100644
--- a/src/mongo/db/pipeline/document_source_group_test.cpp
+++ b/src/mongo/db/pipeline/document_source_group_test.cpp
@@ -54,7 +54,6 @@
#include "mongo/dbtests/dbtests.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/unittest/temp_dir.h"
-#include "mongo/unittest/unittest.h"
namespace mongo {
diff --git a/src/mongo/db/pipeline/document_source_unwind_test.cpp b/src/mongo/db/pipeline/document_source_unwind_test.cpp
index aa8a550ec06..662ee79d24a 100644
--- a/src/mongo/db/pipeline/document_source_unwind_test.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind_test.cpp
@@ -49,7 +49,6 @@
#include "mongo/db/query/query_test_service_context.h"
#include "mongo/db/service_context.h"
#include "mongo/dbtests/dbtests.h"
-#include "mongo/unittest/unittest.h"
namespace mongo {
namespace {
diff --git a/src/mongo/db/pipeline/expression_and_test.cpp b/src/mongo/db/pipeline/expression_and_test.cpp
index cbfa37a77d1..4451dee15cf 100644
--- a/src/mongo/db/pipeline/expression_and_test.cpp
+++ b/src/mongo/db/pipeline/expression_and_test.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/dbtests/dbtests.h"
-#include "mongo/unittest/unittest.h"
namespace mongo {
namespace ExpressionTests {
diff --git a/src/mongo/db/pipeline/expression_compare_test.cpp b/src/mongo/db/pipeline/expression_compare_test.cpp
index 11836d77267..a2ee85a1a65 100644
--- a/src/mongo/db/pipeline/expression_compare_test.cpp
+++ b/src/mongo/db/pipeline/expression_compare_test.cpp
@@ -37,7 +37,6 @@
#include "mongo/db/pipeline/expression.h"
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/dbtests/dbtests.h"
-#include "mongo/unittest/unittest.h"
namespace mongo {
namespace ExpressionTests {
diff --git a/src/mongo/db/pipeline/expression_field_path_test.cpp b/src/mongo/db/pipeline/expression_field_path_test.cpp
index 9fa73620dfe..8d175025209 100644
--- a/src/mongo/db/pipeline/expression_field_path_test.cpp
+++ b/src/mongo/db/pipeline/expression_field_path_test.cpp
@@ -37,7 +37,6 @@
#include "mongo/db/pipeline/expression.h"
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/dbtests/dbtests.h"
-#include "mongo/unittest/unittest.h"
namespace mongo {
namespace ExpressionTests {
diff --git a/src/mongo/db/pipeline/expression_nary_test.cpp b/src/mongo/db/pipeline/expression_nary_test.cpp
index b848832c6f9..6f385e5e8e9 100644
--- a/src/mongo/db/pipeline/expression_nary_test.cpp
+++ b/src/mongo/db/pipeline/expression_nary_test.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/dbtests/dbtests.h"
-#include "mongo/unittest/unittest.h"
namespace ExpressionTests {
using boost::intrusive_ptr;
diff --git a/src/mongo/db/pipeline/expression_object_test.cpp b/src/mongo/db/pipeline/expression_object_test.cpp
index 7246e9569e3..651568da433 100644
--- a/src/mongo/db/pipeline/expression_object_test.cpp
+++ b/src/mongo/db/pipeline/expression_object_test.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/dbtests/dbtests.h"
-#include "mongo/unittest/unittest.h"
namespace mongo {
namespace ExpressionTests {
diff --git a/src/mongo/db/pipeline/process_interface/mongo_process_interface.h b/src/mongo/db/pipeline/process_interface/mongo_process_interface.h
index 9e2f0e96926..3ca0029411f 100644
--- a/src/mongo/db/pipeline/process_interface/mongo_process_interface.h
+++ b/src/mongo/db/pipeline/process_interface/mongo_process_interface.h
@@ -57,7 +57,6 @@
#include "mongo/db/storage/backup_cursor_state.h"
#include "mongo/db/storage/temporary_record_store.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/shard_version.h"
namespace mongo {
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index bb5e3047085..362a7e796f4 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -415,6 +415,7 @@ env.CppUnitTest(
],
LIBDEPS=[
"$BUILD_DIR/mongo/db/auth/authmocks",
+ "$BUILD_DIR/mongo/db/catalog/collection_crud",
"$BUILD_DIR/mongo/db/concurrency/lock_manager",
"$BUILD_DIR/mongo/db/exec/sbe/sbe_plan_stage_test",
"$BUILD_DIR/mongo/db/multitenancy",
diff --git a/src/mongo/db/query/ce/SConscript b/src/mongo/db/query/ce/SConscript
index d14a578764d..780f8cb8af1 100644
--- a/src/mongo/db/query/ce/SConscript
+++ b/src/mongo/db/query/ce/SConscript
@@ -66,6 +66,7 @@ env.CppUnitTest(
LIBDEPS=[
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/auth/authmocks',
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/commands/test_commands_enabled',
'$BUILD_DIR/mongo/db/concurrency/lock_manager',
'$BUILD_DIR/mongo/db/db_raii',
diff --git a/src/mongo/db/query/ce/stats_cache_loader_test.cpp b/src/mongo/db/query/ce/stats_cache_loader_test.cpp
index e27454a6e5c..7b3a7b2d63a 100644
--- a/src/mongo/db/query/ce/stats_cache_loader_test.cpp
+++ b/src/mongo/db/query/ce/stats_cache_loader_test.cpp
@@ -27,11 +27,8 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include <memory>
-
#include "mongo/bson/oid.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/query/ce/stats_cache_loader_impl.h"
#include "mongo/db/query/ce/stats_cache_loader_test_fixture.h"
@@ -39,10 +36,9 @@
#include "mongo/util/assert_util.h"
#include "mongo/util/fail_point.h"
+namespace mongo {
namespace {
-using namespace mongo;
-
class StatsCacheLoaderTest : public StatsCacheLoaderTestFixture {
protected:
void createStatsCollection(NamespaceString nss);
@@ -73,7 +69,8 @@ TEST_F(StatsCacheLoaderTest, VerifyStatsLoad) {
WriteUnitOfWork wuow(operationContext());
// TODO: SERVER-68745, insert histogram.
BSONObj doc = BSON("_id" << 1);
- ASSERT_OK(coll->insertDocument(operationContext(), InsertStatement(doc), nullptr));
+ ASSERT_OK(collection_internal::insertDocument(
+ operationContext(), coll, InsertStatement(doc), nullptr));
wuow.commit();
}
auto newStats = _statsCacheLoader.getStats(operationContext(), nss).get();
@@ -81,3 +78,4 @@ TEST_F(StatsCacheLoaderTest, VerifyStatsLoad) {
}
} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index b48bb92e5bd..817b2782168 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -63,7 +63,6 @@
#include "mongo/db/stats/top.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/logv2/log.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/stale_exception.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/scopeguard.h"
diff --git a/src/mongo/db/query/sbe_stage_builder_lookup_test.cpp b/src/mongo/db/query/sbe_stage_builder_lookup_test.cpp
index 2a2b8041ff0..b2fd204e3a3 100644
--- a/src/mongo/db/query/sbe_stage_builder_lookup_test.cpp
+++ b/src/mongo/db/query/sbe_stage_builder_lookup_test.cpp
@@ -31,20 +31,22 @@
* This file contains tests for building execution stages that implement $lookup operator.
*/
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/exec/sbe/sbe_plan_stage_test.h"
#include "mongo/db/exec/sbe/stages/loop_join.h"
#include "mongo/db/pipeline/document_source_lookup.h"
+#include "mongo/db/pipeline/expression_context.h"
+#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/query/query_solution.h"
+#include "mongo/db/query/query_test_service_context.h"
#include "mongo/db/query/sbe_stage_builder_test_fixture.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/util/assert_util.h"
-#include "mongo/db/pipeline/expression_context.h"
-#include "mongo/db/pipeline/expression_context_for_test.h"
-#include "mongo/db/query/query_test_service_context.h"
-
namespace mongo::sbe {
+namespace {
+
using namespace value;
class LookupStageBuilderTest : public SbeStageBuilderTestFixture {
@@ -75,14 +77,15 @@ public:
std::unique_ptr<AutoGetCollection>& lock,
const std::vector<BSONObj>& docs) {
std::vector<InsertStatement> inserts{docs.begin(), docs.end()};
- lock = std::make_unique<AutoGetCollection>(operationContext(), nss, LockMode::MODE_X);
+ lock = std::make_unique<AutoGetCollection>(operationContext(), nss, LockMode::MODE_IX);
+
{
WriteUnitOfWork wuow{operationContext()};
- ASSERT_OK(
- lock.get()
- ->getWritableCollection(operationContext())
- ->insertDocuments(
- operationContext(), inserts.begin(), inserts.end(), nullptr /* opDebug */));
+ ASSERT_OK(collection_internal::insertDocuments(operationContext(),
+ lock.get()->getCollection(),
+ inserts.begin(),
+ inserts.end(),
+ nullptr /* opDebug */));
wuow.commit();
}
@@ -776,4 +779,5 @@ TEST_F(LookupStageBuilderTest, ThreeComponentAsPathDoesNotPerformArrayTraversal)
"_id", "_id", "one.two.three", {fromjson("{_id: 0, one: {two: {three: [{_id: 0}]}}}")});
}
+} // namespace
} // namespace mongo::sbe
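Because the free function writes through the const collection handle, this test no longer needs getWritableCollection(), and its collection lock weakens from MODE_X to MODE_IX; an intent lock is apparently enough for plain document writes that leave the catalog untouched. The resulting helper, condensed from the hunk above:

    lock = std::make_unique<AutoGetCollection>(operationContext(), nss, LockMode::MODE_IX);
    WriteUnitOfWork wuow{operationContext()};
    ASSERT_OK(collection_internal::insertDocuments(operationContext(),
                                                   lock->getCollection(),
                                                   inserts.begin(),
                                                   inserts.end(),
                                                   nullptr /* opDebug */));
    wuow.commit();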
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 40dcaa49c6e..5478f2fa9aa 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -504,6 +504,7 @@ env.Library(
'rollback_test_fixture.cpp',
],
LIBDEPS=[
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/multitenancy',
'$BUILD_DIR/mongo/db/op_observer/op_observer',
'$BUILD_DIR/mongo/db/query_exec',
@@ -612,7 +613,6 @@ env.Library(
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/auth/authorization_manager_global',
- '$BUILD_DIR/mongo/db/concurrency/lock_manager',
'$BUILD_DIR/mongo/db/curop',
'$BUILD_DIR/mongo/db/query_exec',
'$BUILD_DIR/mongo/db/stats/timer_stats',
@@ -629,9 +629,11 @@ env.Library(
'storage_interface',
],
LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/change_stream_change_collection_manager',
'$BUILD_DIR/mongo/db/commands/mongod_fsync',
'$BUILD_DIR/mongo/db/concurrency/exception_util',
+ '$BUILD_DIR/mongo/db/concurrency/lock_manager',
'$BUILD_DIR/mongo/db/storage/storage_control',
'repl_server_parameters',
'replication_auth',
@@ -1553,6 +1555,7 @@ env.Library(
'mock_repl_coord_server_fixture.cpp',
],
LIBDEPS=[
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/catalog_raii',
'$BUILD_DIR/mongo/db/dbdirectclient',
'$BUILD_DIR/mongo/db/service_context_d_test_fixture',
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index a888dd87ad9..7cabcb659d0 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -196,8 +196,8 @@ Status CollectionBulkLoaderImpl::_insertDocumentsForCappedCollection(
WriteUnitOfWork wunit(_opCtx.get());
// For capped collections, we use regular insertDocument, which
// will update pre-existing indexes.
- const auto status =
- (*_collection)->insertDocument(_opCtx.get(), InsertStatement(doc), nullptr);
+ const auto status = collection_internal::insertDocument(
+ _opCtx.get(), **_collection, InsertStatement(doc), nullptr);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp b/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp
index 9a76ffc13b4..7ca8a8cf8eb 100644
--- a/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp
+++ b/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp
@@ -27,17 +27,14 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include <memory>
-
+#include "mongo/db/repl/mock_repl_coord_server_fixture.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/client.h"
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/drop_pending_collection_reaper.h"
-#include "mongo/db/repl/mock_repl_coord_server_fixture.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/repl/replication_consistency_markers_mock.h"
@@ -106,10 +103,11 @@ void MockReplCoordServerFixture::insertOplogEntry(const repl::OplogEntry& entry)
ASSERT_TRUE(coll);
WriteUnitOfWork wuow(opCtx());
- auto status = coll->insertDocument(opCtx(),
- InsertStatement(entry.getEntry().toBSON()),
- &CurOp::get(opCtx())->debug(),
- /* fromMigrate */ false);
+ auto status = collection_internal::insertDocument(opCtx(),
+ *coll,
+ InsertStatement(entry.getEntry().toBSON()),
+ &CurOp::get(opCtx())->debug(),
+ /* fromMigrate */ false);
ASSERT_OK(status);
wuow.commit();
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 71631949cc2..b8cac58a81a 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -43,8 +43,8 @@
#include "mongo/db/catalog/capped_collection_maintenance.h"
#include "mongo/db/catalog/capped_utils.h"
#include "mongo/db/catalog/coll_mod.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/document_validation.h"
@@ -1328,11 +1328,12 @@ Status applyOperation_inlock(OperationContext* opCtx,
}
OpDebug* const nullOpDebug = nullptr;
- Status status = collection->insertDocuments(opCtx,
- insertObjs.begin(),
- insertObjs.end(),
- nullOpDebug,
- false /* fromMigrate */);
+ Status status = collection_internal::insertDocuments(opCtx,
+ collection,
+ insertObjs.begin(),
+ insertObjs.end(),
+ nullOpDebug,
+ false /* fromMigrate */);
if (!status.isOK()) {
return status;
}
@@ -1413,8 +1414,8 @@ Status applyOperation_inlock(OperationContext* opCtx,
}
OpDebug* const nullOpDebug = nullptr;
- Status status = collection->insertDocument(
- opCtx, insertStmt, nullOpDebug, false /* fromMigrate */);
+ Status status = collection_internal::insertDocument(
+ opCtx, collection, insertStmt, nullOpDebug, false /* fromMigrate */);
if (status.isOK()) {
wuow.commit();
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index f7403a15209..8dfbdbd24f6 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -27,11 +27,10 @@
* it in the license file.
*/
-
#include "mongo/db/repl/oplog_applier_impl.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/document_validation.h"
@@ -55,7 +54,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication
-
namespace mongo {
namespace repl {
namespace {
@@ -142,8 +140,8 @@ Status _insertDocumentsToOplogAndChangeCollections(
return {ErrorCodes::NamespaceNotFound, "Oplog collection does not exist"};
}
- auto status = oplogColl->insertDocuments(
- opCtx, begin, end, nullptr /* OpDebug */, false /* fromMigrate */);
+ auto status = collection_internal::insertDocuments(
+ opCtx, oplogColl, begin, end, nullptr /* OpDebug */, false /* fromMigrate */);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index 6944d6a14b4..94ff50bbca6 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -27,13 +27,11 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/repl/rollback_test_fixture.h"
-#include <memory>
#include <string>
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
@@ -234,8 +232,8 @@ void RollbackTest::_insertDocument(OperationContext* opCtx,
auto insertDoc = [opCtx, &doc](const CollectionPtr& collection) {
WriteUnitOfWork wuow(opCtx);
- OpDebug* const opDebug = nullptr;
- ASSERT_OK(collection->insertDocument(opCtx, InsertStatement(doc), opDebug));
+ ASSERT_OK(collection_internal::insertDocument(
+ opCtx, collection, InsertStatement(doc), nullptr /* OpDebug */));
wuow.commit();
};
AutoGetCollection collection(opCtx, nss, MODE_X);
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 045bf46d458..0ce8e4bf344 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/repl/storage_interface_impl.h"
#include <algorithm>
@@ -44,9 +41,9 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/catalog/coll_mod.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/catalog/collection_catalog_helper.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/index_catalog.h"
@@ -90,7 +87,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication
-
namespace mongo {
namespace repl {
@@ -363,7 +359,8 @@ Status insertDocumentsSingleBatch(OperationContext* opCtx,
WriteUnitOfWork wunit(opCtx);
OpDebug* const nullOpDebug = nullptr;
- auto status = (*collection)->insertDocuments(opCtx, begin, end, nullOpDebug, false);
+ auto status =
+ collection_internal::insertDocuments(opCtx, *collection, begin, end, nullOpDebug, false);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index 55d9142d8ce..fc946ced9db 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include <algorithm>
#include <boost/optional.hpp>
#include <memory>
@@ -36,6 +34,7 @@
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/catalog/clustered_collection_util.h"
#include "mongo/db/catalog/collection_options.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/index_catalog.h"
@@ -61,11 +60,10 @@
#include "mongo/util/fail_point.h"
#include "mongo/util/str.h"
+namespace mongo {
+namespace repl {
namespace {
-using namespace mongo;
-using namespace mongo::repl;
-
const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
BSONObj makeIdIndexSpec(const NamespaceString& nss) {
@@ -446,8 +444,8 @@ TEST_F(StorageInterfaceImplTest,
AutoGetCollection autoCollection(opCtx, nss, MODE_IX);
WriteUnitOfWork wunit(opCtx);
ASSERT_EQUALS(ErrorCodes::OperationCannotBeBatched,
- autoCollection.getCollection()->insertDocuments(
- opCtx, docs.cbegin(), docs.cend(), nullptr, false));
+ collection_internal::insertDocuments(
+ opCtx, *autoCollection, docs.cbegin(), docs.cend(), nullptr, false));
}
ASSERT_OK(storage.insertDocuments(opCtx, nss, docs));
@@ -3305,3 +3303,5 @@ TEST_F(StorageInterfaceImplTest, SetIndexIsMultikeySucceeds) {
}
} // namespace
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/storage_timestamp_test.cpp b/src/mongo/db/repl/storage_timestamp_test.cpp
index 4dcce2ef1ac..1a63b3b764e 100644
--- a/src/mongo/db/repl/storage_timestamp_test.cpp
+++ b/src/mongo/db/repl/storage_timestamp_test.cpp
@@ -27,17 +27,12 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
-#include <cstdint>
-
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/mutable/algorithm.h"
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/bson/timestamp.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/drop_database.h"
@@ -101,7 +96,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -372,7 +366,8 @@ public:
// Insert some documents.
OpDebug* const nullOpDebug = nullptr;
const bool fromMigrate = false;
- ASSERT_OK(coll->insertDocument(_opCtx, stmt, nullOpDebug, fromMigrate));
+ ASSERT_OK(
+ collection_internal::insertDocument(_opCtx, coll, stmt, nullOpDebug, fromMigrate));
}
void createIndex(CollectionWriter& coll, std::string indexName, const BSONObj& indexKey) {
@@ -2762,8 +2757,9 @@ TEST_F(StorageTimestampTest, IndexBuildsResolveErrorsDuringStateChangeToPrimary)
LOGV2(22507, "attempting to insert {badDoc3}", "badDoc3"_attr = badDoc3);
WriteUnitOfWork wuow(_opCtx);
ASSERT_THROWS_CODE(
- collection->insertDocument(
+ collection_internal::insertDocument(
_opCtx,
+ collection.get(),
InsertStatement(badDoc3, indexInit.addTicks(1).asTimestamp(), _presentTerm),
/* opDebug */ nullptr,
/* noWarn */ false),
@@ -3194,8 +3190,12 @@ TEST_F(StorageTimestampTest, MultipleTimestampsForMultikeyWrites) {
_presentTerm);
WriteUnitOfWork wuow(_opCtx);
- ASSERT_OK(autoColl.getCollection()->insertDocuments(
- _opCtx, vectoredInsert.begin(), vectoredInsert.end(), nullptr, false));
+ ASSERT_OK(collection_internal::insertDocuments(_opCtx,
+ autoColl.getCollection(),
+ vectoredInsert.begin(),
+ vectoredInsert.end(),
+ nullptr,
+ false));
wuow.commit();
}
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 5808fdeabb0..0fa209c2340 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -184,6 +184,7 @@ env.Library(
'$BUILD_DIR/mongo/crypto/encrypted_field_config',
'$BUILD_DIR/mongo/crypto/fle_crypto',
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/catalog/database_holder',
'$BUILD_DIR/mongo/db/index_builds_coordinator_interface',
'$BUILD_DIR/mongo/db/ops/write_ops',
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
index 3eb70bcb00b..1bfcc5dc989 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h
@@ -34,11 +34,9 @@
#include "mongo/db/s/balancer/balancer_policy.h"
#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/chunk_version.h"
namespace mongo {
-class ChunkType;
class NamespaceString;
class OperationContext;
template <typename T>
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 4084fe8e9e2..3c576765f4c 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -32,7 +32,6 @@
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/resharding/resharding_util.h"
#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/sharding_test_fixture_common.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp
index 2d845151764..eedeb9943a3 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.cpp
+++ b/src/mongo/db/s/config/config_server_test_fixture.cpp
@@ -66,7 +66,6 @@
#include "mongo/s/catalog/type_database_gen.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog_cache.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config_server_catalog_cache_loader.h"
#include "mongo/s/database_version.h"
diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
index 66b4416a343..ec93541b311 100644
--- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/base/status_with.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/auth/authorization_session.h"
@@ -43,13 +40,11 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
diff --git a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
index 8e2ab56f624..726d96d7465 100644
--- a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
@@ -27,7 +27,6 @@
* it in the license file.
*/
-
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
@@ -43,13 +42,11 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/start_chunk_clone_request.h"
#include "mongo/logv2/log.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
#include "mongo/util/assert_util.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
diff --git a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
index 4d9c16d31f3..7126e13a091 100644
--- a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
@@ -27,10 +27,9 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/resharding/resharding_data_copy_util.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/rename_collection.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/concurrency/exception_util.h"
@@ -244,7 +243,8 @@ int insertBatch(OperationContext* opCtx,
numBytes += insert->doc.objsize();
}
- uassertStatusOK(outputColl->insertDocuments(opCtx, batch.begin(), batch.end(), nullptr));
+ uassertStatusOK(collection_internal::insertDocuments(
+ opCtx, *outputColl, batch.begin(), batch.end(), nullptr));
wuow.commit();
return numBytes;
diff --git a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
index f5f588ac948..3d5934199d3 100644
--- a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
@@ -28,6 +28,7 @@
*/
#include "mongo/bson/bsonmisc.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/persistent_task_store.h"
#include "mongo/db/query/collation/collator_factory_mock.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
@@ -219,8 +220,8 @@ TEST_F(ReshardingDataReplicationTest, GetOplogFetcherResumeId) {
AutoGetCollection oplogBufferColl(opCtx.get(), oplogBufferNss, MODE_IX);
WriteUnitOfWork wuow(opCtx.get());
- ASSERT_OK(oplogBufferColl->insertDocument(
- opCtx.get(), InsertStatement{oplogEntry.toBSON()}, nullptr));
+ ASSERT_OK(collection_internal::insertDocument(
+ opCtx.get(), *oplogBufferColl, InsertStatement{oplogEntry.toBSON()}, nullptr));
wuow.commit();
};
diff --git a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
index 3283fde2e4f..8d29958aa9d 100644
--- a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
@@ -27,6 +27,7 @@
* it in the license file.
*/
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/dbdirectclient.h"
@@ -250,7 +251,7 @@ protected:
const ReshardingEnv& env) {
AutoGetCollection coll(opCtx, nss, MODE_IX);
WriteUnitOfWork wuow(opCtx);
- ASSERT_OK(coll->insertDocument(opCtx, InsertStatement(doc), nullptr));
+ ASSERT_OK(collection_internal::insertDocument(opCtx, *coll, InsertStatement(doc), nullptr));
wuow.commit();
}
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
index 5a056f0ffbc..21321ce00fd 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
@@ -27,11 +27,9 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/resharding/resharding_oplog_application.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/index/index_access_method.h"
@@ -54,7 +52,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding
-
namespace mongo {
namespace {
Date_t getDeadline(OperationContext* opCtx) {
@@ -275,8 +272,11 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt
auto foundDoc = Helpers::findByIdAndNoopUpdate(opCtx, outputColl, idQuery, outputCollDoc);
if (!foundDoc) {
- uassertStatusOK(outputColl->insertDocument(
- opCtx, InsertStatement(oField), nullptr /* nullOpDebug*/, false /* fromMigrate */));
+ uassertStatusOK(collection_internal::insertDocument(opCtx,
+ outputColl,
+ InsertStatement(oField),
+ nullptr /* OpDebug */,
+ false /* fromMigrate */));
return;
}
@@ -304,8 +304,8 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt
// The doc does not belong to '_donorShardId' under the original shard key, so apply rule #4
// and insert the contents of 'op' to the stash collection.
- uassertStatusOK(stashColl->insertDocument(
- opCtx, InsertStatement(oField), nullptr /* nullOpDebug */, false /* fromMigrate */));
+ uassertStatusOK(collection_internal::insertDocument(
+ opCtx, stashColl, InsertStatement(oField), nullptr /* OpDebug */, false /* fromMigrate */));
_applierMetrics->onWriteToStashCollections();
}
@@ -536,8 +536,11 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock(OperationContext* opCt
// Insert the doc we just deleted from one of the stash collections into the output
// collection.
if (!doc.isEmpty()) {
- uassertStatusOK(autoCollOutput->insertDocument(
- opCtx, InsertStatement(doc), nullptr /* nullOpDebug */, false /* fromMigrate */));
+ uassertStatusOK(collection_internal::insertDocument(opCtx,
+ *autoCollOutput,
+ InsertStatement(doc),
+ nullptr /* OpDebug */,
+ false /* fromMigrate */));
}
});
}
diff --git a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
index 67eb32196ac..8172673e578 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
@@ -29,6 +29,7 @@
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/catalog/collection_options.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/logical_session_cache_noop.h"
#include "mongo/db/op_observer/op_observer_registry.h"
@@ -711,9 +712,10 @@ TEST_F(ReshardingOplogCrudApplicationTest, DeleteOpAtomicallyMovesFromOtherStash
AutoGetCollection otherStashColl(opCtx.get(), otherStashNss(), MODE_IX);
WriteUnitOfWork wuow(opCtx.get());
ASSERT_OK(
- otherStashColl->insertDocument(opCtx.get(),
- InsertStatement{BSON("_id" << 0 << sk() << -3)},
- nullptr /* opDebug */));
+ collection_internal::insertDocument(opCtx.get(),
+ *otherStashColl,
+ InsertStatement{BSON("_id" << 0 << sk() << -3)},
+ nullptr /* opDebug */));
wuow.commit();
}
diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp b/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp
index 5f84a037f4c..20685f612ab 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp
@@ -27,19 +27,16 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
-#include <vector>
-
#include "mongo/db/s/resharding/resharding_oplog_fetcher.h"
#include <fmt/format.h>
+#include <vector>
#include "mongo/bson/bsonobj.h"
#include "mongo/client/dbclient_connection.h"
#include "mongo/client/remote_command_targeter.h"
#include "mongo/db/catalog/clustered_collection_util.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/pipeline/aggregate_command_gen.h"
@@ -56,7 +53,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding
-
namespace mongo {
namespace {
boost::intrusive_ptr<ExpressionContext> _makeExpressionContext(OperationContext* opCtx) {
@@ -351,7 +347,8 @@ bool ReshardingOplogFetcher::consume(Client* client,
auto startAt = ReshardingDonorOplogId::parse(
{"OplogFetcherParsing"}, nextOplog.get_id()->getDocument().toBson());
Timer insertTimer;
- uassertStatusOK(toWriteTo->insertDocument(opCtx, InsertStatement{doc}, nullptr));
+ uassertStatusOK(collection_internal::insertDocument(
+ opCtx, *toWriteTo, InsertStatement{doc}, nullptr));
wuow.commit();
_env->metrics()->onLocalInsertDuringOplogFetching(
@@ -396,8 +393,8 @@ bool ReshardingOplogFetcher::consume(Client* client,
oplog.setOpTime(OplogSlot());
oplog.setWallClockTime(opCtx->getServiceContext()->getFastClockSource()->now());
- uassertStatusOK(
- toWriteTo->insertDocument(opCtx, InsertStatement{oplog.toBSON()}, nullptr));
+ uassertStatusOK(collection_internal::insertDocument(
+ opCtx, *toWriteTo, InsertStatement{oplog.toBSON()}, nullptr));
wuow.commit();
// Also include synthetic oplog in the fetched count so it can match up with the
diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
index 9258511832d..d56ec50531d 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
@@ -27,13 +27,11 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <boost/optional.hpp>
#include <vector>
#include "mongo/bson/bsonobj.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/db_raii.h"
@@ -59,7 +57,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -171,7 +168,8 @@ public:
// Insert some documents.
OpDebug* const nullOpDebug = nullptr;
const bool fromMigrate = false;
- ASSERT_OK(coll->insertDocument(_opCtx, stmt, nullOpDebug, fromMigrate));
+ ASSERT_OK(
+ collection_internal::insertDocument(_opCtx, coll, stmt, nullOpDebug, fromMigrate));
}
BSONObj queryCollection(NamespaceString nss, const BSONObj& query) {
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.h b/src/mongo/db/s/shard_filtering_metadata_refresh.h
index 724409b4621..955eb3ccdee 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.h
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.h
@@ -31,7 +31,6 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/collection_metadata.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/database_version.h"
namespace mongo {
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index 3dc7349c655..9d53099c519 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -39,7 +39,6 @@
#include "mongo/rpc/unique_message.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/write_ops/batched_command_response.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
diff --git a/src/mongo/db/s/sharding_initialization_op_observer_test.cpp b/src/mongo/db/s/sharding_initialization_op_observer_test.cpp
index 2420e858e54..eb8bc9a248b 100644
--- a/src/mongo/db/s/sharding_initialization_op_observer_test.cpp
+++ b/src/mongo/db/s/sharding_initialization_op_observer_test.cpp
@@ -27,9 +27,8 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/client/remote_command_targeter_mock.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/dbdirectclient.h"
@@ -118,7 +117,8 @@ TEST_F(ShardingInitializationOpObserverTest, GlobalInitDoesntGetCalledIfWriteAbo
WriteUnitOfWork wuow(operationContext());
InsertStatement stmt(shardIdentity.toShardIdentityDocument());
- ASSERT_OK(autoColl.getCollection()->insertDocument(operationContext(), stmt, nullptr));
+ ASSERT_OK(
+ collection_internal::insertDocument(operationContext(), *autoColl, stmt, nullptr));
ASSERT_EQ(0, getInitCallCount());
}
diff --git a/src/mongo/db/startup_recovery.cpp b/src/mongo/db/startup_recovery.cpp
index c789197f1e8..e13d007748d 100644
--- a/src/mongo/db/startup_recovery.cpp
+++ b/src/mongo/db/startup_recovery.cpp
@@ -27,12 +27,9 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/startup_recovery.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder.h"
@@ -131,9 +128,8 @@ Status restoreMissingFeatureCompatibilityVersionDocument(OperationContext* opCtx
writeConflictRetry(opCtx, "insertFCVDocument", fcvNss.ns(), [&] {
WriteUnitOfWork wunit(opCtx);
- OpDebug* const nullOpDebug = nullptr;
- uassertStatusOK(fcvColl->insertDocument(
- opCtx, InsertStatement(fcvDoc.toBSON()), nullOpDebug, false));
+ uassertStatusOK(collection_internal::insertDocument(
+ opCtx, fcvColl, InsertStatement(fcvDoc.toBSON()), nullptr /* OpDebug */, false));
wunit.commit();
});
}
diff --git a/src/mongo/db/transaction/SConscript b/src/mongo/db/transaction/SConscript
index 234612b827f..f64da4c08cb 100644
--- a/src/mongo/db/transaction/SConscript
+++ b/src/mongo/db/transaction/SConscript
@@ -22,6 +22,7 @@ env.Library(
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/catalog/collection_catalog',
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/catalog/database_holder',
'$BUILD_DIR/mongo/db/catalog/local_oplog_info',
'$BUILD_DIR/mongo/db/catalog_raii',
diff --git a/src/mongo/db/transaction/transaction_participant.cpp b/src/mongo/db/transaction/transaction_participant.cpp
index 221c9dfef35..43ade31c7fe 100644
--- a/src/mongo/db/transaction/transaction_participant.cpp
+++ b/src/mongo/db/transaction/transaction_participant.cpp
@@ -27,16 +27,14 @@
* it in the license file.
*/
-
#define LOGV2_FOR_TRANSACTION(ID, DLEVEL, MESSAGE, ...) \
LOGV2_DEBUG_OPTIONS(ID, DLEVEL, {logv2::LogComponent::kTransaction}, MESSAGE, ##__VA_ARGS__)
-#include "mongo/platform/basic.h"
-
#include "mongo/db/transaction/transaction_participant.h"
#include <fmt/format.h>
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/local_oplog_info.h"
@@ -81,7 +79,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage
-
namespace mongo {
using namespace fmt::literals;
namespace {
@@ -421,7 +418,8 @@ void updateSessionEntry(OperationContext* opCtx,
if (recordId.isNull()) {
// Upsert case.
- auto status = collection->insertDocument(opCtx, InsertStatement(updateMod), nullptr, false);
+ auto status = collection_internal::insertDocument(
+ opCtx, *collection, InsertStatement(updateMod), nullptr, false);
if (status == ErrorCodes::DuplicateKey) {
throwWriteConflictException(
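updateSessionEntry() treats DuplicateKey from this upsert-path insert as a lost race with a concurrent writer and rethrows it as a write conflict so the operation retries; the hunk above is cut off mid-call, so the real argument to throwWriteConflictException() is not shown. A sketch with a hypothetical placeholder string:

    auto status = collection_internal::insertDocument(
        opCtx, *collection, InsertStatement(updateMod), nullptr, false);
    if (status == ErrorCodes::DuplicateKey) {
        // Placeholder text; the actual reason string is elided in the hunk above.
        throwWriteConflictException("duplicate session entry; retry as write conflict");
    }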
diff --git a/src/mongo/db/transaction/transaction_participant_test.cpp b/src/mongo/db/transaction/transaction_participant_test.cpp
index 9e2642e20e2..16c8d1ba235 100644
--- a/src/mongo/db/transaction/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction/transaction_participant_test.cpp
@@ -27,10 +27,7 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include <memory>
-
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
#include "mongo/db/db_raii.h"
@@ -404,7 +401,8 @@ void insertTxnRecord(OperationContext* opCtx, unsigned i, DurableTxnStateEnum st
auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss);
ASSERT(coll);
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(coll->insertDocument(opCtx, InsertStatement(record.toBSON()), nullOpDebug, false));
+ ASSERT_OK(collection_internal::insertDocument(
+ opCtx, coll, InsertStatement(record.toBSON()), nullOpDebug, false));
wuow.commit();
}
} // namespace
diff --git a/src/mongo/db/views/SConscript b/src/mongo/db/views/SConscript
index 1fe254d944f..a3f5227f5fc 100644
--- a/src/mongo/db/views/SConscript
+++ b/src/mongo/db/views/SConscript
@@ -15,6 +15,7 @@ env.Library(
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/audit',
+ '$BUILD_DIR/mongo/db/catalog/collection_crud',
'$BUILD_DIR/mongo/db/catalog/database_holder',
'$BUILD_DIR/mongo/db/multitenancy',
'$BUILD_DIR/mongo/db/server_feature_flags',
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index e8186218a78..b02fd3f4a56 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -27,16 +27,13 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/views/durable_view_catalog.h"
#include <string>
#include "mongo/db/audit.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/concurrency/d_concurrency.h"
@@ -278,8 +275,8 @@ void DurableViewCatalogImpl::upsert(OperationContext* opCtx,
"Insert view to system views catalog",
"view"_attr = view,
"viewCatalog"_attr = _db->getSystemViewsName());
- uassertStatusOK(
- systemViews->insertDocument(opCtx, InsertStatement(view), &CurOp::get(opCtx)->debug()));
+ uassertStatusOK(collection_internal::insertDocument(
+ opCtx, systemViews, InsertStatement(view), &CurOp::get(opCtx)->debug()));
} else {
CollectionUpdateArgs args;
args.update = view;
diff --git a/src/mongo/dbtests/SConscript b/src/mongo/dbtests/SConscript
index eabf9fb48ea..f0c65377e4b 100644
--- a/src/mongo/dbtests/SConscript
+++ b/src/mongo/dbtests/SConscript
@@ -70,7 +70,7 @@ env.Library(
)
env.Program(
- target="dbtest",
+ target='dbtest',
source=[
'basictests.cpp',
'catalogtests.cpp',
@@ -141,6 +141,7 @@ env.Program(
"$BUILD_DIR/mongo/db/bson/dotted_path_support",
"$BUILD_DIR/mongo/db/catalog/catalog_helpers",
"$BUILD_DIR/mongo/db/catalog/clustered_collection_options",
+ "$BUILD_DIR/mongo/db/catalog/collection_crud",
"$BUILD_DIR/mongo/db/catalog/collection_validation",
"$BUILD_DIR/mongo/db/catalog/index_key_validate",
"$BUILD_DIR/mongo/db/catalog/multi_index_block",
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 1b807d28395..b1c5aa77295 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -27,9 +27,7 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
@@ -38,6 +36,7 @@
#include "mongo/db/json.h"
#include "mongo/dbtests/dbtests.h"
+namespace mongo {
namespace CountTests {
class Base {
@@ -99,10 +98,12 @@ protected:
oid.init();
b.appendOID("_id", &oid);
b.appendElements(o);
- _collection->insertDocument(&_opCtx, InsertStatement(b.obj()), nullOpDebug, false)
+ collection_internal::insertDocument(
+                &_opCtx, _collection, InsertStatement(b.obj()), nullOpDebug, false)
.transitional_ignore();
} else {
- _collection->insertDocument(&_opCtx, InsertStatement(o), nullOpDebug, false)
+ collection_internal::insertDocument(
+ &_opCtx, _collection, InsertStatement(o), nullOpDebug, false)
.transitional_ignore();
}
wunit.commit();
@@ -174,3 +175,4 @@ public:
OldStyleSuiteInitializer<All> myall;
} // namespace CountTests
+} // namespace mongo
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index dfda6c43a0d..188e287dc84 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -27,10 +27,8 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/client/dbclient_cursor.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
@@ -50,12 +48,8 @@
#include "mongo/util/assert_util.h"
namespace mongo {
-
namespace {
-using std::set;
-using std::unique_ptr;
-
/**
* Unit tests related to DBHelpers
*/
@@ -144,10 +138,10 @@ public:
WriteUnitOfWork wuow(opCtx1.get());
collection1 = db->createCollection(opCtx1.get(), nss, CollectionOptions(), true);
ASSERT_TRUE(collection1 != nullptr);
- ASSERT_TRUE(collection1
- ->insertDocument(
- opCtx1.get(), InsertStatement(doc), nullptr /* opDebug */, false)
- .isOK());
+ ASSERT_TRUE(
+ collection_internal::insertDocument(
+ opCtx1.get(), collection1, InsertStatement(doc), nullptr /* opDebug */, false)
+ .isOK());
wuow.commit();
}
diff --git a/src/mongo/dbtests/dbtests.h b/src/mongo/dbtests/dbtests.h
index 4c44f6a565b..3f0061d4c24 100644
--- a/src/mongo/dbtests/dbtests.h
+++ b/src/mongo/dbtests/dbtests.h
@@ -38,7 +38,6 @@
using namespace mongo;
using namespace mongo::unittest;
-using std::shared_ptr;
namespace mongo {
diff --git a/src/mongo/dbtests/deferred_writer.cpp b/src/mongo/dbtests/deferred_writer.cpp
index 90f4b29a366..ba30b6908f9 100644
--- a/src/mongo/dbtests/deferred_writer.cpp
+++ b/src/mongo/dbtests/deferred_writer.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include <chrono>
#include "mongo/bson/simple_bsonobj_comparator.h"
@@ -40,6 +38,7 @@
#include "mongo/dbtests/dbtests.h"
#include "mongo/stdx/chrono.h"
+namespace mongo {
namespace deferred_writer_tests {
namespace {
@@ -387,3 +386,4 @@ public:
OldStyleSuiteInitializer<DeferredWriterTests> deferredWriterTests;
} // namespace deferred_writer_tests
+} // namespace mongo
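
The second recurring edit: each legacy suite gains an enclosing namespace mongo block, with the old suite namespace nested inside and a matching close-brace comment at end of file, so test code resolves mongo:: names unqualified without a using-directive. The resulting structure, sketched from this file:

    namespace mongo {
    namespace deferred_writer_tests {
    namespace {
    // fixtures and test cases; mongo:: types are now usable unqualified
    }  // namespace
    OldStyleSuiteInitializer<DeferredWriterTests> deferredWriterTests;
    }  // namespace deferred_writer_tests
    }  // namespace mongo
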
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 5b11a9d6078..f29fad7cabb 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -27,11 +27,7 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include <cstdint>
-
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/multi_index_block.h"
#include "mongo/db/catalog_raii.h"
@@ -43,8 +39,8 @@
#include "mongo/db/storage/storage_engine_init.h"
#include "mongo/dbtests/dbtests.h"
+namespace mongo {
namespace IndexUpdateTests {
-
namespace {
const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
} // namespace
@@ -137,16 +133,18 @@ public:
{
WriteUnitOfWork wunit(_opCtx);
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(coll->insertDocument(_opCtx,
- InsertStatement(BSON("_id" << 1 << "a"
- << "dup")),
- nullOpDebug,
- true));
- ASSERT_OK(coll->insertDocument(_opCtx,
- InsertStatement(BSON("_id" << 2 << "a"
- << "dup")),
- nullOpDebug,
- true));
+ ASSERT_OK(collection_internal::insertDocument(_opCtx,
+ coll.get(),
+ InsertStatement(BSON("_id" << 1 << "a"
+ << "dup")),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(_opCtx,
+ coll.get(),
+ InsertStatement(BSON("_id" << 2 << "a"
+ << "dup")),
+ nullOpDebug,
+ true));
wunit.commit();
}
@@ -192,16 +190,18 @@ public:
{
WriteUnitOfWork wunit(_opCtx);
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(coll->insertDocument(_opCtx,
- InsertStatement(BSON("_id" << 1 << "a"
- << "dup")),
- nullOpDebug,
- true));
- ASSERT_OK(coll->insertDocument(_opCtx,
- InsertStatement(BSON("_id" << 2 << "a"
- << "dup")),
- nullOpDebug,
- true));
+ ASSERT_OK(collection_internal::insertDocument(_opCtx,
+ coll.get(),
+ InsertStatement(BSON("_id" << 1 << "a"
+ << "dup")),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(_opCtx,
+ coll.get(),
+ InsertStatement(BSON("_id" << 2 << "a"
+ << "dup")),
+ nullOpDebug,
+ true));
wunit.commit();
}
}
@@ -256,8 +256,8 @@ public:
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- ASSERT_OK(
- coll->insertDocument(_opCtx, InsertStatement(BSON("a" << i)), nullOpDebug));
+ ASSERT_OK(collection_internal::insertDocument(
+ _opCtx, coll.get(), InsertStatement(BSON("a" << i)), nullOpDebug));
}
wunit.commit();
}
@@ -306,8 +306,12 @@ public:
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- ASSERT_OK(coll->insertDocument(
- _opCtx, InsertStatement(BSON("_id" << i)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ _opCtx,
+ CollectionPtr(coll, CollectionPtr::NoYieldTag{}),
+ InsertStatement(BSON("_id" << i)),
+ nullOpDebug,
+ true));
}
wunit.commit();
// Request an interrupt.
@@ -714,3 +718,4 @@ public:
OldStyleSuiteInitializer<IndexUpdateTests> indexUpdateTests;
} // namespace IndexUpdateTests
+} // namespace mongo
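
Note the different spellings of the collection argument across these hunks. The free function takes a collection handle (a CollectionPtr at the call sites shown), so each test passes whatever it already holds; the variable names below are illustrative, not taken from any one file:

    // 1) A CollectionWriter-style wrapper: pass the handle it exposes.
    collection_internal::insertDocument(_opCtx, coll.get(), stmt, nullOpDebug, true);
    // 2) An AutoGetCollection-style wrapper: dereference it.
    collection_internal::insertDocument(opCtx, *autoColl, stmt, nullOpDebug);
    // 3) A bare Collection* held by the fixture: wrap it explicitly, opting out
    //    of yield tracking (see the NoYieldTag note after query_stage_ixscan.cpp).
    collection_internal::insertDocument(
        opCtx, CollectionPtr(rawColl, CollectionPtr::NoYieldTag{}), stmt, nullOpDebug, true);
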
diff --git a/src/mongo/dbtests/jsontests.cpp b/src/mongo/dbtests/jsontests.cpp
index d732f2967ee..7fd425e3ad7 100644
--- a/src/mongo/dbtests/jsontests.cpp
+++ b/src/mongo/dbtests/jsontests.cpp
@@ -31,9 +31,6 @@
* Tests for json.{h,cpp} code and BSONObj::jsonString()
*/
-
-#include "mongo/platform/basic.h"
-
#include <boost/property_tree/json_parser.hpp>
#include <boost/property_tree/ptree.hpp>
#include <fmt/format.h>
@@ -46,12 +43,12 @@
#include "mongo/dbtests/dbtests.h"
#include "mongo/logv2/log.h"
#include "mongo/platform/decimal128.h"
-#include "mongo/unittest/unittest.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
+namespace mongo {
namespace {
+
std::string makeJsonEquvalent(const std::string& json) {
boost::property_tree::ptree tree;
@@ -1199,3 +1196,4 @@ TEST(FromJsonTest, MinMaxKey) {
} // namespace FromJsonTests
} // namespace
+} // namespace mongo
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index 987e2d934c2..900fd3c404c 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -27,12 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
-#include <iostream>
-#include <limits>
-
#include "mongo/base/parse_number.h"
#include "mongo/db/client.h"
#include "mongo/db/dbdirectclient.h"
@@ -49,14 +43,7 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
-
-using std::cout;
-using std::endl;
-using std::string;
-using std::stringstream;
-using std::unique_ptr;
-using std::vector;
-
+namespace mongo {
namespace JSTests {
using ScopeFactory = Scope* (ScriptEngine::*)();
@@ -74,7 +61,7 @@ template <ScopeFactory scopeFactory>
class BasicScope {
public:
void run() {
- unique_ptr<Scope> s;
+ std::unique_ptr<Scope> s;
s.reset((getGlobalScriptEngine()->*scopeFactory)());
s->setNumber("x", 5);
@@ -99,7 +86,7 @@ class ResetScope {
public:
void run() {
/* Currently reset does not clear data in v8 or spidermonkey scopes. See SECURITY-10
- unique_ptr<Scope> s;
+ std::unique_ptr<Scope> s;
s.reset( (getGlobalScriptEngine()->*scopeFactory)() );
s->setBoolean( "x" , true );
@@ -116,7 +103,7 @@ class FalseTests {
public:
void run() {
// Test falsy javascript values
- unique_ptr<Scope> s;
+ std::unique_ptr<Scope> s;
s.reset((getGlobalScriptEngine()->*scopeFactory)());
ASSERT(!s->getBoolean("notSet"));
@@ -140,7 +127,7 @@ template <ScopeFactory scopeFactory>
class SimpleFunctions {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
s->invoke("x=5;", nullptr, nullptr);
ASSERT(5 == s->getNumber("x"));
@@ -169,7 +156,7 @@ template <ScopeFactory scopeFactory>
class ObjectMapping {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
BSONObj o = BSON("x" << 17.0 << "y"
<< "eliot"
@@ -227,7 +214,7 @@ template <ScopeFactory scopeFactory>
class ObjectDecoding {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
s->invoke("z = { num : 1 };", nullptr, nullptr);
BSONObj out = s->getObject("z");
@@ -251,7 +238,7 @@ class JSOIDTests {
public:
void run() {
#ifdef MOZJS
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
s->localConnect("blah");
@@ -283,7 +270,7 @@ template <ScopeFactory scopeFactory>
class SetImplicit {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
BSONObj o = BSON("foo"
<< "bar");
@@ -306,7 +293,7 @@ template <ScopeFactory scopeFactory>
class ObjectModReadonlyTests {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
BSONObj o = BSON("x" << 17 << "y"
<< "eliot"
@@ -345,7 +332,7 @@ template <ScopeFactory scopeFactory>
class OtherJSTypes {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
{
// date
@@ -390,8 +377,8 @@ public:
s->invoke("z = { a : x.r };", nullptr, nullptr);
BSONObj out = s->getObject("z");
- ASSERT_EQUALS((string) "^a", out["a"].regex());
- ASSERT_EQUALS((string) "i", out["a"].regexFlags());
+ ASSERT_EQUALS((std::string) "^a", out["a"].regex());
+ ASSERT_EQUALS((std::string) "i", out["a"].regexFlags());
// This regex used to cause a segfault because x isn't a valid flag for a js RegExp.
// Now it throws a JS exception.
@@ -443,7 +430,7 @@ template <ScopeFactory scopeFactory>
class SpecialDBTypes {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
BSONObjBuilder b;
b.appendTimestamp("a", 123456789);
@@ -478,7 +465,7 @@ template <ScopeFactory scopeFactory>
class TypeConservation {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
// -- A --
@@ -577,7 +564,7 @@ template <ScopeFactory scopeFactory>
class NumberLong {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
BSONObjBuilder b;
long long val = (long long)(0xbabadeadbeefbaddULL);
b.append("a", val);
@@ -590,15 +577,15 @@ public:
out = s->getObject("b");
ASSERT_EQUALS(mongo::NumberLong, out.firstElement().type());
if (val != out.firstElement().numberLong()) {
- cout << val << endl;
- cout << out.firstElement().numberLong() << endl;
- cout << out.toString() << endl;
+ std::cout << val << std::endl;
+ std::cout << out.firstElement().numberLong() << std::endl;
+ std::cout << out.toString() << std::endl;
ASSERT_EQUALS(val, out.firstElement().numberLong());
}
ASSERT(s->exec("c = {c:a.a.toString()}", "foo", false, true, false));
out = s->getObject("c");
- stringstream ss;
+ std::stringstream ss;
ss << "NumberLong(\"" << val << "\")";
ASSERT_EQUALS(ss.str(), out.firstElement().str());
@@ -637,7 +624,7 @@ template <ScopeFactory scopeFactory>
class NumberLong2 {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
BSONObj in;
{
@@ -653,9 +640,9 @@ public:
s->setObject("a", in);
ASSERT(s->exec("x = tojson( a ); ", "foo", false, true, false));
- string outString = s->getString("x");
+ std::string outString = s->getString("x");
- ASSERT(s->exec((string) "y = " + outString, "foo2", false, true, false));
+ ASSERT(s->exec((std::string) "y = " + outString, "foo2", false, true, false));
BSONObj out = s->getObject("y");
ASSERT_BSONOBJ_EQ(in, out);
}
@@ -665,7 +652,7 @@ template <ScopeFactory scopeFactory>
class NumberLongUnderLimit {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
BSONObjBuilder b;
// limit is 2^53
@@ -680,15 +667,15 @@ public:
out = s->getObject("b");
ASSERT_EQUALS(mongo::NumberLong, out.firstElement().type());
if (val != out.firstElement().numberLong()) {
- cout << val << endl;
- cout << out.firstElement().numberLong() << endl;
- cout << out.toString() << endl;
+ std::cout << val << std::endl;
+ std::cout << out.firstElement().numberLong() << std::endl;
+ std::cout << out.toString() << std::endl;
ASSERT_EQUALS(val, out.firstElement().numberLong());
}
ASSERT(s->exec("c = {c:a.a.toString()}", "foo", false, true, false));
out = s->getObject("c");
- stringstream ss;
+ std::stringstream ss;
ss << "NumberLong(\"" << val << "\")";
ASSERT_EQUALS(ss.str(), out.firstElement().str());
@@ -712,7 +699,7 @@ template <ScopeFactory scopeFactory>
class NumberDecimal {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
BSONObjBuilder b;
Decimal128 val = Decimal128("2.010");
b.append("a", val);
@@ -729,10 +716,10 @@ public:
ASSERT_EQUALS(mongo::NumberDecimal, out.firstElement().type());
ASSERT_TRUE(val.isEqual(out.firstElement().numberDecimal()));
- // Test that the appropriate string output is generated
+ // Test that the appropriate std::string output is generated
ASSERT(s->exec("c = {c:a.a.toString()}", "foo", false, true, false));
out = s->getObject("c");
- stringstream ss;
+ std::stringstream ss;
ss << "NumberDecimal(\"" << val.toString() << "\")";
ASSERT_EQUALS(ss.str(), out.firstElement().str());
}
@@ -742,7 +729,7 @@ template <ScopeFactory scopeFactory>
class NumberDecimalGetFromScope {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
ASSERT(s->exec("a = 5;", "a", false, true, false));
ASSERT_TRUE(Decimal128(5).isEqual(s->getNumberDecimal("a")));
}
@@ -752,7 +739,7 @@ template <ScopeFactory scopeFactory>
class NumberDecimalBigObject {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
BSONObj in;
{
@@ -768,9 +755,9 @@ public:
s->setObject("a", in);
ASSERT(s->exec("x = tojson( a ); ", "foo", false, true, false));
- string outString = s->getString("x");
+ std::string outString = s->getString("x");
- ASSERT(s->exec((string) "y = " + outString, "foo2", false, true, false));
+ ASSERT(s->exec((std::string) "y = " + outString, "foo2", false, true, false));
BSONObj out = s->getObject("y");
ASSERT_BSONOBJ_EQ(in, out);
}
@@ -780,7 +767,7 @@ template <ScopeFactory scopeFactory>
class MaxTimestamp {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
// Timestamp 't' component can exceed max for int32_t.
BSONObj in;
@@ -810,7 +797,7 @@ public:
}
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
for (int i = 5; i < 100; i += 10) {
s->setObject("a", build(i), false);
@@ -829,7 +816,7 @@ template <ScopeFactory scopeFactory>
class ExecTimeout {
public:
void run() {
- unique_ptr<Scope> scope((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> scope((getGlobalScriptEngine()->*scopeFactory)());
// assert timeout occurred
ASSERT(!scope->exec("var a = 1; while (true) { ; }", "ExecTimeout", false, true, false, 1));
@@ -843,7 +830,7 @@ template <ScopeFactory scopeFactory>
class ExecNoTimeout {
public:
void run() {
- unique_ptr<Scope> scope((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> scope((getGlobalScriptEngine()->*scopeFactory)());
// assert no timeout occurred
ASSERT(scope->exec("var a = function() { return 1; }",
@@ -862,7 +849,7 @@ template <ScopeFactory scopeFactory>
class InvokeTimeout {
public:
void run() {
- unique_ptr<Scope> scope((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> scope((getGlobalScriptEngine()->*scopeFactory)());
// scope timeout after 500ms
bool caught = false;
@@ -942,7 +929,7 @@ template <ScopeFactory scopeFactory>
class InvokeNoTimeout {
public:
void run() {
- unique_ptr<Scope> scope((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> scope((getGlobalScriptEngine()->*scopeFactory)());
// invoke completes before timeout
scope->invokeSafe(
@@ -972,8 +959,8 @@ public:
private:
void check(const BSONObj& one, const BSONObj& two) {
if (one.woCompare(two) != 0) {
- static string fail =
- string("Assertion failure expected ") + one.toString() + ", got " + two.toString();
+ static std::string fail = std::string("Assertion failure expected ") + one.toString() +
+ ", got " + two.toString();
FAIL(fail.c_str());
}
}
@@ -998,15 +985,15 @@ public:
void pp(const char* s, BSONElement e) {
int len;
const char* data = e.binData(len);
- cout << s << ":" << e.binDataType() << "\t" << len << endl;
- cout << "\t";
+ std::cout << s << ":" << e.binDataType() << "\t" << len << std::endl;
+ std::cout << "\t";
for (int i = 0; i < len; i++)
- cout << (int)(data[i]) << " ";
- cout << endl;
+ std::cout << (int)(data[i]) << " ";
+ std::cout << std::endl;
}
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
const char* foo = "asdas\0asdasd";
const char* base64 = "YXNkYXMAYXNkYXNk";
@@ -1031,14 +1018,14 @@ public:
// check that BinData js class is utilized
s->invokeSafe("q = x.b.toString();", nullptr, nullptr);
- stringstream expected;
+ std::stringstream expected;
expected << "BinData(" << BinDataGeneral << ",\"" << base64 << "\")";
ASSERT_EQUALS(expected.str(), s->getString("q"));
- stringstream scriptBuilder;
+ std::stringstream scriptBuilder;
scriptBuilder << "z = { c : new BinData( " << BinDataGeneral << ", \"" << base64
<< "\" ) };";
- string script = scriptBuilder.str();
+ std::string script = scriptBuilder.str();
s->invokeSafe(script.c_str(), nullptr, nullptr);
out = s->getObject("z");
// pp( "out" , out["c"] );
@@ -1057,7 +1044,7 @@ template <ScopeFactory scopeFactory>
class VarTests {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
ASSERT(s->exec("a = 5;", "a", false, true, false));
ASSERT_EQUALS(5, s->getNumber("a"));
@@ -1074,7 +1061,7 @@ public:
BSONObj start = BSON("x" << 5.0);
BSONObj empty;
- unique_ptr<Scope> s;
+ std::unique_ptr<Scope> s;
s.reset((getGlobalScriptEngine()->*scopeFactory)());
ScriptingFunction f = s->createFunction("return this.x + 6;");
@@ -1085,7 +1072,7 @@ public:
s->invoke(f, &empty, &start);
ASSERT_EQUALS(11, s->getNumber("__returnValue"));
}
- // cout << "speed1: " << ( n / t.millis() ) << " ops/ms" << endl;
+ // std::cout << "speed1: " << ( n / t.millis() ) << " ops/ms" << std::endl;
}
};
@@ -1093,7 +1080,7 @@ template <ScopeFactory scopeFactory>
class ScopeOut {
public:
void run() {
- unique_ptr<Scope> s;
+ std::unique_ptr<Scope> s;
s.reset((getGlobalScriptEngine()->*scopeFactory)());
s->invokeSafe("x = 5;", nullptr, nullptr);
@@ -1120,7 +1107,7 @@ template <ScopeFactory scopeFactory>
class RenameTest {
public:
void run() {
- unique_ptr<Scope> s;
+ std::unique_ptr<Scope> s;
s.reset((getGlobalScriptEngine()->*scopeFactory)());
s->setNumber("x", 5);
@@ -1164,7 +1151,7 @@ public:
0xff,
0,
};
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
s->setObject("val", BSONObj(reinterpret_cast<char*>(bits)).getOwned());
@@ -1177,7 +1164,7 @@ template <ScopeFactory scopeFactory>
class NoReturnSpecified {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
s->invoke("x=5;", nullptr, nullptr);
ASSERT_EQUALS(5, s->getNumber("__returnValue"));
@@ -1232,7 +1219,7 @@ public:
}
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
s->injectNative("foo", callback, s.get());
s->invoke("var x = 1; foo();", nullptr, nullptr);
@@ -1244,7 +1231,7 @@ template <ScopeFactory scopeFactory>
class ErrorCodeFromInvoke {
public:
void run() {
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
{
bool threwException = false;
@@ -1287,7 +1274,7 @@ public:
return {};
};
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
s->injectNative("foo", sidecarThrowingFunc);
@@ -1313,14 +1300,14 @@ public:
// Ensure that by default we can bind owned and unowned
{
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
s->setObject("unowned", unowned, true);
s->setObject("owned", owned, true);
}
// After we set the flag, we should only be able to set owned
{
- unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
s->requireOwnedObjects();
s->setObject("owned", owned, true);
@@ -1347,7 +1334,7 @@ public:
template <ScopeFactory scopeFactory>
class ConvertShardKeyToHashed {
public:
- void check(shared_ptr<Scope> s, const mongo::BSONObj& o) {
+ void check(std::shared_ptr<Scope> s, const mongo::BSONObj& o) {
s->setObject("o", o, true);
s->invoke("return convertShardKeyToHashed(o);", nullptr, nullptr);
const auto scopeShardKey = s->getNumber("__returnValue");
@@ -1361,17 +1348,17 @@ public:
ASSERT_EQUALS(scopeShardKey, trueShardKey);
}
- void checkNoArgs(shared_ptr<Scope> s) {
+ void checkNoArgs(std::shared_ptr<Scope> s) {
s->invoke("return convertShardKeyToHashed();", nullptr, nullptr);
}
- void checkWithExtraArg(shared_ptr<Scope> s, const mongo::BSONObj& o, int seed) {
+ void checkWithExtraArg(std::shared_ptr<Scope> s, const mongo::BSONObj& o, int seed) {
s->setObject("o", o, true);
s->invoke("return convertShardKeyToHashed(o, 1);", nullptr, nullptr);
}
void run() {
- shared_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
+ std::shared_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
shell_utils::installShellUtils(*s);
// Check a few elementary objects
@@ -1399,7 +1386,7 @@ template <ScopeFactory scopeFactory>
class BasicAsyncJS {
public:
void run() {
- unique_ptr<Scope> scope((getGlobalScriptEngine()->*scopeFactory)());
+ std::unique_ptr<Scope> scope((getGlobalScriptEngine()->*scopeFactory)());
scope->setNumber("x", 0);
/* The async code will get run after the return, so
@@ -1483,3 +1470,4 @@ public:
OldStyleSuiteInitializer<All> myall;
} // namespace JSTests
+} // namespace mongo
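
Most of this file's churn is the third recurring edit: file-scope using std::... declarations are dropped in favor of explicit qualification. Once the file sits inside namespace mongo, such a using-declaration injects the name into mongo's scope for the whole translation unit, where it can collide with or shadow mongo's own names; spelling out std:: keeps lookup unambiguous. A minimal illustration (the clash shown is hypothetical):

    namespace mongo {
    using std::string;   // 'string' now lives alongside everything in mongo::
    string a;            // fine today, but any future mongo::string would make
                         // every such unqualified use an ambiguity error
    std::string b;       // the qualified form is immune to that class of break
    }  // namespace mongo
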
diff --git a/src/mongo/dbtests/multikey_paths_test.cpp b/src/mongo/dbtests/multikey_paths_test.cpp
index 34afd00fa0e..6e7a9e80520 100644
--- a/src/mongo/dbtests/multikey_paths_test.cpp
+++ b/src/mongo/dbtests/multikey_paths_test.cpp
@@ -27,11 +27,7 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include <iostream>
-#include <string>
-
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/index/index_descriptor.h"
@@ -140,8 +136,9 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreation) {
WriteUnitOfWork wuow(_opCtx.get());
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(collection->insertDocument(
+ ASSERT_OK(collection_internal::insertDocument(
_opCtx.get(),
+ *collection,
InsertStatement(BSON("_id" << 0 << "a" << 5 << "b" << BSON_ARRAY(1 << 2 << 3))),
nullOpDebug));
wuow.commit();
@@ -164,12 +161,14 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreationWithMultipleDocuments) {
WriteUnitOfWork wuow(_opCtx.get());
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(collection->insertDocument(
+ ASSERT_OK(collection_internal::insertDocument(
_opCtx.get(),
+ *collection,
InsertStatement(BSON("_id" << 0 << "a" << 5 << "b" << BSON_ARRAY(1 << 2 << 3))),
nullOpDebug));
- ASSERT_OK(collection->insertDocument(
+ ASSERT_OK(collection_internal::insertDocument(
_opCtx.get(),
+ *collection,
InsertStatement(BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2 << 3) << "b" << 5)),
nullOpDebug));
wuow.commit();
@@ -198,8 +197,9 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentInsert) {
{
WriteUnitOfWork wuow(_opCtx.get());
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(collection->insertDocument(
+ ASSERT_OK(collection_internal::insertDocument(
_opCtx.get(),
+ *collection,
InsertStatement(BSON("_id" << 0 << "a" << 5 << "b" << BSON_ARRAY(1 << 2 << 3))),
nullOpDebug));
wuow.commit();
@@ -210,8 +210,9 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentInsert) {
{
WriteUnitOfWork wuow(_opCtx.get());
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(collection->insertDocument(
+ ASSERT_OK(collection_internal::insertDocument(
_opCtx.get(),
+ *collection,
InsertStatement(BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2 << 3) << "b" << 5)),
nullOpDebug));
wuow.commit();
@@ -232,8 +233,8 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdate) {
{
WriteUnitOfWork wuow(_opCtx.get());
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(collection->insertDocument(
- _opCtx.get(), InsertStatement(BSON("_id" << 0 << "a" << 5)), nullOpDebug));
+ ASSERT_OK(collection_internal::insertDocument(
+ _opCtx.get(), *collection, InsertStatement(BSON("_id" << 0 << "a" << 5)), nullOpDebug));
wuow.commit();
}
@@ -278,8 +279,9 @@ TEST_F(MultikeyPathsTest, PathsNotUpdatedOnDocumentDelete) {
{
WriteUnitOfWork wuow(_opCtx.get());
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(collection->insertDocument(
+ ASSERT_OK(collection_internal::insertDocument(
_opCtx.get(),
+ *collection,
InsertStatement(BSON("_id" << 0 << "a" << 5 << "b" << BSON_ARRAY(1 << 2 << 3))),
nullOpDebug));
wuow.commit();
@@ -322,8 +324,9 @@ TEST_F(MultikeyPathsTest, PathsUpdatedForMultipleIndexesOnDocumentInsert) {
{
WriteUnitOfWork wuow(_opCtx.get());
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(collection->insertDocument(
+ ASSERT_OK(collection_internal::insertDocument(
_opCtx.get(),
+ *collection,
InsertStatement(
BSON("_id" << 0 << "a" << BSON_ARRAY(1 << 2 << 3) << "b" << 5 << "c" << 8)),
nullOpDebug));
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index b22fee7e066..c5d41844fa6 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -27,22 +27,17 @@
* it in the license file.
*/
-/**
- * pdfile unit tests
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/json.h"
#include "mongo/db/ops/insert.h"
#include "mongo/dbtests/dbtests.h"
+namespace mongo {
namespace PdfileTests {
-
namespace Insert {
+
class Base {
public:
Base() : _lk(&_opCtx), _context(&_opCtx, nss()) {}
@@ -82,13 +77,15 @@ public:
}
ASSERT(coll);
OpDebug* const nullOpDebug = nullptr;
- ASSERT(!coll->insertDocument(&_opCtx, InsertStatement(x), nullOpDebug, true).isOK());
+ ASSERT_NOT_OK(collection_internal::insertDocument(
+ &_opCtx, coll, InsertStatement(x), nullOpDebug, true));
StatusWith<BSONObj> fixed = fixDocumentForInsert(&_opCtx, x);
ASSERT(fixed.isOK());
x = fixed.getValue();
ASSERT(x["_id"].type() == jstOID);
- ASSERT(coll->insertDocument(&_opCtx, InsertStatement(x), nullOpDebug, true).isOK());
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll, InsertStatement(x), nullOpDebug, true));
wunit.commit();
}
};
@@ -171,3 +168,4 @@ public:
OldStyleSuiteInitializer<All> myall;
} // namespace PdfileTests
+} // namespace mongo
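
This file also swaps ASSERT(x.isOK()) / ASSERT(!x.isOK()) for ASSERT_OK / ASSERT_NOT_OK. Beyond brevity, the dedicated macros report the contained Status (code and reason) on failure rather than just a false boolean, which is presumably the motivation; sketched from the hunk above:

    // Failure output of ASSERT(!s.isOK()) names only the expression.
    // ASSERT_NOT_OK(s) also prints the Status, so the log says *why* it failed:
    ASSERT_NOT_OK(collection_internal::insertDocument(
        &_opCtx, coll, InsertStatement(x), nullOpDebug, true));  // x has no _id yet
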
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index a503185daf2..a9be3d8c35b 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -31,11 +31,6 @@
* This file tests db/query/plan_ranker.cpp and db/query/multi_plan_runner.cpp.
*/
-#include "mongo/platform/basic.h"
-
-#include <iostream>
-#include <memory>
-
#include "mongo/client/dbclient_cursor.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
@@ -67,13 +62,8 @@ extern AtomicWord<int> internalQueryMaxBlockingSortMemoryUsageBytes;
extern AtomicWord<int> internalQueryPlanEvaluationMaxResults;
-} // namespace mongo
-
namespace PlanRankingTests {
-using std::unique_ptr;
-using std::vector;
-
static const NamespaceString nss("unittests.PlanRankingTests");
class PlanRankingTestBase {
@@ -128,7 +118,7 @@ public:
// Fill out the MPR.
_mps.reset(new MultiPlanStage(_expCtx.get(), collection.getCollection(), cq));
- unique_ptr<WorkingSet> ws(new WorkingSet());
+ std::unique_ptr<WorkingSet> ws(new WorkingSet());
// Put each solution from the planner into the MPR.
for (size_t i = 0; i < solutions.size(); ++i) {
auto&& root = stage_builder::buildClassicExecutableTree(
@@ -181,7 +171,7 @@ private:
// of the test.
bool _enableHashIntersection;
- unique_ptr<MultiPlanStage> _mps;
+ std::unique_ptr<MultiPlanStage> _mps;
DBDirectClient _client;
};
@@ -236,7 +226,7 @@ public:
findCommand->setSort(BSON("d" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(cq);
auto soln = pickBestPlan(cq.get());
@@ -285,7 +275,7 @@ public:
addIndex(BSON("a" << 1));
addIndex(BSON("b" << 1));
- unique_ptr<CanonicalQuery> cq;
+ std::unique_ptr<CanonicalQuery> cq;
// Run the query {a:4, b:1}.
{
@@ -348,7 +338,7 @@ public:
findCommand->setFilter(BSON("a" << 1 << "b" << BSON("$gt" << 1)));
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
verify(statusWithCQ.isOK());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(nullptr != cq.get());
// Turn on the "force intersect" option.
@@ -389,7 +379,7 @@ public:
findCommand->setProjection(BSON("_id" << 0 << "a" << 1 << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(nullptr != cq.get());
auto soln = pickBestPlan(cq.get());
@@ -424,7 +414,7 @@ public:
findCommand->setFilter(BSON("a" << 1 << "b" << 1 << "c" << 99));
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(nullptr != cq.get());
auto soln = pickBestPlan(cq.get());
@@ -463,7 +453,7 @@ public:
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(nullptr != cq.get());
auto soln = pickBestPlan(cq.get());
@@ -497,7 +487,7 @@ public:
findCommand->setFilter(BSON("a" << N + 1 << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
verify(statusWithCQ.isOK());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(nullptr != cq.get());
// {a: 100} is super selective so choose that.
@@ -534,7 +524,7 @@ public:
findCommand->setFilter(BSON("a" << BSON("$gte" << N + 1) << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
verify(statusWithCQ.isOK());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(nullptr != cq.get());
// {a: 100} is super selective so choose that.
@@ -565,7 +555,7 @@ public:
findCommand->setSort(BSON("c" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
auto soln = pickBestPlan(cq.get());
@@ -595,7 +585,7 @@ public:
findCommand->setFilter(BSON("foo" << 2001));
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
verify(statusWithCQ.isOK());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(nullptr != cq.get());
auto soln = pickBestPlan(cq.get());
@@ -630,7 +620,7 @@ public:
findCommand->setSort(BSON("d" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(nullptr != cq.get());
// No results will be returned during the trial period,
@@ -668,7 +658,7 @@ public:
findCommand->setFilter(fromjson("{a: 1, b: 1, c: {$gte: 5000}}"));
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(nullptr != cq.get());
// Use index on 'b'.
@@ -701,7 +691,7 @@ public:
findCommand->setFilter(fromjson("{a: 9, b: {$ne: 10}, c: 9}"));
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand));
ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(nullptr != cq.get());
// Expect to use index {a: 1, b: 1}.
@@ -736,3 +726,4 @@ public:
OldStyleSuiteInitializer<All> planRankingAll;
} // namespace PlanRankingTests
+} // namespace mongo
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 26ab912dcff..5b92c768874 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -27,12 +27,8 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include <memory>
-
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
@@ -53,6 +49,7 @@
#include "mongo/db/query/query_planner_params.h"
#include "mongo/dbtests/dbtests.h"
+namespace mongo {
namespace QueryStageCachedPlan {
static const NamespaceString nss("unittests.QueryStageCachedPlan");
@@ -116,7 +113,8 @@ public:
WriteUnitOfWork wuow(&_opCtx);
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(collection->insertDocument(&_opCtx, InsertStatement(obj), nullOpDebug));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, collection, InsertStatement(obj), nullOpDebug));
wuow.commit();
}
@@ -550,3 +548,4 @@ TEST_F(QueryStageCachedPlan, DoesNotThrowOnYieldRecoveryWhenIndexIsDroppedAferPl
}
} // namespace QueryStageCachedPlan
+} // namespace mongo
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 0b2fccc32b0..fa180a525b6 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -31,11 +31,7 @@
* This file tests db/exec/collection_scan.cpp.
*/
-
-#include "mongo/platform/basic.h"
-
#include <fmt/printf.h>
-#include <memory>
#include "mongo/client/dbclient_cursor.h"
#include "mongo/db/catalog/clustered_collection_options_gen.h"
@@ -56,17 +52,13 @@
#include "mongo/db/storage/record_store.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/logv2/log.h"
-#include "mongo/unittest/unittest.h"
#include "mongo/util/fail_point.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
+namespace mongo {
namespace query_stage_collection_scan {
-using std::unique_ptr;
-using std::vector;
-
static const NamespaceString nss{"unittests.QueryStageCollectionScan"};
//
@@ -106,11 +98,11 @@ public:
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(filterObj, _expCtx);
verify(statusWithMatcher.isOK());
- unique_ptr<MatchExpression> filterExpr = std::move(statusWithMatcher.getValue());
+ std::unique_ptr<MatchExpression> filterExpr = std::move(statusWithMatcher.getValue());
// Make a scan and have the runner own it.
- unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
- unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
+ std::unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
+ std::unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
_expCtx.get(), collection.getCollection(), params, ws.get(), filterExpr.get());
auto statusWithPlanExecutor =
@@ -135,14 +127,14 @@ public:
void getRecordIds(const CollectionPtr& collection,
CollectionScanParams::Direction direction,
- vector<RecordId>* out) {
+ std::vector<RecordId>* out) {
WorkingSet ws;
CollectionScanParams params;
params.direction = direction;
params.tailable = false;
- unique_ptr<CollectionScan> scan(
+ std::unique_ptr<CollectionScan> scan(
new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -207,7 +199,9 @@ public:
_client.insert(ns.ns(), doc);
}
- void insertDocuments(const NamespaceString& ns, const vector<BSONObj>& docs, bool ordered) {
+ void insertDocuments(const NamespaceString& ns,
+ const std::vector<BSONObj>& docs,
+ bool ordered) {
_client.insert(ns.ns(), docs, ordered);
}
@@ -266,7 +260,7 @@ public:
boost::optional<RecordIdBound> minRecord,
boost::optional<RecordIdBound> maxRecord,
CollectionScanParams::ScanBoundInclusion boundInclusion,
- const vector<BSONObj>& expectedResults,
+ const std::vector<BSONObj>& expectedResults,
const MatchExpression* filter = nullptr) {
AutoGetCollectionForRead autoColl(&_opCtx, ns);
@@ -351,8 +345,8 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderForward) {
params.tailable = false;
// Make a scan and have the runner own it.
- unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
- unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
+ std::unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
+ std::unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
_expCtx.get(), collection.getCollection(), params, ws.get(), nullptr);
auto statusWithPlanExecutor =
@@ -384,8 +378,8 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderBackward) {
params.direction = CollectionScanParams::BACKWARD;
params.tailable = false;
- unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
- unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
+ std::unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
+ std::unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
_expCtx.get(), collection.getCollection(), params, ws.get(), nullptr);
auto statusWithPlanExecutor =
@@ -416,7 +410,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObject) {
const CollectionPtr& coll = ctx.getCollection();
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> recordIds;
+ std::vector<RecordId> recordIds;
getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
// Configure the scan.
@@ -425,7 +419,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObject) {
params.tailable = false;
WorkingSet ws;
- unique_ptr<PlanStage> scan(new CollectionScan(_expCtx.get(), coll, params, &ws, nullptr));
+ std::unique_ptr<PlanStage> scan(new CollectionScan(_expCtx.get(), coll, params, &ws, nullptr));
int count = 0;
while (count < 10) {
@@ -469,7 +463,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObjectBackw
const CollectionPtr& coll = ctx.getCollection();
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> recordIds;
+ std::vector<RecordId> recordIds;
getRecordIds(coll, CollectionScanParams::BACKWARD, &recordIds);
// Configure the scan.
@@ -478,7 +472,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObjectBackw
params.tailable = false;
WorkingSet ws;
- unique_ptr<PlanStage> scan(new CollectionScan(_expCtx.get(), coll, params, &ws, nullptr));
+ std::unique_ptr<PlanStage> scan(new CollectionScan(_expCtx.get(), coll, params, &ws, nullptr));
int count = 0;
while (count < 10) {
@@ -521,7 +515,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekSuc
AutoGetCollectionForReadCommand collection(&_opCtx, nss);
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> recordIds;
+ std::vector<RecordId> recordIds;
getRecordIds(collection.getCollection(), CollectionScanParams::FORWARD, &recordIds);
// We will resume the collection scan this many results in.
@@ -535,8 +529,8 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekSuc
params.resumeAfterRecordId = recordIds[offset - 1];
// Create plan stage.
- unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
- unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
+ std::unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
+ std::unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
_expCtx.get(), collection.getCollection(), params, ws.get(), nullptr);
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -572,7 +566,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekFai
auto coll = ctx.getCollection();
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> recordIds;
+ std::vector<RecordId> recordIds;
getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
// We will resume the collection scan this many results in.
@@ -589,8 +583,8 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekFai
params.resumeAfterRecordId = recordId;
// Create plan stage.
- unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
- unique_ptr<PlanStage> ps =
+ std::unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
+ std::unique_ptr<PlanStage> ps =
std::make_unique<CollectionScan>(_expCtx.get(), coll, params, ws.get(), nullptr);
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -608,7 +602,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMax) {
ASSERT(coll->isClustered());
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> recordIds;
+ std::vector<RecordId> recordIds;
getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
ASSERT(recordIds.size());
@@ -813,7 +807,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredReverse) {
ASSERT(coll->isClustered());
// Get the RecordIds that would be returned by a backwards scan.
- vector<RecordId> recordIds;
+ std::vector<RecordId> recordIds;
getRecordIds(coll, CollectionScanParams::BACKWARD, &recordIds);
ASSERT(recordIds.size());
@@ -857,7 +851,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxFullObjectI
ASSERT(coll->isClustered());
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> recordIds;
+ std::vector<RecordId> recordIds;
getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
ASSERT(recordIds.size());
@@ -901,7 +895,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRange) {
ASSERT(coll->isClustered());
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> recordIds;
+ std::vector<RecordId> recordIds;
getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
ASSERT(recordIds.size());
@@ -950,7 +944,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRangeExclusi
ASSERT(coll->isClustered());
// Get the RecordIds that would be returned by an in-order scan.
- vector<RecordId> recordIds;
+ std::vector<RecordId> recordIds;
getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds);
ASSERT(recordIds.size());
@@ -1015,7 +1009,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRangeExclusi
ASSERT(coll->isClustered());
// Get the RecordIds that would be returned by a reverse scan.
- vector<RecordId> recordIds;
+ std::vector<RecordId> recordIds;
getRecordIds(coll, CollectionScanParams::BACKWARD, &recordIds);
ASSERT(recordIds.size());
@@ -1373,3 +1367,4 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanChangeCollectionGetLatestO
ASSERT_EQUALS(Timestamp(16, 1), scanStage->getLatestOplogTimestamp());
}
} // namespace query_stage_collection_scan
+} // namespace mongo
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 712ad9b7298..67f25307768 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -27,10 +27,7 @@
* it in the license file.
*/
-#include <memory>
-
-#include "mongo/platform/basic.h"
-
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/d_concurrency.h"
@@ -46,11 +43,9 @@
#include "mongo/db/query/count_command_gen.h"
#include "mongo/dbtests/dbtests.h"
+namespace mongo {
namespace QueryStageCount {
-using std::unique_ptr;
-using std::vector;
-
const int kDocuments = 100;
const int kInterjections = kDocuments;
const NamespaceString kTestNss = NamespaceString("db.dummy");
@@ -97,7 +92,7 @@ public:
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
- unique_ptr<CollectionScan> scan(
+ std::unique_ptr<CollectionScan> scan(
new CollectionScan(_expCtx.get(), _coll, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -113,7 +108,8 @@ public:
void insert(const BSONObj& doc) {
WriteUnitOfWork wunit(&_opCtx);
OpDebug* const nullOpDebug = nullptr;
- _coll->insertDocument(&_opCtx, InsertStatement(doc), nullOpDebug).transitional_ignore();
+ collection_internal::insertDocument(&_opCtx, _coll, InsertStatement(doc), nullOpDebug)
+ .transitional_ignore();
wunit.commit();
}
@@ -150,12 +146,12 @@ public:
setup();
getRecordIds();
- unique_ptr<WorkingSet> ws(new WorkingSet);
+ std::unique_ptr<WorkingSet> ws(new WorkingSet);
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(request.getQuery(), _expCtx);
ASSERT(statusWithMatcher.isOK());
- unique_ptr<MatchExpression> expression = std::move(statusWithMatcher.getValue());
+ std::unique_ptr<MatchExpression> expression = std::move(statusWithMatcher.getValue());
PlanStage* scan;
if (indexed) {
@@ -236,7 +232,7 @@ public:
}
protected:
- vector<RecordId> _recordIds;
+ std::vector<RecordId> _recordIds;
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_opCtxPtr;
Lock::DBLock _dbLock;
@@ -380,3 +376,4 @@ public:
OldStyleSuiteInitializer<All> queryStageCountAll;
} // namespace QueryStageCount
+} // namespace mongo
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index eebc893d2d5..397027be9d2 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -27,8 +27,7 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
@@ -39,6 +38,7 @@
#include "mongo/db/json.h"
#include "mongo/dbtests/dbtests.h"
+namespace mongo {
namespace QueryStageIxscan {
namespace {
const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
@@ -73,7 +73,12 @@ public:
void insert(const BSONObj& doc) {
WriteUnitOfWork wunit(&_opCtx);
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(_coll->insertDocument(&_opCtx, InsertStatement(doc), nullOpDebug, false));
+ ASSERT_OK(
+ collection_internal::insertDocument(&_opCtx,
+ CollectionPtr(_coll, CollectionPtr::NoYieldTag{}),
+ InsertStatement(doc),
+ nullOpDebug,
+ false));
wunit.commit();
}
@@ -335,3 +340,4 @@ public:
OldStyleSuiteInitializer<All> aueryStageIxscanAll;
} // namespace QueryStageIxscan
+} // namespace mongo
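
CollectionPtr::NoYieldTag appears wherever a fixture holds a bare Collection* outside the AutoGetCollection machinery. The tag presumably constructs a CollectionPtr that is exempt from yield/restore bookkeeping, which is safe in these suites because they hold their locks for the duration of the test and never yield. A sketch, with that reading stated as an assumption:

    // _coll is a raw pointer owned by the fixture; this test never yields, so a
    // non-yieldable CollectionPtr (assumed semantics of NoYieldTag) suffices.
    CollectionPtr collPtr(_coll, CollectionPtr::NoYieldTag{});
    ASSERT_OK(collection_internal::insertDocument(
        &_opCtx, collPtr, InsertStatement(doc), /*opDebug=*/nullptr, false));
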
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index e26ac67509c..cce8ad0dab5 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -44,15 +44,10 @@
#include "mongo/db/exec/queued_data_stage.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/dbtests/dbtests.h"
-#include "mongo/unittest/unittest.h"
+namespace mongo {
namespace {
-using namespace mongo;
-using std::shared_ptr;
-using std::unique_ptr;
-using std::vector;
-
const std::string kTestNamespace = "test.coll";
const BSONObj kTestKeyPattern = BSON("testIndex" << 1);
@@ -101,10 +96,10 @@ protected:
class MockNearStage final : public NearStage {
public:
struct MockInterval {
- MockInterval(const vector<BSONObj>& data, double min, double max)
+ MockInterval(const std::vector<BSONObj>& data, double min, double max)
: data(data), min(min), max(max) {}
- vector<BSONObj> data;
+ std::vector<BSONObj> data;
double min;
double max;
};
@@ -121,7 +116,7 @@ public:
indexDescriptor),
_pos(0) {}
- void addInterval(vector<BSONObj> data, double min, double max) {
+ void addInterval(std::vector<BSONObj> data, double min, double max) {
_intervals.push_back(std::make_unique<MockInterval>(data, min, max));
}
@@ -167,8 +162,8 @@ private:
int _pos;
};
-static vector<BSONObj> advanceStage(PlanStage* stage, WorkingSet* workingSet) {
- vector<BSONObj> results;
+static std::vector<BSONObj> advanceStage(PlanStage* stage, WorkingSet* workingSet) {
+ std::vector<BSONObj> results;
WorkingSetID nextMemberID;
PlanStage::StageState state = PlanStage::NEED_TIME;
@@ -182,9 +177,9 @@ static vector<BSONObj> advanceStage(PlanStage* stage, WorkingSet* workingSet) {
return results;
}
-static void assertAscendingAndValid(const vector<BSONObj>& results) {
+static void assertAscendingAndValid(const std::vector<BSONObj>& results) {
double lastDistance = -1.0;
- for (vector<BSONObj>::const_iterator it = results.begin(); it != results.end(); ++it) {
+ for (std::vector<BSONObj>::const_iterator it = results.begin(); it != results.end(); ++it) {
double distance = (*it)["distance"].numberDouble();
bool shouldInclude = (*it)["$included"].eoo() || (*it)["$included"].trueValue();
ASSERT(shouldInclude);
@@ -194,7 +189,7 @@ static void assertAscendingAndValid(const vector<BSONObj>& results) {
}
TEST_F(QueryStageNearTest, Basic) {
- vector<BSONObj> mockData;
+ std::vector<BSONObj> mockData;
WorkingSet workingSet;
MockNearStage nearStage(_expCtx.get(), &workingSet, getCollection(), _mockGeoIndex);
@@ -223,13 +218,13 @@ TEST_F(QueryStageNearTest, Basic) {
mockData.push_back(BSON("distance" << 3.5)); // Not included
nearStage.addInterval(mockData, 2.0, 3.0);
- vector<BSONObj> results = advanceStage(&nearStage, &workingSet);
+ std::vector<BSONObj> results = advanceStage(&nearStage, &workingSet);
ASSERT_EQUALS(results.size(), 8u);
assertAscendingAndValid(results);
}
TEST_F(QueryStageNearTest, EmptyResults) {
- vector<BSONObj> mockData;
+ std::vector<BSONObj> mockData;
WorkingSet workingSet;
AutoGetCollectionForReadMaybeLockFree autoColl(_opCtx, NamespaceString{kTestNamespace});
@@ -249,8 +244,10 @@ TEST_F(QueryStageNearTest, EmptyResults) {
mockData.push_back(BSON("distance" << 1.0));
nearStage.addInterval(mockData, 1.0, 2.0);
- vector<BSONObj> results = advanceStage(&nearStage, &workingSet);
+ std::vector<BSONObj> results = advanceStage(&nearStage, &workingSet);
ASSERT_EQUALS(results.size(), 3u);
assertAscendingAndValid(results);
}
+
} // namespace
+} // namespace mongo
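
query_stage_near.cpp shows the complementary namespace cleanup: it previously used an anonymous namespace plus `using namespace mongo;`. Wrapping the anonymous namespace in namespace mongo gives the same unqualified lookup while keeping internal linkage and dropping the using-directive:

    namespace mongo {
    namespace {  // internal linkage for fixtures; mongo:: names usable unqualified
    const std::string kTestNamespace = "test.coll";
    // ... fixtures and TEST_F cases ...
    }  // namespace
    }  // namespace mongo
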
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index a10c20504aa..1e5407d62b5 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -27,13 +27,11 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include <boost/optional.hpp>
#include <iostream>
#include "mongo/client/dbclient_cursor.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/multi_index_block.h"
#include "mongo/db/client.h"
#include "mongo/db/clientcursor.h"
@@ -49,7 +47,6 @@
#include "mongo/db/query/find.h"
#include "mongo/db/service_context.h"
#include "mongo/dbtests/dbtests.h"
-#include "mongo/unittest/unittest.h"
#include "mongo/util/timer.h"
namespace mongo {
@@ -62,7 +59,7 @@ void insertOplogDocument(OperationContext* opCtx, Timestamp ts, const char* ns)
InsertStatement stmt;
stmt.doc = doc;
stmt.oplogSlot = OplogSlot{ts, OplogSlot::kInitialTerm};
- auto status = coll->insertDocument(opCtx, stmt, nullptr);
+ auto status = collection_internal::insertDocument(opCtx, *coll, stmt, nullptr);
if (!status.isOK()) {
std::cout << "Failed to insert oplog document: " << status.toString() << std::endl;
}
@@ -158,10 +155,12 @@ protected:
oid.init();
b.appendOID("_id", &oid);
b.appendElements(o);
- _collection->insertDocument(&_opCtx, InsertStatement(b.obj()), nullOpDebug, false)
+ collection_internal::insertDocument(
+ &_opCtx, _collection, InsertStatement(b.obj()), nullOpDebug, false)
.transitional_ignore();
} else {
- _collection->insertDocument(&_opCtx, InsertStatement(o), nullOpDebug, false)
+ collection_internal::insertDocument(
+ &_opCtx, _collection, InsertStatement(o), nullOpDebug, false)
.transitional_ignore();
}
wunit.commit();
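
One detail in querytests.cpp worth flagging: insertOplogDocument() stamps the entry itself by filling InsertStatement::oplogSlot, so the storage layer writes at the requested timestamp instead of allocating one. The rewritten call keeps that pattern; a sketch, with the document shape abbreviated:

    // An InsertStatement carrying its own OplogSlot pins the entry's timestamp.
    InsertStatement stmt;
    stmt.doc = BSON("ts" << ts);  // abbreviated; the test builds a fuller document
    stmt.oplogSlot = OplogSlot{ts, OplogSlot::kInitialTerm};
    auto status = collection_internal::insertDocument(opCtx, *coll, stmt, /*opDebug=*/nullptr);
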
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 84cf1bc9fbd..272c20d0e64 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -27,12 +27,9 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/mutable/document.h"
#include "mongo/bson/mutable/mutable_bson_test_utils.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/exception_util.h"
@@ -51,15 +48,10 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
-
namespace mongo {
namespace repl {
namespace ReplTests {
-using std::string;
-using std::unique_ptr;
-using std::vector;
-
/**
* Creates an OplogEntry with given parameters and preset defaults for this test suite.
*/
@@ -223,7 +215,7 @@ protected:
}
void applyAllOperations() {
Lock::GlobalWrite lk(&_opCtx);
- vector<BSONObj> ops;
+ std::vector<BSONObj> ops;
{
DBDirectClient db(&_opCtx);
auto cursor = db.find(FindCommandRequest{NamespaceString{cllNS()}});
@@ -242,10 +234,7 @@ protected:
}
OldClientContext ctx(&_opCtx, nss());
- for (vector<BSONObj>::iterator i = ops.begin(); i != ops.end(); ++i) {
- if (0) {
- LOGV2(22501, "op: {i}", "i"_attr = *i);
- }
+ for (std::vector<BSONObj>::iterator i = ops.begin(); i != ops.end(); ++i) {
repl::UnreplicatedWritesBlock uwb(&_opCtx);
auto entry = uassertStatusOK(OplogEntry::parse(*i));
// Handle the case of batched writes which generate command-type (applyOps) oplog
@@ -316,7 +305,8 @@ protected:
OpDebug* const nullOpDebug = nullptr;
if (o.hasField("_id")) {
repl::UnreplicatedWritesBlock uwb(&_opCtx);
- coll->insertDocument(&_opCtx, InsertStatement(o), nullOpDebug, true)
+ collection_internal::insertDocument(
+ &_opCtx, coll, InsertStatement(o), nullOpDebug, true)
.transitional_ignore();
ASSERT_OK(_opCtx.recoveryUnit()->setTimestamp(nextTimestamp));
wunit.commit();
@@ -329,7 +319,8 @@ protected:
b.appendOID("_id", &id);
b.appendElements(o);
repl::UnreplicatedWritesBlock uwb(&_opCtx);
- coll->insertDocument(&_opCtx, InsertStatement(b.obj()), nullOpDebug, true)
+ collection_internal::insertDocument(
+ &_opCtx, coll, InsertStatement(b.obj()), nullOpDebug, true)
.transitional_ignore();
ASSERT_OK(_opCtx.recoveryUnit()->setTimestamp(nextTimestamp));
wunit.commit();
@@ -446,7 +437,7 @@ class InsertTwo : public Recovering {
public:
InsertTwo() : o_(fromjson("{'_id':1,a:'b'}")), t_(fromjson("{'_id':2,c:'d'}")) {}
void doIt() const {
- vector<BSONObj> v;
+ std::vector<BSONObj> v;
v.push_back(o_);
v.push_back(t_);
_client.insert(ns(), v);
@@ -787,7 +778,7 @@ protected:
class MultiInc : public Recovering {
public:
- string s() const {
+ std::string s() const {
StringBuilder ss;
FindCommandRequest findRequest{NamespaceString{ns()}};
findRequest.setSort(BSON("_id" << 1));
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index 27cc030a169..5058eb3f8c7 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -27,10 +27,8 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/bsonobj.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/drop_collection.h"
#include "mongo/db/catalog/rename_collection.h"
@@ -40,16 +38,11 @@
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/record_id.h"
#include "mongo/dbtests/dbtests.h"
-#include "mongo/unittest/unittest.h"
-
-using mongo::unittest::assertGet;
-using std::list;
-using std::string;
-using std::unique_ptr;
+namespace mongo {
namespace RollbackTests {
-
namespace {
+
const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
void dropDatabase(OperationContext* opCtx, const NamespaceString& nss) {
@@ -61,7 +54,8 @@ void dropDatabase(OperationContext* opCtx, const NamespaceString& nss) {
databaseHolder->dropDb(opCtx, db);
}
}
-bool collectionExists(OperationContext* opCtx, OldClientContext* ctx, const string& ns) {
+
+bool collectionExists(OperationContext* opCtx, OldClientContext* ctx, const std::string& ns) {
return (bool)CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx,
NamespaceString(ns));
}
@@ -78,12 +72,14 @@ void createCollection(OperationContext* opCtx, const NamespaceString& nss) {
uow.commit();
}
}
+
Status renameCollection(OperationContext* opCtx,
const NamespaceString& source,
const NamespaceString& target) {
ASSERT_EQ(source.db(), target.db());
return renameCollection(opCtx, source, target, {});
}
+
Status truncateCollection(OperationContext* opCtx, const NamespaceString& nss) {
CollectionWriter coll(opCtx, nss);
return coll.getWritableCollection(opCtx)->truncate(opCtx);
@@ -92,8 +88,10 @@ Status truncateCollection(OperationContext* opCtx, const NamespaceString& nss) {
void insertRecord(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& data) {
auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss);
OpDebug* const nullOpDebug = nullptr;
- ASSERT_OK(coll->insertDocument(opCtx, InsertStatement(data), nullOpDebug, false));
+ ASSERT_OK(collection_internal::insertDocument(
+ opCtx, coll, InsertStatement(data), nullOpDebug, false));
}
+
void assertOnlyRecord(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& data) {
auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss);
auto cursor = coll->getCursor(opCtx);
@@ -104,11 +102,13 @@ void assertOnlyRecord(OperationContext* opCtx, const NamespaceString& nss, const
ASSERT(!cursor->next());
}
+
void assertEmpty(OperationContext* opCtx, const NamespaceString& nss) {
auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss);
ASSERT(!coll->getCursor(opCtx)->next());
}
-bool indexExists(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
+
+bool indexExists(OperationContext* opCtx, const NamespaceString& nss, const std::string& idxName) {
auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss);
return coll->getIndexCatalog()->findIndexByName(
opCtx,
@@ -116,14 +116,16 @@ bool indexExists(OperationContext* opCtx, const NamespaceString& nss, const stri
IndexCatalog::InclusionPolicy::kReady |
IndexCatalog::InclusionPolicy::kUnfinished) != nullptr;
}
-bool indexReady(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
+
+bool indexReady(OperationContext* opCtx, const NamespaceString& nss, const std::string& idxName) {
auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss);
return coll->getIndexCatalog()->findIndexByName(
opCtx, idxName, IndexCatalog::InclusionPolicy::kReady) != nullptr;
}
+
size_t getNumIndexEntries(OperationContext* opCtx,
const NamespaceString& nss,
- const string& idxName) {
+ const std::string& idxName) {
size_t numEntries = 0;
auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss);
@@ -144,7 +146,7 @@ size_t getNumIndexEntries(OperationContext* opCtx,
return numEntries;
}
-void dropIndex(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
+void dropIndex(OperationContext* opCtx, const NamespaceString& nss, const std::string& idxName) {
CollectionWriter coll(opCtx, nss);
auto desc =
coll.getWritableCollection(opCtx)->getIndexCatalog()->findIndexByName(opCtx, idxName);
@@ -152,6 +154,7 @@ void dropIndex(OperationContext* opCtx, const NamespaceString& nss, const string
ASSERT_OK(coll.getWritableCollection(opCtx)->getIndexCatalog()->dropIndex(
opCtx, coll.getWritableCollection(opCtx), desc));
}
+
} // namespace
template <bool rollback, bool defaultIndexes, bool capped>
@@ -163,7 +166,7 @@ public:
return;
}
- string ns = "unittests.rollback_create_collection";
+ std::string ns = "unittests.rollback_create_collection";
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
@@ -200,7 +203,7 @@ public:
return;
}
- string ns = "unittests.rollback_drop_collection";
+ std::string ns = "unittests.rollback_drop_collection";
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
@@ -481,7 +484,7 @@ template <bool rollback>
class CreateIndex {
public:
void run() {
- string ns = "unittests.rollback_create_index";
+ std::string ns = "unittests.rollback_create_index";
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
@@ -492,7 +495,7 @@ public:
CollectionWriter coll(&opCtx, nss);
- string idxName = "a";
+ std::string idxName = "a";
BSONObj spec = BSON("key" << BSON("a" << 1) << "name" << idxName << "v"
<< static_cast<int>(kIndexVersion));
@@ -523,7 +526,7 @@ template <bool rollback>
class DropIndex {
public:
void run() {
- string ns = "unittests.rollback_drop_index";
+ std::string ns = "unittests.rollback_drop_index";
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
@@ -534,7 +537,7 @@ public:
CollectionWriter coll(&opCtx, nss);
- string idxName = "a";
+ std::string idxName = "a";
BSONObj spec = BSON("key" << BSON("a" << 1) << "name" << idxName << "v"
<< static_cast<int>(kIndexVersion));
@@ -577,7 +580,7 @@ template <bool rollback>
class CreateDropIndex {
public:
void run() {
- string ns = "unittests.rollback_create_drop_index";
+ std::string ns = "unittests.rollback_create_drop_index";
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
@@ -587,7 +590,7 @@ public:
AutoGetDb autoDb(&opCtx, nss.dbName(), MODE_X);
CollectionWriter coll(&opCtx, nss);
- string idxName = "a";
+ std::string idxName = "a";
BSONObj spec = BSON("key" << BSON("a" << 1) << "name" << idxName << "v"
<< static_cast<int>(kIndexVersion));
@@ -621,7 +624,7 @@ template <bool rollback>
class CreateCollectionAndIndexes {
public:
void run() {
- string ns = "unittests.rollback_create_collection_and_indexes";
+ std::string ns = "unittests.rollback_create_collection_and_indexes";
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
NamespaceString nss(ns);
@@ -630,9 +633,9 @@ public:
Lock::DBLock dbXLock(&opCtx, nss.dbName(), MODE_X);
OldClientContext ctx(&opCtx, nss);
- string idxNameA = "indexA";
- string idxNameB = "indexB";
- string idxNameC = "indexC";
+ std::string idxNameA = "indexA";
+ std::string idxNameB = "indexB";
+ std::string idxNameC = "indexC";
BSONObj specA = BSON("key" << BSON("a" << 1) << "name" << idxNameA << "v"
<< static_cast<int>(kIndexVersion));
BSONObj specB = BSON("key" << BSON("b" << 1) << "name" << idxNameB << "v"
@@ -719,3 +722,4 @@ public:
OldStyleSuiteInitializer<All> all;
} // namespace RollbackTests
+} // namespace mongo
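
Besides the call-site rewrites, rollbacktests.cpp is now wrapped in namespace mongo, which is what lets the file-scope using-declarations go away: MongoDB types resolve unqualified, and std names are simply spelled out. A hedged sketch of that layout, with dummy contents standing in for the suite code:

    // Sketch of the namespace layout this commit moves dbtests files toward:
    // suite code inside namespace mongo, file-local helpers in an anonymous
    // namespace, std:: names written out rather than imported.
    #include <string>

    namespace mongo {
    namespace RollbackTests {
    namespace {

    // Internal linkage: visible only within this translation unit.
    bool isUnitTestNamespace(const std::string& ns) {
        return ns.rfind("unittests.", 0) == 0;  // prefix check
    }

    }  // namespace

    bool runOne() {
        // Real MongoDB names would resolve here without a mongo:: prefix.
        return isUnitTestNamespace("unittests.rollback_create_collection");
    }

    }  // namespace RollbackTests
    }  // namespace mongo

    int main() {
        return mongo::RollbackTests::runOne() ? 0 : 1;
    }
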
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index f1fa76d2bb9..cc054d253f7 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -27,13 +27,9 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include <cstdint>
-
#include "mongo/db/catalog/clustered_collection_util.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_validation.h"
+#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
@@ -47,10 +43,8 @@
#include "mongo/dbtests/dbtests.h"
#include "mongo/dbtests/storage_debug_util.h"
+namespace mongo {
namespace ValidateTests {
-
-using std::unique_ptr;
-
namespace {
const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
@@ -208,7 +202,7 @@ protected:
bool _full;
bool _background;
const NamespaceString _nss;
- unique_ptr<AutoGetDb> _autoDb;
+ std::unique_ptr<AutoGetDb> _autoDb;
Database* _db;
};
@@ -232,11 +226,11 @@ public:
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1)), nullOpDebug, true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 2)), nullOpDebug, true));
wunit.commit();
}
releaseDb();
@@ -288,11 +282,11 @@ public:
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
wunit.commit();
}
@@ -354,13 +348,13 @@ public:
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 3 << "b" << 3)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 3 << "b" << 3)), nullOpDebug, true));
wunit.commit();
}
@@ -413,11 +407,11 @@ public:
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1)), nullOpDebug, true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 2)), nullOpDebug, true));
wunit.commit();
}
releaseDb();
@@ -498,10 +492,13 @@ public:
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(coll()->insertDocument(&_opCtx, InsertStatement(doc1), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(doc1), nullOpDebug, true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
- ASSERT_OK(coll()->insertDocument(&_opCtx, InsertStatement(doc2), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(&_opCtx, InsertStatement(doc3), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(doc2), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(doc3), nullOpDebug, true));
wunit.commit();
}
releaseDb();
@@ -569,13 +566,13 @@ public:
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 3 << "b" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 3 << "b" << 1)), nullOpDebug, true));
wunit.commit();
}
@@ -628,15 +625,16 @@ public:
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
// Explicitly test that multi-key partial indexes containing documents that
// don't match the filter expression are handled correctly.
- ASSERT_OK(coll()->insertDocument(
+ ASSERT_OK(collection_internal::insertDocument(
&_opCtx,
+ coll(),
InsertStatement(BSON("_id" << 3 << "a" << BSON_ARRAY(-1 << -2 << -3))),
nullOpDebug,
true));
@@ -694,11 +692,12 @@ public:
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 1 << "x" << 1 << "a" << 2)),
- nullOpDebug,
- true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 1 << "x" << 1 << "a" << 2)),
+ nullOpDebug,
+ true));
wunit.commit();
}
@@ -753,23 +752,25 @@ public:
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 4)),
- nullOpDebug,
- true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 4)),
+ nullOpDebug,
+ true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 2 << "a" << 2 << "b" << 5)),
- nullOpDebug,
- true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 3 << "a" << 3)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 4 << "b" << 6)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 5 << "c" << 7)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 2 << "a" << 2 << "b" << 5)),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 3 << "a" << 3)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 4 << "b" << 6)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 5 << "c" << 7)), nullOpDebug, true));
wunit.commit();
}
@@ -835,13 +836,13 @@ public:
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 3 << "b" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 3 << "b" << 1)), nullOpDebug, true));
wunit.commit();
}
@@ -966,16 +967,18 @@ public:
lockDb(MODE_X);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 1)),
- nullOpDebug,
- true));
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 2 << "b" << BSON("0" << 1))),
- nullOpDebug,
- true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 1)),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 2 << "b" << BSON("0" << 1))),
+ nullOpDebug,
+ true));
wunit.commit();
}
releaseDb();
@@ -985,13 +988,15 @@ public:
lockDb(MODE_X);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(coll()->insertDocument(
+ ASSERT_OK(collection_internal::insertDocument(
&_opCtx,
+ coll(),
InsertStatement(BSON("_id" << 3 << "mk_1" << BSON_ARRAY(1 << 2 << 3))),
nullOpDebug,
true));
- ASSERT_OK(coll()->insertDocument(
+ ASSERT_OK(collection_internal::insertDocument(
&_opCtx,
+ coll(),
InsertStatement(BSON("_id" << 4 << "mk_2" << BSON_ARRAY(BSON("e" << 1)))),
nullOpDebug,
true));
@@ -1086,30 +1091,35 @@ public:
lockDb(MODE_X);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 1)),
- nullOpDebug,
- true));
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 2 << "a" << BSON("w" << 1))),
- nullOpDebug,
- true));
- ASSERT_OK(coll()->insertDocument(
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 1)),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
&_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 2 << "a" << BSON("w" << 1))),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
InsertStatement(BSON("_id" << 3 << "a" << BSON_ARRAY("x" << 1))),
nullOpDebug,
true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 4 << "b" << 2)), nullOpDebug, true));
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 5 << "b" << BSON("y" << 1))),
- nullOpDebug,
- true));
- ASSERT_OK(coll()->insertDocument(
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 4 << "b" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
&_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 5 << "b" << BSON("y" << 1))),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
InsertStatement(BSON("_id" << 6 << "b" << BSON_ARRAY("z" << 1))),
nullOpDebug,
true));
@@ -1182,12 +1192,12 @@ public:
lockDb(MODE_X);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 3 << "a" << 3)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 3 << "a" << 3)), nullOpDebug, true));
rid = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -1276,12 +1286,12 @@ public:
lockDb(MODE_X);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 3 << "a" << 3)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 3 << "a" << 3)), nullOpDebug, true));
rid = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -1391,12 +1401,12 @@ public:
lockDb(MODE_X);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 3 << "a" << 3)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 3 << "a" << 3)), nullOpDebug, true));
rid = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -1486,21 +1496,24 @@ public:
lockDb(MODE_X);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 1)),
- nullOpDebug,
- true));
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 2 << "a" << 3 << "b" << 3)),
- nullOpDebug,
- true));
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 3 << "a" << 6 << "b" << 6)),
- nullOpDebug,
- true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 1)),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 2 << "a" << 3 << "b" << 3)),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 3 << "a" << 6 << "b" << 6)),
+ nullOpDebug,
+ true));
wunit.commit();
}
releaseDb();
@@ -1660,12 +1673,12 @@ public:
lockDb(MODE_X);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 3 << "a" << 3)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 3 << "a" << 3)), nullOpDebug, true));
rid = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -1839,12 +1852,12 @@ public:
lockDb(MODE_X);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 3 << "a" << 3)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 2 << "a" << 2)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 3 << "a" << 3)), nullOpDebug, true));
rid = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -1977,8 +1990,8 @@ public:
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
wunit.commit();
}
@@ -1999,8 +2012,8 @@ public:
BSONObj dupObj = BSON("_id" << 2 << "a" << 1);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_NOT_OK(
- coll()->insertDocument(&_opCtx, InsertStatement(dupObj), nullOpDebug, true));
+ ASSERT_NOT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(dupObj), nullOpDebug, true));
}
releaseDb();
ensureValidateWorked();
@@ -2197,11 +2210,12 @@ public:
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 1)),
- nullOpDebug,
- true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 1)),
+ nullOpDebug,
+ true));
wunit.commit();
}
@@ -2234,8 +2248,8 @@ public:
BSONObj dupObj = BSON("_id" << 2 << "a" << 1 << "b" << 1);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_NOT_OK(
- coll()->insertDocument(&_opCtx, InsertStatement(dupObj), nullOpDebug, true));
+ ASSERT_NOT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(dupObj), nullOpDebug, true));
}
releaseDb();
ensureValidateWorked();
@@ -2437,11 +2451,12 @@ public:
WriteUnitOfWork wunit(&_opCtx);
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 1)),
- nullOpDebug,
- true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 1)),
+ nullOpDebug,
+ true));
rid1 = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -2475,8 +2490,8 @@ public:
BSONObj dupObj = BSON("_id" << 2 << "a" << 1 << "b" << 1);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_NOT_OK(
- coll()->insertDocument(&_opCtx, InsertStatement(dupObj), nullOpDebug, true));
+ ASSERT_NOT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(dupObj), nullOpDebug, true));
}
releaseDb();
ensureValidateWorked();
@@ -2768,7 +2783,8 @@ public:
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(coll()->insertDocument(&_opCtx, InsertStatement(doc), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(doc), nullOpDebug, true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -2976,11 +2992,12 @@ public:
lockDb(MODE_X);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 1)),
- nullOpDebug,
- true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << 1 << "a" << 1 << "b" << 1)),
+ nullOpDebug,
+ true));
rid = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -3112,8 +3129,8 @@ public:
lockDb(MODE_X);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(coll()->insertDocument(
- &_opCtx, InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(BSON("_id" << 1 << "a" << 1)), nullOpDebug, true));
wunit.commit();
}
@@ -3122,8 +3139,8 @@ public:
BSONObj dupObj = BSON("_id" << 2 << "a" << 1);
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_NOT_OK(
- coll()->insertDocument(&_opCtx, InsertStatement(dupObj), nullOpDebug, true));
+ ASSERT_NOT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(dupObj), nullOpDebug, true));
}
releaseDb();
ensureValidateWorked();
@@ -3515,7 +3532,8 @@ public:
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(coll()->insertDocument(&_opCtx, InsertStatement(doc), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(doc), nullOpDebug, true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -3739,7 +3757,8 @@ public:
ASSERT_OK(_db->dropCollection(&_opCtx, _nss));
_db->createCollection(&_opCtx, _nss);
- ASSERT_OK(coll()->insertDocument(&_opCtx, InsertStatement(doc1), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(doc1), nullOpDebug, true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -3985,7 +4004,8 @@ public:
OpDebug* const nullOpDebug = nullptr;
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(coll()->insertDocument(&_opCtx, InsertStatement(doc1), nullOpDebug, true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx, coll(), InsertStatement(doc1), nullOpDebug, true));
id1 = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -4157,19 +4177,24 @@ public:
const OID firstRecordId = OID::gen();
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << firstRecordId << "a" << 1)),
- nullOpDebug,
- true));
- ASSERT_OK(coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << OID::gen() << "a" << 2)),
- nullOpDebug,
- true));
- ASSERT_OK(coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << OID::gen() << "a" << 3)),
- nullOpDebug,
- true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << firstRecordId << "a" << 1)),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << OID::gen() << "a" << 2)),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << OID::gen() << "a" << 3)),
+ nullOpDebug,
+ true));
rid = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -4252,19 +4277,24 @@ public:
const OID firstRecordId = OID::gen();
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << firstRecordId << "a" << 1)),
- nullOpDebug,
- true));
- ASSERT_OK(coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << OID::gen() << "a" << 2)),
- nullOpDebug,
- true));
- ASSERT_OK(coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << OID::gen() << "a" << 3)),
- nullOpDebug,
- true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << firstRecordId << "a" << 1)),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << OID::gen() << "a" << 2)),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << OID::gen() << "a" << 3)),
+ nullOpDebug,
+ true));
rid = coll()->getCursor(&_opCtx)->next()->id;
wunit.commit();
}
@@ -4390,19 +4420,24 @@ public:
const OID firstRecordId = OID::gen();
{
WriteUnitOfWork wunit(&_opCtx);
- ASSERT_OK(
- coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << firstRecordId << "a" << 1)),
- nullOpDebug,
- true));
- ASSERT_OK(coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << OID::gen() << "a" << 2)),
- nullOpDebug,
- true));
- ASSERT_OK(coll()->insertDocument(&_opCtx,
- InsertStatement(BSON("_id" << OID::gen() << "a" << 3)),
- nullOpDebug,
- true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << firstRecordId << "a" << 1)),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << OID::gen() << "a" << 2)),
+ nullOpDebug,
+ true));
+ ASSERT_OK(collection_internal::insertDocument(
+ &_opCtx,
+ coll(),
+ InsertStatement(BSON("_id" << OID::gen() << "a" << 3)),
+ nullOpDebug,
+ true));
wunit.commit();
}
releaseDb();
@@ -4556,3 +4591,4 @@ public:
OldStyleSuiteInitializer<ValidateTests> validateTests;
} // namespace ValidateTests
+} // namespace mongo
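
Every rewritten call site in validate_tests.cpp sits inside the same scaffold: construct a WriteUnitOfWork, perform the writes, call commit() before the scope closes. The sketch below mimics that commit-or-rollback shape with a toy store; it illustrates the RAII idiom only and is not MongoDB's recovery-unit implementation.

    // Simplified commit-or-rollback RAII sketch, mirroring the
    // `WriteUnitOfWork wunit(&_opCtx); ...; wunit.commit();` scaffold used
    // throughout these tests. Destruction without commit() discards writes.
    #include <iostream>
    #include <vector>

    class FakeStore {
    public:
        void apply(int doc) { _docs.push_back(doc); }
        void truncateTo(size_t n) { _docs.resize(n); }
        size_t size() const { return _docs.size(); }
    private:
        std::vector<int> _docs;
    };

    class WriteUnit {
    public:
        explicit WriteUnit(FakeStore* store) : _store(store), _mark(store->size()) {}
        ~WriteUnit() {
            if (!_committed) _store->truncateTo(_mark);  // rollback on scope exit
        }
        void commit() { _committed = true; }
    private:
        FakeStore* _store;
        size_t _mark;
        bool _committed = false;
    };

    int main() {
        FakeStore store;
        {
            WriteUnit wu(&store);
            store.apply(1);
            store.apply(2);
            wu.commit();  // writes survive the scope
        }
        {
            WriteUnit wu(&store);
            store.apply(3);  // no commit: rolled back when wu is destroyed
        }
        std::cout << store.size() << '\n';  // prints 2
    }
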
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 0e3e603a269..c1d4d03af7e 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/s/catalog/sharding_catalog_client_impl.h"
#include <fmt/format.h>
@@ -79,17 +76,7 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
-
-using repl::OpTime;
-using std::set;
-using std::shared_ptr;
-using std::string;
-using std::unique_ptr;
-using std::vector;
-using str::stream;
-
namespace {
using namespace fmt::literals;
@@ -332,7 +319,7 @@ DatabaseType ShardingCatalogClientImpl::getDatabase(OperationContext* opCtx,
StringData dbName,
repl::ReadConcernLevel readConcernLevel) {
uassert(ErrorCodes::InvalidNamespace,
- stream() << dbName << " is not a valid db name",
+ str::stream() << dbName << " is not a valid db name",
NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow));
// The admin database is always hosted on the config server.
@@ -403,7 +390,8 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchData
const auto& docsWithOpTime = findStatus.getValue();
if (docsWithOpTime.value.empty()) {
- return {ErrorCodes::NamespaceNotFound, stream() << "database " << dbName << " not found"};
+ return {ErrorCodes::NamespaceNotFound,
+ str::stream() << "database " << dbName << " not found"};
}
invariant(docsWithOpTime.value.size() == 1);
@@ -430,7 +418,7 @@ CollectionType ShardingCatalogClientImpl::getCollection(OperationContext* opCtx,
1))
.value;
uassert(ErrorCodes::NamespaceNotFound,
- stream() << "collection " << nss.ns() << " not found",
+ str::stream() << "collection " << nss.ns() << " not found",
!collDoc.empty());
return CollectionType(collDoc[0]);
@@ -449,7 +437,7 @@ CollectionType ShardingCatalogClientImpl::getCollection(OperationContext* opCtx,
1))
.value;
uassert(ErrorCodes::NamespaceNotFound,
- stream() << "Collection with UUID '" << uuid << "' not found",
+ str::stream() << "Collection with UUID '" << uuid << "' not found",
!collDoc.empty());
return CollectionType(collDoc[0]);
@@ -576,7 +564,7 @@ StatusWith<std::vector<std::string>> ShardingCatalogClientImpl::getDatabasesForS
std::vector<std::string> dbs;
dbs.reserve(values.size());
for (const BSONObj& obj : values) {
- string dbName;
+ std::string dbName;
Status status = bsonExtractStringField(obj, DatabaseType::kNameFieldName, &dbName);
if (!status.isOK()) {
return status;
@@ -593,7 +581,7 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks(
const BSONObj& query,
const BSONObj& sort,
boost::optional<int> limit,
- OpTime* opTime,
+ repl::OpTime* opTime,
const OID& epoch,
const Timestamp& timestamp,
repl::ReadConcernLevel readConcern,
@@ -616,8 +604,8 @@ StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks(
for (const BSONObj& obj : chunkDocsOpTimePair.value) {
auto chunkRes = ChunkType::parseFromConfigBSON(obj, epoch, timestamp);
if (!chunkRes.isOK()) {
- return chunkRes.getStatus().withContext(stream() << "Failed to parse chunk with id "
- << obj[ChunkType::name()]);
+ return chunkRes.getStatus().withContext(
+ str::stream() << "Failed to parse chunk with id " << obj[ChunkType::name()]);
}
chunks.push_back(std::move(chunkRes.getValue()));
@@ -681,7 +669,7 @@ std::pair<CollectionType, std::vector<ChunkType>> ShardingCatalogClientImpl::get
}
uassert(ErrorCodes::NamespaceNotFound,
- stream() << "Collection " << nss.ns() << " not found",
+ str::stream() << "Collection " << nss.ns() << " not found",
!aggResult.empty());
@@ -720,7 +708,7 @@ std::pair<CollectionType, std::vector<ChunkType>> ShardingCatalogClientImpl::get
}
uassert(ErrorCodes::ConflictingOperationInProgress,
- stream() << "No chunks were found for the collection " << nss,
+ str::stream() << "No chunks were found for the collection " << nss,
!chunks.empty());
}
@@ -776,13 +764,13 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::
for (const BSONObj& doc : findStatus.getValue().value) {
auto shardRes = ShardType::fromBSON(doc);
if (!shardRes.isOK()) {
- return shardRes.getStatus().withContext(stream()
+ return shardRes.getStatus().withContext(str::stream()
<< "Failed to parse shard document " << doc);
}
Status validateStatus = shardRes.getValue().validate();
if (!validateStatus.isOK()) {
- return validateStatus.withContext(stream()
+ return validateStatus.withContext(str::stream()
<< "Failed to validate shard document " << doc);
}
@@ -950,10 +938,12 @@ Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* opCtx,
auto existingDocs = fetchDuplicate.getValue().value;
if (existingDocs.empty()) {
- return {status.withContext(
- stream() << "DuplicateKey error was returned after a retry attempt, but no "
- "documents were found. This means a concurrent change occurred "
- "together with the retries.")};
+ return {
+ status.withContext(
+ str::stream()
+ << "DuplicateKey error was returned after a retry attempt, but no "
+ "documents were found. This means a concurrent change occurred "
+ "together with the retries.")};
}
invariant(existingDocs.size() == 1);
@@ -1059,23 +1049,23 @@ Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* opCtx,
return response.toStatus();
}
-StatusWith<repl::OpTimeWith<vector<BSONObj>>> ShardingCatalogClientImpl::_exhaustiveFindOnConfig(
- OperationContext* opCtx,
- const ReadPreferenceSetting& readPref,
- const repl::ReadConcernLevel& readConcern,
- const NamespaceString& nss,
- const BSONObj& query,
- const BSONObj& sort,
- boost::optional<long long> limit,
- const boost::optional<BSONObj>& hint) {
+StatusWith<repl::OpTimeWith<std::vector<BSONObj>>>
+ShardingCatalogClientImpl::_exhaustiveFindOnConfig(OperationContext* opCtx,
+ const ReadPreferenceSetting& readPref,
+ const repl::ReadConcernLevel& readConcern,
+ const NamespaceString& nss,
+ const BSONObj& query,
+ const BSONObj& sort,
+ boost::optional<long long> limit,
+ const boost::optional<BSONObj>& hint) {
auto response = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
opCtx, readPref, readConcern, nss, query, sort, limit, hint);
if (!response.isOK()) {
return response.getStatus();
}
- return repl::OpTimeWith<vector<BSONObj>>(std::move(response.getValue().docs),
- response.getValue().opTime);
+ return repl::OpTimeWith<std::vector<BSONObj>>(std::move(response.getValue().docs),
+ response.getValue().opTime);
}
StatusWith<std::vector<KeysCollectionDocument>> ShardingCatalogClientImpl::getNewKeys(
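
The sharding_catalog_client_impl.cpp hunks delete `using str::stream;` and qualify each builder as str::stream() at the call site. The toy analogue below mimics only the usage pattern (accumulate a message with operator<<, convert implicitly to std::string); the real mongo::str::stream lives in the util headers and is richer than this.

    // Toy string builder mimicking how `str::stream() << a << b` assembles
    // error messages and converts implicitly to std::string at the end.
    #include <iostream>
    #include <sstream>
    #include <string>

    namespace str {
    class stream {
    public:
        template <typename T>
        stream& operator<<(const T& v) {
            _ss << v;
            return *this;
        }
        operator std::string() const { return _ss.str(); }  // implicit conversion
    private:
        std::ostringstream _ss;
    };
    }  // namespace str

    int main() {
        std::string dbName = "test";
        // Qualified at the call site, as in the diff, instead of `using str::stream;`.
        std::string msg = str::stream() << "database " << dbName << " not found";
        std::cout << msg << '\n';
    }

Writing the qualified name at each call site keeps the temporary's origin obvious and matches the commit's broader removal of file-scope using-declarations.
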
diff --git a/src/mongo/s/catalog/sharding_catalog_client_test.cpp b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
index ba77fd693e7..5ff6a5d5fff 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/json.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
@@ -47,7 +45,6 @@
#include "mongo/s/catalog/type_database_gen.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/database_version.h"
#include "mongo/s/sharding_router_test_fixture.h"
diff --git a/src/mongo/s/chunk.h b/src/mongo/s/chunk.h
index 3f60e997737..b2e96e2fe6e 100644
--- a/src/mongo/s/chunk.h
+++ b/src/mongo/s/chunk.h
@@ -30,8 +30,6 @@
#pragma once
#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/chunk_version.h"
-#include "mongo/s/shard_id.h"
namespace mongo {
diff --git a/src/mongo/s/chunk_test.cpp b/src/mongo/s/chunk_test.cpp
index d1c595c05e8..61ffc30f269 100644
--- a/src/mongo/s/chunk_test.cpp
+++ b/src/mongo/s/chunk_test.cpp
@@ -27,11 +27,7 @@
* it in the license file.
*/
-#include "mongo/db/namespace_string.h"
-#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/chunk.h"
-#include "mongo/s/chunk_version.h"
-#include "mongo/s/shard_id.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
diff --git a/src/mongo/s/chunk_version_test.cpp b/src/mongo/s/chunk_version_test.cpp
index 7693ff55c85..e92bec19aa8 100644
--- a/src/mongo/s/chunk_version_test.cpp
+++ b/src/mongo/s/chunk_version_test.cpp
@@ -30,7 +30,6 @@
#include <limits>
#include "mongo/s/chunk_version.h"
-#include "mongo/s/chunk_version_gen.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
diff --git a/src/mongo/s/shard_util.h b/src/mongo/s/shard_util.h
index 997dcc1e4c9..7558906ab27 100644
--- a/src/mongo/s/shard_util.h
+++ b/src/mongo/s/shard_util.h
@@ -34,7 +34,6 @@
#include <vector>
#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard.h"
namespace mongo {