author     Charlie Swanson <charlie.swanson@mongodb.com>  2017-03-02 13:54:24 -0500
committer  Charlie Swanson <charlie.swanson@mongodb.com>  2017-03-15 11:03:44 -0400
commit     f05b9437fbdc53deecf55ed3c20e36af3d733953 (patch)
tree       8b66acc133f83f27ef00f31600538f225ec2746a
parent     a1f15dfd788a78da77bda6675132d4144691fce1 (diff)
download   mongo-f05b9437fbdc53deecf55ed3c20e36af3d733953.tar.gz
SERVER-22541 Refactor RAII locking helpers.
Removes the class 'ScopedTransaction' and moves the responsibility of abandoning the snapshot onto the GlobalLock class. Also renames the AutoGetCollectionForRead class to AutoGetCollectionForReadCommand, and adds a new AutoGetCollectionForRead class. Unlike AutoGetCollectionForReadCommand, this new class will not update the namespace on the CurrentOp object, nor will it add an entry to Top.
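
The call-site change is mechanical: the RAII lock helpers now take the OperationContext directly instead of its Locker, and the separate ScopedTransaction line goes away because GlobalLock abandons the snapshot itself. A minimal sketch of the new usage follows (exampleWrite, exampleCommandRead, and exampleInternalRead are hypothetical callers added for illustration; the lock classes and headers are the ones touched in this diff):

    #include "mongo/db/concurrency/d_concurrency.h"
    #include "mongo/db/db_raii.h"
    #include "mongo/db/namespace_string.h"
    #include "mongo/db/operation_context.h"

    namespace mongo {

    // Previously: ScopedTransaction transaction(opCtx, MODE_IX);
    //             Lock::DBLock dbXLock(opCtx->lockState(), dbName, MODE_X);
    // Now the lock helpers take the OperationContext and handle snapshot
    // abandonment themselves.
    void exampleWrite(OperationContext* opCtx, StringData dbName) {
        Lock::DBLock dbXLock(opCtx, dbName, MODE_X);  // no ScopedTransaction needed
        // ... perform writes while holding the database X lock ...
    }

    // Command path: updates the namespace on CurOp and records the read in Top.
    void exampleCommandRead(OperationContext* opCtx, const NamespaceString& nss) {
        AutoGetCollectionForReadCommand ctx(opCtx, nss);
        if (Collection* coll = ctx.getCollection()) {
            // ... read from coll ...
        }
    }

    // Internal path: the re-added AutoGetCollectionForRead only takes locks and
    // manages the snapshot; it does not touch CurOp or Top.
    void exampleInternalRead(OperationContext* opCtx, const NamespaceString& nss) {
        AutoGetCollectionForRead ctx(opCtx, nss);
        // ... read without the command-level bookkeeping ...
    }

    }  // namespace mongo
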
-rw-r--r--  jstests/core/operation_latency_histogram.js | 6
-rw-r--r--  jstests/noPassthroughWithMongod/top_drop.js | 4
-rw-r--r--  src/mongo/db/auth/auth_index_d.cpp | 1
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_d.cpp | 2
-rw-r--r--  src/mongo/db/catalog/apply_ops.cpp | 5
-rw-r--r--  src/mongo/db/catalog/capped_utils.cpp | 2
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp | 1
-rw-r--r--  src/mongo/db/catalog/collection.cpp | 1
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp | 3
-rw-r--r--  src/mongo/db/catalog/cursor_manager.cpp | 4
-rw-r--r--  src/mongo/db/catalog/database.cpp | 4
-rw-r--r--  src/mongo/db/catalog/drop_collection.cpp | 2
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp | 3
-rw-r--r--  src/mongo/db/catalog/drop_indexes.cpp | 1
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry.cpp | 1
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp | 3
-rw-r--r--  src/mongo/db/cloner.cpp | 10
-rw-r--r--  src/mongo/db/commands/clone.cpp | 4
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp | 1
-rw-r--r--  src/mongo/db/commands/compact.cpp | 1
-rw-r--r--  src/mongo/db/commands/copydb.cpp | 7
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp | 4
-rw-r--r--  src/mongo/db/commands/cpuprofile.cpp | 6
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp | 4
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp | 17
-rw-r--r--  src/mongo/db/commands/dbhash.cpp | 1
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 4
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp | 3
-rw-r--r--  src/mongo/db/commands/eval.cpp | 3
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version.cpp | 2
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 4
-rw-r--r--  src/mongo/db/commands/fsync.cpp | 8
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp | 2
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp | 14
-rw-r--r--  src/mongo/db/commands/group_cmd.cpp | 4
-rw-r--r--  src/mongo/db/commands/haystack.cpp | 2
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp | 6
-rw-r--r--  src/mongo/db/commands/killcursors_cmd.cpp | 4
-rw-r--r--  src/mongo/db/commands/list_collections.cpp | 1
-rw-r--r--  src/mongo/db/commands/list_databases.cpp | 6
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp | 2
-rw-r--r--  src/mongo/db/commands/mr.cpp | 39
-rw-r--r--  src/mongo/db/commands/oplog_note.cpp | 4
-rw-r--r--  src/mongo/db/commands/parallel_collection_scan.cpp | 2
-rw-r--r--  src/mongo/db/commands/plan_cache_commands.cpp | 6
-rw-r--r--  src/mongo/db/commands/repair_cursor.cpp | 2
-rw-r--r--  src/mongo/db/commands/run_aggregate.cpp | 6
-rw-r--r--  src/mongo/db/commands/snapshot_management.cpp | 7
-rw-r--r--  src/mongo/db/commands/test_commands.cpp | 9
-rw-r--r--  src/mongo/db/commands/touch.cpp | 2
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.cpp | 2
-rw-r--r--  src/mongo/db/concurrency/SConscript | 1
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp | 38
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.h | 32
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp | 617
-rw-r--r--  src/mongo/db/db.cpp | 16
-rw-r--r--  src/mongo/db/db_raii.cpp | 92
-rw-r--r--  src/mongo/db/db_raii.h | 107
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 6
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp | 1
-rw-r--r--  src/mongo/db/ftdc/collector.cpp | 5
-rw-r--r--  src/mongo/db/index_builder.cpp | 3
-rw-r--r--  src/mongo/db/index_builder.h | 1
-rw-r--r--  src/mongo/db/index_rebuilder.cpp | 7
-rw-r--r--  src/mongo/db/introspect.cpp | 2
-rw-r--r--  src/mongo/db/matcher/expression_text.cpp | 1
-rw-r--r--  src/mongo/db/operation_context.h | 35
-rw-r--r--  src/mongo/db/ops/update.cpp | 3
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp | 10
-rw-r--r--  src/mongo/db/prefetch.cpp | 1
-rw-r--r--  src/mongo/db/query/find.cpp | 12
-rw-r--r--  src/mongo/db/read_concern.cpp | 4
-rw-r--r--  src/mongo/db/repl/bgsync.cpp | 3
-rw-r--r--  src/mongo/db/repl/database_task.cpp | 10
-rw-r--r--  src/mongo/db/repl/master_slave.cpp | 41
-rw-r--r--  src/mongo/db/repl/noop_writer.cpp | 4
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 12
-rw-r--r--  src/mongo/db/repl/oplog_buffer_collection_test.cpp | 16
-rw-r--r--  src/mongo/db/repl/oplog_interface_local.cpp | 4
-rw-r--r--  src/mongo/db/repl/repl_set_commands.cpp | 3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 19
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 5
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp | 16
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_test.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_info.cpp | 2
-rw-r--r--  src/mongo/db/repl/resync.cpp | 3
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp | 6
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 20
-rw-r--r--  src/mongo/db/repl/rs_rollback_test.cpp | 22
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp | 14
-rw-r--r--  src/mongo/db/repl/storage_interface_impl_test.cpp | 28
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp | 17
-rw-r--r--  src/mongo/db/repl/sync_tail_test.cpp | 15
-rw-r--r--  src/mongo/db/restapi.cpp | 3
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 2
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp | 6
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp | 11
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp | 6
-rw-r--r--  src/mongo/db/s/sharding_state.cpp | 1
-rw-r--r--  src/mongo/db/service_context_d_test_fixture.cpp | 3
-rw-r--r--  src/mongo/db/stats/storage_stats.cpp | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_recover.cpp | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp | 1
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp | 2
-rw-r--r--  src/mongo/db/ttl.cpp | 1
-rw-r--r--  src/mongo/db/views/durable_view_catalog.cpp | 1
-rw-r--r--  src/mongo/dbtests/counttests.cpp | 8
-rw-r--r--  src/mongo/dbtests/dbhelper_tests.cpp | 3
-rw-r--r--  src/mongo/dbtests/indexcatalogtests.cpp | 12
-rw-r--r--  src/mongo/dbtests/matchertests.cpp | 2
-rw-r--r--  src/mongo/dbtests/mmaptests.cpp | 3
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp | 6
-rw-r--r--  src/mongo/dbtests/oplogstarttests.cpp | 11
-rw-r--r--  src/mongo/dbtests/pdfiletests.cpp | 7
-rw-r--r--  src/mongo/dbtests/plan_ranking.cpp | 2
-rw-r--r--  src/mongo/dbtests/query_plan_executor.cpp | 12
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp | 11
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp | 6
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp | 11
-rw-r--r--  src/mongo/dbtests/query_stage_distinct.cpp | 6
-rw-r--r--  src/mongo/dbtests/query_stage_fetch.cpp | 7
-rw-r--r--  src/mongo/dbtests/query_stage_ixscan.cpp | 10
-rw-r--r--  src/mongo/dbtests/query_stage_multiplan.cpp | 8
-rw-r--r--  src/mongo/dbtests/query_stage_tests.cpp | 4
-rw-r--r--  src/mongo/dbtests/query_stage_update.cpp | 4
-rw-r--r--  src/mongo/dbtests/querytests.cpp | 30
-rw-r--r--  src/mongo/dbtests/repltests.cpp | 22
-rw-r--r--  src/mongo/dbtests/rollbacktests.cpp | 34
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp | 1
130 files changed, 838 insertions, 925 deletions
diff --git a/jstests/core/operation_latency_histogram.js b/jstests/core/operation_latency_histogram.js
index 6e6f7ec1539..0e616b8c313 100644
--- a/jstests/core/operation_latency_histogram.js
+++ b/jstests/core/operation_latency_histogram.js
@@ -85,9 +85,7 @@
for (var i = 0; i < numRecords; i++) {
testColl.aggregate([{$match: {x: i}}, {$group: {_id: "$x"}}]);
}
- // TODO SERVER-24704: Agg is currently counted by Top as two operations, but should be counted
- // as one.
- lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 2 * numRecords, 0, 0);
+ lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0);
// Count
for (var i = 0; i < numRecords; i++) {
@@ -172,4 +170,4 @@
// Test non-command.
assert.commandFailed(testColl.runCommand("IHopeNobodyEverMakesThisACommand"));
lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/noPassthroughWithMongod/top_drop.js b/jstests/noPassthroughWithMongod/top_drop.js
index e9076671bae..cc6df88a274 100644
--- a/jstests/noPassthroughWithMongod/top_drop.js
+++ b/jstests/noPassthroughWithMongod/top_drop.js
@@ -36,8 +36,8 @@
tojson(res.totals));
// We allow an unexpected entry in top if the insert counter has been cleared. This is
- // probably due to a background job releasing an AutoGetCollectionForRead for that
- // collection.
+ // probably due to a background job releasing an AutoGetCollectionForReadCommand for
+ // that collection.
entriesInTop.forEach(function(coll) {
if (expectedEntryNames.includes(coll)) {
return;
diff --git a/src/mongo/db/auth/auth_index_d.cpp b/src/mongo/db/auth/auth_index_d.cpp
index 0fe036e03d5..c7ebb0a7054 100644
--- a/src/mongo/db/auth/auth_index_d.cpp
+++ b/src/mongo/db/auth/auth_index_d.cpp
@@ -85,7 +85,6 @@ Status verifySystemIndexes(OperationContext* opCtx) {
const NamespaceString systemUsers = AuthorizationManager::usersCollectionNamespace;
// Make sure the old unique index from v2.4 on system.users doesn't exist.
- ScopedTransaction scopedXact(opCtx, MODE_IX);
AutoGetDb autoDb(opCtx, systemUsers.db(), MODE_X);
if (!autoDb.getDb()) {
return Status::OK();
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp
index 6d497417ac0..0e259a9d11c 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp
@@ -78,7 +78,7 @@ Status AuthzManagerExternalStateMongod::findOne(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result) {
- AutoGetCollectionForRead ctx(opCtx, collectionName);
+ AutoGetCollectionForReadCommand ctx(opCtx, collectionName);
BSONObj found;
if (Helpers::findOne(opCtx, ctx.getCollection(), query, found)) {
diff --git a/src/mongo/db/catalog/apply_ops.cpp b/src/mongo/db/catalog/apply_ops.cpp
index 5e4e00aefb0..ca20b2b848a 100644
--- a/src/mongo/db/catalog/apply_ops.cpp
+++ b/src/mongo/db/catalog/apply_ops.cpp
@@ -156,7 +156,7 @@ Status _applyOps(OperationContext* opCtx,
//
// We do not have a wrapping WriteUnitOfWork so it is possible for a journal
// commit to happen with a subset of ops applied.
- Lock::GlobalWrite globalWriteLockDisallowTempRelease(opCtx->lockState());
+ Lock::GlobalWrite globalWriteLockDisallowTempRelease(opCtx);
// Ensures that yielding will not happen (see the comment above).
DEV {
@@ -309,8 +309,7 @@ Status applyOps(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& applyOpCmd,
BSONObjBuilder* result) {
- ScopedTransaction scopedXact(opCtx, MODE_X);
- Lock::GlobalWrite globalWriteLock(opCtx->lockState());
+ Lock::GlobalWrite globalWriteLock(opCtx);
bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
!repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(opCtx, dbName);
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index d05c1ef2f92..5d4ac36c168 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -53,7 +53,6 @@
namespace mongo {
Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionName) {
- ScopedTransaction scopedXact(opCtx, MODE_IX);
AutoGetDb autoDb(opCtx, collectionName.db(), MODE_X);
bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
@@ -242,7 +241,6 @@ Status convertToCapped(OperationContext* opCtx,
StringData dbname = collectionName.db();
StringData shortSource = collectionName.coll();
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetDb autoDb(opCtx, collectionName.db(), MODE_X);
bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index fdbfd26677b..a2df7e2c791 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -220,7 +220,6 @@ Status collMod(OperationContext* opCtx,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
StringData dbName = nss.db();
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetDb autoDb(opCtx, dbName, MODE_X);
Database* const db = autoDb.getDb();
Collection* coll = db ? db->getCollection(nss) : nullptr;
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 8ef63ac7ad7..a86944badf3 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -48,6 +48,7 @@
#include "mongo/db/catalog/index_create.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands/server_status_metric.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/curop.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/keypattern.h"
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index ec963bdf0a8..f8a39c20350 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -73,8 +73,7 @@ Status createCollection(OperationContext* opCtx,
options.hasField("$nExtents"));
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx->lockState(), dbName, MODE_X);
+ Lock::DBLock dbXLock(opCtx, dbName, MODE_X);
OldClientContext ctx(opCtx, nss.ns());
if (opCtx->writesAreReplicated() &&
!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss)) {
diff --git a/src/mongo/db/catalog/cursor_manager.cpp b/src/mongo/db/catalog/cursor_manager.cpp
index 2c9f9b2d9d8..b2079e9e03e 100644
--- a/src/mongo/db/catalog/cursor_manager.cpp
+++ b/src/mongo/db/catalog/cursor_manager.cpp
@@ -226,7 +226,7 @@ bool GlobalCursorIdCache::eraseCursor(OperationContext* opCtx, CursorId id, bool
// If not, then the cursor must be owned by a collection. Erase the cursor under the
// collection lock (to prevent the collection from going away during the erase).
- AutoGetCollectionForRead ctx(opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection) {
if (checkAuth)
@@ -266,7 +266,7 @@ std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* opCtx, int mil
// For each collection, time out its cursors under the collection lock (to prevent the
// collection from going away during the erase).
for (unsigned i = 0; i < todo.size(); i++) {
- AutoGetCollectionOrViewForRead ctx(opCtx, NamespaceString(todo[i]));
+ AutoGetCollectionOrViewForReadCommand ctx(opCtx, NamespaceString(todo[i]));
if (!ctx.getDb()) {
continue;
}
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 1f68f3256ee..a5287efea20 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -47,6 +47,7 @@
#include "mongo/db/catalog/database_catalog_entry.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/clientcursor.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/index/index_access_method.h"
@@ -581,8 +582,7 @@ const DatabaseCatalogEntry* Database::getDatabaseCatalogEntry() const {
}
void dropAllDatabasesExceptLocal(OperationContext* opCtx) {
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
vector<string> n;
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index 3699f1a76ad..70ced6c5e05 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -59,8 +59,6 @@ Status dropCollection(OperationContext* opCtx,
const std::string dbname = collectionName.db().toString();
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
-
AutoGetDb autoDb(opCtx, dbname, MODE_X);
Database* const db = autoDb.getDb();
Collection* coll = db ? db->getCollection(collectionName) : nullptr;
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 4c20d20ffa9..1819f05b72d 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -60,8 +60,7 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
AutoGetDb autoDB(opCtx, dbName, MODE_X);
Database* const db = autoDB.getDb();
if (!db) {
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index 7f3ee19710b..152c2d1243c 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -160,7 +160,6 @@ Status dropIndexes(OperationContext* opCtx,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetDb autoDb(opCtx, nss.db(), MODE_X);
bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp
index 9c540e4a102..ae22ff4d633 100644
--- a/src/mongo/db/catalog/index_catalog_entry.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/head_manager.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/index_descriptor.h"
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 86676854c4e..b0c5e6b3aa0 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -68,8 +68,7 @@ Status renameCollection(OperationContext* opCtx,
bool stayTemp) {
DisableDocumentValidation validationDisabler(opCtx);
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite globalWriteLock(opCtx->lockState());
+ Lock::GlobalWrite globalWriteLock(opCtx);
// We stay in source context the whole time. This is mostly to set the CurOp namespace.
OldClientContext ctx(opCtx, source.ns());
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 20455ad7c22..9c3252b83d6 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -144,8 +144,7 @@ struct Cloner::Fun {
invariant(from_collection.coll() != "system.indexes");
// XXX: can probably take dblock instead
- unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(opCtx, MODE_X));
- unique_ptr<Lock::GlobalWrite> globalWriteLock(new Lock::GlobalWrite(opCtx->lockState()));
+ unique_ptr<Lock::GlobalWrite> globalWriteLock(new Lock::GlobalWrite(opCtx));
uassert(
ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from_collection.ns()
@@ -198,13 +197,11 @@ struct Cloner::Fun {
}
opCtx->checkForInterrupt();
- scopedXact.reset();
globalWriteLock.reset();
CurOp::get(opCtx)->yielded();
- scopedXact.reset(new ScopedTransaction(opCtx, MODE_X));
- globalWriteLock.reset(new Lock::GlobalWrite(opCtx->lockState()));
+ globalWriteLock.reset(new Lock::GlobalWrite(opCtx));
// Check if everything is still all right.
if (opCtx->writesAreReplicated()) {
@@ -479,8 +476,7 @@ bool Cloner::copyCollection(OperationContext* opCtx,
auto sourceIndexes = _conn->getIndexSpecs(nss.ns(), QueryOption_SlaveOk);
auto idIndexSpec = getIdIndexSpec(sourceIndexes);
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbWrite(opCtx->lockState(), dbname, MODE_X);
+ Lock::DBLock dbWrite(opCtx, dbname, MODE_X);
uassert(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while copying collection " << ns << " (Cloner)",
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index 8548152087d..e5979d6d41b 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/cloner.h"
#include "mongo/db/commands.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/jsobj.h"
#include "mongo/s/grid.h"
@@ -119,8 +120,7 @@ public:
set<string> clonedColls;
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx->lockState(), dbname, MODE_X);
+ Lock::DBLock dbXLock(opCtx, dbname, MODE_X);
Cloner cloner;
Status status = cloner.copyDb(opCtx, dbname, from, opts, &clonedColls);
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 222e6d8887f..121d7d0a04a 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -118,7 +118,6 @@ public:
return false;
}
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetDb autoDb(opCtx, dbname, MODE_X);
NamespaceString nss(dbname, to);
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index e93b94a5892..c1b9a3d0a6c 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -144,7 +144,6 @@ public:
if (cmdObj.hasElement("validate"))
compactOptions.validateDocuments = cmdObj["validate"].trueValue();
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetDb autoDb(opCtx, db, MODE_X);
Database* const collDB = autoDb.getDb();
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index b13066949df..b665bcefb12 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/commands/copydb.h"
#include "mongo/db/commands/copydb_start_commands.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
@@ -226,12 +227,10 @@ public:
if (fromSelf) {
// SERVER-4328 todo lock just the two db's not everything for the fromself case
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
uassertStatusOK(cloner.copyDb(opCtx, todb, fromhost, cloneOptions, NULL));
} else {
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock lk(opCtx->lockState(), todb, MODE_X);
+ Lock::DBLock lk(opCtx, todb, MODE_X);
uassertStatusOK(cloner.copyDb(opCtx, todb, fromhost, cloneOptions, NULL));
}
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index b8418b0bff3..e5c7827ae0e 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -122,7 +122,7 @@ public:
}
// Acquire the db read lock.
- AutoGetCollectionOrViewForRead ctx(opCtx, request.getValue().getNs());
+ AutoGetCollectionOrViewForReadCommand ctx(opCtx, request.getValue().getNs());
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
@@ -187,7 +187,7 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility."));
}
- AutoGetCollectionOrViewForRead ctx(opCtx, request.getValue().getNs());
+ AutoGetCollectionOrViewForReadCommand ctx(opCtx, request.getValue().getNs());
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
diff --git a/src/mongo/db/commands/cpuprofile.cpp b/src/mongo/db/commands/cpuprofile.cpp
index 608a626aa92..00cddd59397 100644
--- a/src/mongo/db/commands/cpuprofile.cpp
+++ b/src/mongo/db/commands/cpuprofile.cpp
@@ -140,8 +140,7 @@ bool CpuProfilerStartCommand::run(OperationContext* opCtx,
std::string& errmsg,
BSONObjBuilder& result) {
// The DB lock here is just so we have IX on the global lock in order to prevent shutdown
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx->lockState(), db, MODE_X);
+ Lock::DBLock dbXLock(opCtx, db, MODE_X);
OldClientContext ctx(opCtx, db, false /* no shard version checking */);
std::string profileFilename = cmdObj[commandName]["profileFilename"].String();
@@ -159,8 +158,7 @@ bool CpuProfilerStopCommand::run(OperationContext* opCtx,
std::string& errmsg,
BSONObjBuilder& result) {
// The DB lock here is just so we have IX on the global lock in order to prevent shutdown
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx->lockState(), db, MODE_X);
+ Lock::DBLock dbXLock(opCtx, db, MODE_X);
OldClientContext ctx(opCtx, db, false /* no shard version checking */);
::ProfilerStop();
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 26901b5043e..93833f09b8e 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/catalog/index_key_validate.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/curop.h"
#include "mongo/db/index/index_descriptor.h"
@@ -247,8 +248,7 @@ public:
// now we know we have to create index(es)
// Note: createIndexes command does not currently respect shard versioning.
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbLock(opCtx->lockState(), ns.db(), MODE_X);
+ Lock::DBLock dbLock(opCtx, ns.db(), MODE_X);
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, ns)) {
return appendCommandStatus(
result,
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 0831a974ecc..10c75252cd2 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -293,8 +293,7 @@ public:
}
// Closing a database requires a global lock.
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
if (!dbHolder().get(opCtx, dbname)) {
// If the name doesn't make an exact match, check for a case insensitive match.
std::set<std::string> otherCasing = dbHolder().getNamesWithConflictingCasing(dbname);
@@ -399,11 +398,9 @@ public:
const bool readOnly = (profilingLevel < 0 || profilingLevel > 2);
const LockMode dbMode = readOnly ? MODE_S : MODE_X;
- const LockMode transactionMode = readOnly ? MODE_IS : MODE_IX;
Status status = Status::OK();
- ScopedTransaction transaction(opCtx, transactionMode);
AutoGetDb ctx(opCtx, dbname, dbMode);
Database* db = ctx.getDb();
@@ -482,9 +479,7 @@ public:
// This doesn't look like it requires exclusive DB lock, because it uses its own diag
// locking, but originally the lock was set to be WRITE, so preserving the behaviour.
- //
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx->lockState(), dbname, MODE_X);
+ Lock::DBLock dbXLock(opCtx, dbname, MODE_X);
// TODO (Kal): OldClientContext legacy, needs to be removed
{
@@ -773,7 +768,8 @@ public:
// Check shard version at startup.
// This will throw before we've done any work if shard version is outdated
// We drop and re-acquire these locks every document because md5'ing is expensive
- unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(opCtx, nss));
+ unique_ptr<AutoGetCollectionForReadCommand> ctx(
+ new AutoGetCollectionForReadCommand(opCtx, nss));
Collection* coll = ctx->getCollection();
auto statusWithPlanExecutor = getExecutor(opCtx,
@@ -819,7 +815,7 @@ public:
try {
// RELOCKED
- ctx.reset(new AutoGetCollectionForRead(opCtx, nss));
+ ctx.reset(new AutoGetCollectionForReadCommand(opCtx, nss));
} catch (const SendStaleConfigException& ex) {
LOG(1) << "chunk metadata changed during filemd5, will retarget and continue";
break;
@@ -919,7 +915,7 @@ public:
BSONObj keyPattern = jsobj.getObjectField("keyPattern");
bool estimate = jsobj["estimate"].trueValue();
- AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
+ AutoGetCollectionForReadCommand ctx(opCtx, NamespaceString(ns));
Collection* collection = ctx.getCollection();
long long numRecords = 0;
@@ -1168,7 +1164,6 @@ public:
// We lock the entire database in S-mode in order to ensure that the contents will not
// change for the stats snapshot. This might be unnecessary and if it becomes a
// performance issue, we can take IS lock and then lock collection-by-collection.
- ScopedTransaction scopedXact(opCtx, MODE_IS);
AutoGetDb autoDb(opCtx, ns, MODE_S);
result.append("db", ns);
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 0da2752a28d..d1761d1f103 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -109,7 +109,6 @@ public:
// We lock the entire database in S-mode in order to ensure that the contents will not
// change for the snapshot.
- ScopedTransaction scopedXact(opCtx, MODE_IS);
AutoGetDb autoDb(opCtx, ns, MODE_S);
Database* db = autoDb.getDb();
if (db) {
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index cec31d5f9a5..fee7ac17c58 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -134,7 +134,7 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility.");
}
- AutoGetCollectionOrViewForRead ctx(opCtx, nss);
+ AutoGetCollectionOrViewForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
@@ -189,7 +189,7 @@ public:
"http://dochub.mongodb.org/core/3.4-feature-compatibility."));
}
- AutoGetCollectionOrViewForRead ctx(opCtx, nss);
+ AutoGetCollectionOrViewForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 86082761cce..0857c0b025b 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -128,8 +128,7 @@ public:
LOG(0) << "CMD: reIndex " << toReIndexNs;
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx->lockState(), dbname, MODE_X);
+ Lock::DBLock dbXLock(opCtx, dbname, MODE_X);
OldClientContext ctx(opCtx, toReIndexNs.ns());
Collection* collection = ctx.db()->getCollection(toReIndexNs.ns());
diff --git a/src/mongo/db/commands/eval.cpp b/src/mongo/db/commands/eval.cpp
index 20945eb4828..cec1369e4ad 100644
--- a/src/mongo/db/commands/eval.cpp
+++ b/src/mongo/db/commands/eval.cpp
@@ -181,8 +181,7 @@ public:
return dbEval(opCtx, dbname, cmdObj, result, errmsg);
}
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
OldClientContext ctx(opCtx, dbname, false /* no shard version checking */);
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index d97f5cf0c54..e636c8f9987 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -211,7 +211,6 @@ void FeatureCompatibilityVersion::set(OperationContext* opCtx, StringData versio
std::vector<BSONObj> indexSpecs{k32IncompatibleIndexSpec};
{
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetOrCreateDb autoDB(opCtx, nss.db(), MODE_X);
uassert(ErrorCodes::NotMaster,
@@ -303,7 +302,6 @@ void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* opCtx,
std::vector<BSONObj> indexSpecs{k32IncompatibleIndexSpec};
{
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetOrCreateDb autoDB(opCtx, nss.db(), MODE_X);
IndexBuilder builder(k32IncompatibleIndexSpec, false);
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 5bb043d438a..d43e991950c 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -169,7 +169,7 @@ public:
// Acquire locks. If the namespace is a view, we release our locks and convert the query
// request into an aggregation command.
- AutoGetCollectionOrViewForRead ctx(opCtx, nss);
+ AutoGetCollectionOrViewForReadCommand ctx(opCtx, nss);
if (ctx.getView()) {
// Relinquish locks. The aggregation command will re-acquire them.
ctx.releaseLocksForView();
@@ -297,7 +297,7 @@ public:
// Acquire locks. If the query is on a view, we release our locks and convert the query
// request into an aggregation command.
- AutoGetCollectionOrViewForRead ctx(opCtx, nss);
+ AutoGetCollectionOrViewForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
// Relinquish locks. The aggregation command will re-acquire them.
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index dfe417b6c06..1cae66a6b5f 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -138,8 +138,7 @@ public:
// the simple fsync command case
if (sync) {
// can this be GlobalRead? and if it can, it should be nongreedy.
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite w(opCtx->lockState());
+ Lock::GlobalWrite w(opCtx);
// TODO SERVER-26822: Replace MMAPv1 specific calls with ones that are storage
// engine agnostic.
getDur().commitNow(opCtx);
@@ -148,7 +147,7 @@ public:
}
// Take a global IS lock to ensure the storage engine is not shutdown
- Lock::GlobalLock global(opCtx->lockState(), MODE_IS, UINT_MAX);
+ Lock::GlobalLock global(opCtx, MODE_IS, UINT_MAX);
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
result.append("numFiles", storageEngine->flushAllFiles(opCtx, sync));
return true;
@@ -345,8 +344,7 @@ void FSyncLockThread::run() {
try {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- ScopedTransaction transaction(&opCtx, MODE_X);
- Lock::GlobalWrite global(opCtx.lockState()); // No WriteUnitOfWork needed
+ Lock::GlobalWrite global(&opCtx); // No WriteUnitOfWork needed
try {
// TODO SERVER-26822: Replace MMAPv1 specific calls with ones that are storage engine
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index 3c26005121b..8345c82c9fb 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -111,7 +111,7 @@ public:
}
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- AutoGetCollectionForRead ctx(opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection) {
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index ed4b43a81a2..78c7d822404 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -185,7 +185,7 @@ public:
// Note that we declare our locks before our ClientCursorPin, in order to ensure that
// the pin's destructor is called before the lock destructors (so that the unpin occurs
// under the lock).
- std::unique_ptr<AutoGetCollectionForRead> ctx;
+ std::unique_ptr<AutoGetCollectionForReadCommand> ctx;
std::unique_ptr<Lock::DBLock> unpinDBLock;
std::unique_ptr<Lock::CollectionLock> unpinCollLock;
@@ -193,8 +193,8 @@ public:
if (request.nss.isListIndexesCursorNS() || request.nss.isListCollectionsCursorNS()) {
cursorManager = CursorManager::getGlobalCursorManager();
} else {
- ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(opCtx, request.nss);
- auto viewCtx = static_cast<AutoGetCollectionOrViewForRead*>(ctx.get());
+ ctx = stdx::make_unique<AutoGetCollectionOrViewForReadCommand>(opCtx, request.nss);
+ auto viewCtx = static_cast<AutoGetCollectionOrViewForReadCommand*>(ctx.get());
Collection* collection = ctx->getCollection();
if (!collection) {
// Rewrite a getMore on a view to a getMore on the original underlying collection.
@@ -251,7 +251,7 @@ public:
invariant(!unpinCollLock);
sleepFor(Milliseconds(10));
ctx.reset();
- ctx = stdx::make_unique<AutoGetCollectionForRead>(opCtx, request.nss);
+ ctx = stdx::make_unique<AutoGetCollectionForReadCommand>(opCtx, request.nss);
}
if (request.nss.ns() != cursor->ns()) {
@@ -402,7 +402,7 @@ public:
// CappedInsertNotifier.
curOp->setExpectedLatencyMs(durationCount<Milliseconds>(timeout));
- ctx.reset(new AutoGetCollectionForRead(opCtx, request.nss));
+ ctx.reset(new AutoGetCollectionForReadCommand(opCtx, request.nss));
exec->restoreState();
// We woke up because either the timed_wait expired, or there was more data. Either
@@ -463,7 +463,7 @@ public:
// earlier and need to reacquire it in order to clean up our ClientCursorPin.
if (cursor->isAggCursor()) {
invariant(NULL == ctx.get());
- unpinDBLock.reset(new Lock::DBLock(opCtx->lockState(), request.nss.db(), MODE_IS));
+ unpinDBLock.reset(new Lock::DBLock(opCtx, request.nss.db(), MODE_IS));
unpinCollLock.reset(
new Lock::CollectionLock(opCtx->lockState(), request.nss.ns(), MODE_IS));
}
@@ -567,7 +567,7 @@ public:
std::unique_ptr<Lock::CollectionLock> unpinCollLock;
if (cursor->isAggCursor()) {
- unpinDBLock.reset(new Lock::DBLock(opCtx->lockState(), request.nss.db(), MODE_IS));
+ unpinDBLock.reset(new Lock::DBLock(opCtx, request.nss.db(), MODE_IS));
unpinCollLock.reset(
new Lock::CollectionLock(opCtx->lockState(), request.nss.ns(), MODE_IS));
}
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index a9e6eeca6ca..4b41fc9e42c 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -134,7 +134,7 @@ private:
groupRequest.explain = true;
- AutoGetCollectionForRead ctx(opCtx, groupRequest.ns);
+ AutoGetCollectionForReadCommand ctx(opCtx, groupRequest.ns);
Collection* coll = ctx.getCollection();
auto statusWithPlanExecutor =
@@ -166,7 +166,7 @@ private:
return appendCommandStatus(result, parseRequestStatus);
}
- AutoGetCollectionForRead ctx(opCtx, groupRequest.ns);
+ AutoGetCollectionForReadCommand ctx(opCtx, groupRequest.ns);
Collection* coll = ctx.getCollection();
auto statusWithPlanExecutor =
diff --git a/src/mongo/db/commands/haystack.cpp b/src/mongo/db/commands/haystack.cpp
index d760ee9b866..f9a41f0ccd2 100644
--- a/src/mongo/db/commands/haystack.cpp
+++ b/src/mongo/db/commands/haystack.cpp
@@ -103,7 +103,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss = parseNsCollectionRequired(dbname, cmdObj);
- AutoGetCollectionForRead ctx(opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection) {
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 68230d587af..d85565ab49c 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -165,7 +165,7 @@ Status ListFilters::runIndexFilterCommand(OperationContext* opCtx,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
- AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
+ AutoGetCollectionForReadCommand ctx(opCtx, NamespaceString(ns));
QuerySettings* querySettings;
PlanCache* unused;
@@ -233,7 +233,7 @@ Status ClearFilters::runIndexFilterCommand(OperationContext* opCtx,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
- AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
+ AutoGetCollectionForReadCommand ctx(opCtx, NamespaceString(ns));
QuerySettings* querySettings;
PlanCache* planCache;
@@ -335,7 +335,7 @@ Status SetFilter::runIndexFilterCommand(OperationContext* opCtx,
BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
const NamespaceString nss(ns);
- AutoGetCollectionForRead ctx(opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(opCtx, nss);
QuerySettings* querySettings;
PlanCache* planCache;
diff --git a/src/mongo/db/commands/killcursors_cmd.cpp b/src/mongo/db/commands/killcursors_cmd.cpp
index e51e4d65f8a..c1526d990b5 100644
--- a/src/mongo/db/commands/killcursors_cmd.cpp
+++ b/src/mongo/db/commands/killcursors_cmd.cpp
@@ -48,7 +48,7 @@ private:
Status _killCursor(OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId) final {
- std::unique_ptr<AutoGetCollectionOrViewForRead> ctx;
+ std::unique_ptr<AutoGetCollectionOrViewForReadCommand> ctx;
CursorManager* cursorManager;
if (nss.isListIndexesCursorNS() || nss.isListCollectionsCursorNS()) {
@@ -57,7 +57,7 @@ private:
// data within a collection.
cursorManager = CursorManager::getGlobalCursorManager();
} else {
- ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(opCtx, nss);
+ ctx = stdx::make_unique<AutoGetCollectionOrViewForReadCommand>(opCtx, nss);
Collection* collection = ctx->getCollection();
ViewDefinition* view = ctx->getView();
if (view) {
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index a0f998fe346..da72422b4eb 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -255,7 +255,6 @@ public:
return appendCommandStatus(result, parseCursorStatus);
}
- ScopedTransaction scopedXact(opCtx, MODE_IS);
AutoGetDb autoDb(opCtx, dbname, MODE_S);
Database* db = autoDb.getDb();
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index ccc2f82cc49..ffe00a55362 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -114,8 +114,7 @@ public:
vector<string> dbNames;
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
{
- ScopedTransaction transaction(opCtx, MODE_IS);
- Lock::GlobalLock lk(opCtx->lockState(), MODE_IS, UINT_MAX);
+ Lock::GlobalLock lk(opCtx, MODE_IS, UINT_MAX);
storageEngine->listDatabases(&dbNames);
}
@@ -135,8 +134,7 @@ public:
if (filterNameOnly && !filter->matchesBSON(b.asTempObj()))
continue;
- ScopedTransaction transaction(opCtx, MODE_IS);
- Lock::DBLock dbLock(opCtx->lockState(), dbname, MODE_IS);
+ Lock::DBLock dbLock(opCtx, dbname, MODE_IS);
Database* db = dbHolder().get(opCtx, dbname);
if (!db)
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 940edb13eda..73e3d19289e 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -132,7 +132,7 @@ public:
return appendCommandStatus(result, parseCursorStatus);
}
- AutoGetCollectionForRead autoColl(opCtx, ns);
+ AutoGetCollectionForReadCommand autoColl(opCtx, ns);
if (!autoColl.getDb()) {
return appendCommandStatus(result,
Status(ErrorCodes::NamespaceNotFound, "no database"));
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 9d36a06731b..ffeecffaca5 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -369,7 +369,6 @@ Config::Config(const string& _dbname, const BSONObj& cmdObj) {
void State::dropTempCollections() {
if (!_config.tempNamespace.isEmpty()) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction scopedXact(_opCtx, MODE_IX);
AutoGetDb autoDb(_opCtx, _config.tempNamespace.db(), MODE_X);
if (auto db = autoDb.getDb()) {
WriteUnitOfWork wunit(_opCtx);
@@ -392,8 +391,7 @@ void State::dropTempCollections() {
repl::UnreplicatedWritesBlock uwb(_opCtx);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction scopedXact(_opCtx, MODE_IX);
- Lock::DBLock lk(_opCtx->lockState(), _config.incLong.db(), MODE_X);
+ Lock::DBLock lk(_opCtx, _config.incLong.db(), MODE_X);
if (Database* db = dbHolder().get(_opCtx, _config.incLong.ns())) {
WriteUnitOfWork wunit(_opCtx);
db->dropCollection(_opCtx, _config.incLong.ns());
@@ -611,9 +609,8 @@ long long State::postProcessCollection(OperationContext* opCtx,
invariant(!opCtx->lockState()->isLocked());
- ScopedTransaction transaction(opCtx, MODE_X);
// This must be global because we may write across different databases.
- Lock::GlobalWrite lock(opCtx->lockState());
+ Lock::GlobalWrite lock(opCtx);
holdingGlobalLock = true;
return postProcessCollectionNonAtomic(opCtx, curOp, pm, holdingGlobalLock);
}
@@ -626,10 +623,10 @@ unsigned long long _collectionCount(OperationContext* opCtx,
const NamespaceString& nss,
bool callerHoldsGlobalLock) {
Collection* coll = nullptr;
- boost::optional<AutoGetCollectionForRead> ctx;
+ boost::optional<AutoGetCollectionForReadCommand> ctx;
- // If the global write lock is held, we must avoid using AutoGetCollectionForRead as it may lead
- // to deadlock when waiting for a majority snapshot to be committed. See SERVER-24596.
+ // If the global write lock is held, we must avoid using AutoGetCollectionForReadCommand as it
+ // may lead to deadlock when waiting for a majority snapshot to be committed. See SERVER-24596.
if (callerHoldsGlobalLock) {
Database* db = dbHolder().get(opCtx, nss.ns());
if (db) {
@@ -654,9 +651,8 @@ long long State::postProcessCollectionNonAtomic(OperationContext* opCtx,
if (_config.outputOptions.outType == Config::REPLACE ||
_collectionCount(opCtx, _config.outputOptions.finalNamespace, callerHoldsGlobalLock) == 0) {
- ScopedTransaction transaction(opCtx, MODE_X);
// This must be global because we may write across different databases.
- Lock::GlobalWrite lock(opCtx->lockState());
+ Lock::GlobalWrite lock(opCtx);
// replace: just rename from temp to final collection name, dropping previous collection
_db.dropCollection(_config.outputOptions.finalNamespace.ns());
BSONObj info;
@@ -682,9 +678,7 @@ long long State::postProcessCollectionNonAtomic(OperationContext* opCtx,
}
unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace.ns(), BSONObj());
while (cursor->more()) {
- ScopedTransaction scopedXact(opCtx, MODE_X);
- Lock::DBLock lock(
- opCtx->lockState(), _config.outputOptions.finalNamespace.db(), MODE_X);
+ Lock::DBLock lock(opCtx, _config.outputOptions.finalNamespace.db(), MODE_X);
BSONObj o = cursor->nextSafe();
Helpers::upsert(opCtx, _config.outputOptions.finalNamespace.ns(), o);
pm.hit();
@@ -704,9 +698,8 @@ long long State::postProcessCollectionNonAtomic(OperationContext* opCtx,
}
unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace.ns(), BSONObj());
while (cursor->more()) {
- ScopedTransaction transaction(opCtx, MODE_X);
// This must be global because we may write across different databases.
- Lock::GlobalWrite lock(opCtx->lockState());
+ Lock::GlobalWrite lock(opCtx);
BSONObj temp = cursor->nextSafe();
BSONObj old;
@@ -1088,7 +1081,8 @@ void State::finalReduce(OperationContext* opCtx, CurOp* curOp, ProgressMeterHold
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_opCtx, "finalReduce", _config.incLong.ns());
- unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(_opCtx, _config.incLong));
+ unique_ptr<AutoGetCollectionForReadCommand> ctx(
+ new AutoGetCollectionForReadCommand(_opCtx, _config.incLong));
BSONObj prev;
BSONList all;
@@ -1143,7 +1137,7 @@ void State::finalReduce(OperationContext* opCtx, CurOp* curOp, ProgressMeterHold
// reduce a finalize array
finalReduce(all);
- ctx.reset(new AutoGetCollectionForRead(_opCtx, _config.incLong));
+ ctx.reset(new AutoGetCollectionForReadCommand(_opCtx, _config.incLong));
all.clear();
prev = o;
@@ -1163,7 +1157,7 @@ void State::finalReduce(OperationContext* opCtx, CurOp* curOp, ProgressMeterHold
ctx.reset();
// reduce and finalize last array
finalReduce(all);
- ctx.reset(new AutoGetCollectionForRead(_opCtx, _config.incLong));
+ ctx.reset(new AutoGetCollectionForReadCommand(_opCtx, _config.incLong));
pm.finished();
}
@@ -1405,7 +1399,7 @@ public:
unique_ptr<RangePreserver> rangePreserver;
ScopedCollectionMetadata collMetadata;
{
- AutoGetCollectionForRead ctx(opCtx, config.nss);
+ AutoGetCollectionForReadCommand ctx(opCtx, config.nss);
Collection* collection = ctx.getCollection();
if (collection) {
@@ -1424,8 +1418,8 @@ public:
// be done under the lock.
ON_BLOCK_EXIT([opCtx, &config, &rangePreserver] {
if (rangePreserver) {
- // Be sure not to use AutoGetCollectionForRead here, since that has side-effects
- // other than lock acquisition.
+ // Be sure not to use AutoGetCollectionForReadCommand here, since that has
+ // side-effects other than lock acquisition.
AutoGetCollection ctx(opCtx, config.nss, MODE_IS);
rangePreserver.reset();
}
@@ -1489,7 +1483,6 @@ public:
// useful cursor.
// Need lock and context to use it
- unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(opCtx, MODE_IS));
unique_ptr<AutoGetDb> scopedAutoDb(new AutoGetDb(opCtx, config.nss.db(), MODE_S));
auto qr = stdx::make_unique<QueryRequest>(config.nss);
@@ -1565,11 +1558,9 @@ public:
exec->saveState();
scopedAutoDb.reset();
- scopedXact.reset();
state.reduceAndSpillInMemoryStateIfNeeded();
- scopedXact.reset(new ScopedTransaction(opCtx, MODE_IS));
scopedAutoDb.reset(new AutoGetDb(opCtx, config.nss.db(), MODE_S));
if (!exec->restoreState()) {
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index 39d3d175ff0..0c59dca849a 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/commands.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/operation_context.h"
@@ -87,8 +88,7 @@ public:
return appendCommandStatus(result, status);
}
- ScopedTransaction scopedXact(opCtx, MODE_X);
- Lock::GlobalWrite globalWrite(opCtx->lockState());
+ Lock::GlobalWrite globalWrite(opCtx);
WriteUnitOfWork wuow(opCtx);
getGlobalServiceContext()->getOpObserver()->onOpMessage(opCtx, dataElement.Obj());
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index df783b46062..8ff2e100a58 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -93,7 +93,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString ns(parseNsCollectionRequired(dbname, cmdObj));
- AutoGetCollectionForRead ctx(opCtx, ns);
+ AutoGetCollectionForReadCommand ctx(opCtx, ns);
Collection* collection = ctx.getCollection();
if (!collection)
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index ff9bd872cf3..bb4eebcb998 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -227,7 +227,7 @@ Status PlanCacheListQueryShapes::runPlanCacheCommand(OperationContext* opCtx,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query cache is owned by the collection.
- AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
+ AutoGetCollectionForReadCommand ctx(opCtx, NamespaceString(ns));
PlanCache* planCache;
Status status = getPlanCache(opCtx, ctx.getCollection(), ns, &planCache);
@@ -279,7 +279,7 @@ Status PlanCacheClear::runPlanCacheCommand(OperationContext* opCtx,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query cache is owned by the collection.
- AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
+ AutoGetCollectionForReadCommand ctx(opCtx, NamespaceString(ns));
PlanCache* planCache;
Status status = getPlanCache(opCtx, ctx.getCollection(), ns, &planCache);
@@ -355,7 +355,7 @@ Status PlanCacheListPlans::runPlanCacheCommand(OperationContext* opCtx,
const std::string& ns,
BSONObj& cmdObj,
BSONObjBuilder* bob) {
- AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
+ AutoGetCollectionForReadCommand ctx(opCtx, NamespaceString(ns));
PlanCache* planCache;
Status status = getPlanCache(opCtx, ctx.getCollection(), ns, &planCache);
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index b5d7c2fde6f..ac6155cc394 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -75,7 +75,7 @@ public:
BSONObjBuilder& result) {
NamespaceString ns(parseNs(dbname, cmdObj));
- AutoGetCollectionForRead ctx(opCtx, ns);
+ AutoGetCollectionForReadCommand ctx(opCtx, ns);
Collection* collection = ctx.getCollection();
if (!collection) {
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 684b954b0c5..35164560c16 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -309,7 +309,7 @@ Status runAggregate(OperationContext* opCtx,
// same sharding version that we synchronize on here. This is also why we always need to
// create a ClientCursor even when we aren't outputting to a cursor. See the comment on
// ShardFilterStage for more details.
- AutoGetCollectionOrViewForRead ctx(opCtx, nss);
+ AutoGetCollectionOrViewForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
// If this is a view, resolve it by finding the underlying collection and stitching view
@@ -487,7 +487,7 @@ Status runAggregate(OperationContext* opCtx,
// AutoGetCollectionForRead. AutoGetCollectionForRead will throw if the
// sharding version is out of date, and we don't care if the sharding version
// has changed.
- Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
+ Lock::DBLock dbLock(opCtx, nss.db(), MODE_IS);
Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
if (keepCursor) {
pin->release();
@@ -498,7 +498,7 @@ Status runAggregate(OperationContext* opCtx,
} catch (...) {
// On our way out of scope, we clean up our ClientCursorPin if needed.
if (pin) {
- Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_IS);
+ Lock::DBLock dbLock(opCtx, nss.db(), MODE_IS);
Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
pin->deleteUnderlying();
}
diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp
index 8ab963eb71e..1eb6e2c2126 100644
--- a/src/mongo/db/commands/snapshot_management.cpp
+++ b/src/mongo/db/commands/snapshot_management.cpp
@@ -32,6 +32,7 @@
#include "mongo/base/init.h"
#include "mongo/db/commands.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/service_context.h"
@@ -75,8 +76,7 @@ public:
return appendCommandStatus(result, {ErrorCodes::CommandNotSupported, ""});
}
- ScopedTransaction st(opCtx, MODE_IX);
- Lock::GlobalLock lk(opCtx->lockState(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lk(opCtx, MODE_IX, UINT_MAX);
auto status = snapshotManager->prepareForCreateSnapshot(opCtx);
if (status.isOK()) {
@@ -126,8 +126,7 @@ public:
return appendCommandStatus(result, {ErrorCodes::CommandNotSupported, ""});
}
- ScopedTransaction st(opCtx, MODE_IX);
- Lock::GlobalLock lk(opCtx->lockState(), MODE_IX, UINT_MAX);
+ Lock::GlobalLock lk(opCtx, MODE_IX, UINT_MAX);
auto name = SnapshotName(cmdObj.firstElement().Long());
snapshotManager->setCommittedSnapshot(name);
return true;
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 34bc757d554..685e8e7a013 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -85,8 +85,7 @@ public:
log() << "test only command godinsert invoked coll:" << nss.coll();
BSONObj obj = cmdObj["obj"].embeddedObjectUserCheck();
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock lk(opCtx->lockState(), dbname, MODE_X);
+ Lock::DBLock lk(opCtx, dbname, MODE_X);
OldClientContext ctx(opCtx, nss.ns());
Database* db = ctx.db();
@@ -141,14 +140,12 @@ public:
std::vector<Privilege>* out) {}
void _sleepInReadLock(mongo::OperationContext* opCtx, long long millis) {
- ScopedTransaction transaction(opCtx, MODE_S);
- Lock::GlobalRead lk(opCtx->lockState());
+ Lock::GlobalRead lk(opCtx);
sleepmillis(millis);
}
void _sleepInWriteLock(mongo::OperationContext* opCtx, long long millis) {
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
sleepmillis(millis);
}
diff --git a/src/mongo/db/commands/touch.cpp b/src/mongo/db/commands/touch.cpp
index 1f28da9e3fc..7cae0f979ee 100644
--- a/src/mongo/db/commands/touch.cpp
+++ b/src/mongo/db/commands/touch.cpp
@@ -102,7 +102,7 @@ public:
return false;
}
- AutoGetCollectionForRead context(opCtx, nss);
+ AutoGetCollectionForReadCommand context(opCtx, nss);
Collection* collection = context.getCollection();
if (!collection) {
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 3c30ddc9e66..b6896022583 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -328,7 +328,6 @@ public:
// Explains of write commands are read-only, but we take write locks so that timing
// info is more accurate.
- ScopedTransaction scopedXact(opCtx, MODE_IX);
AutoGetCollection collection(opCtx, batch.ns, MODE_IX);
auto exec = uassertStatusOK(getExecutorUpdate(
@@ -396,7 +395,6 @@ public:
// Explains of write commands are read-only, but we take write locks so that timing
// info is more accurate.
- ScopedTransaction scopedXact(opCtx, MODE_IX);
AutoGetCollection collection(opCtx, batch.ns, MODE_IX);
// Explain the plan tree.
diff --git a/src/mongo/db/concurrency/SConscript b/src/mongo/db/concurrency/SConscript
index 16a254db7aa..6a41d6e8564 100644
--- a/src/mongo/db/concurrency/SConscript
+++ b/src/mongo/db/concurrency/SConscript
@@ -45,6 +45,7 @@ env.CppUnitTest(
],
LIBDEPS=[
'$BUILD_DIR/mongo/util/progress_meter',
+ '$BUILD_DIR/mongo/db/service_context_noop_init',
'lock_manager',
]
)
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index e12f38a13fe..5360ca958f2 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -135,51 +135,53 @@ bool Lock::ResourceMutex::isAtLeastReadLocked(Locker* locker) {
return locker->isLockHeldForMode(_rid, MODE_IS);
}
-Lock::GlobalLock::GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs)
- : GlobalLock(locker, lockMode, EnqueueOnly()) {
+Lock::GlobalLock::GlobalLock(OperationContext* opCtx, LockMode lockMode, unsigned timeoutMs)
+ : GlobalLock(opCtx, lockMode, EnqueueOnly()) {
waitForLock(timeoutMs);
}
-Lock::GlobalLock::GlobalLock(Locker* locker, LockMode lockMode, EnqueueOnly enqueueOnly)
- : _locker(locker), _result(LOCK_INVALID), _pbwm(locker, resourceIdParallelBatchWriterMode) {
+Lock::GlobalLock::GlobalLock(OperationContext* opCtx, LockMode lockMode, EnqueueOnly enqueueOnly)
+ : _opCtx(opCtx),
+ _result(LOCK_INVALID),
+ _pbwm(opCtx->lockState(), resourceIdParallelBatchWriterMode) {
_enqueue(lockMode);
}
void Lock::GlobalLock::_enqueue(LockMode lockMode) {
- if (_locker->shouldConflictWithSecondaryBatchApplication()) {
+ if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
_pbwm.lock(MODE_IS);
}
- _result = _locker->lockGlobalBegin(lockMode);
+ _result = _opCtx->lockState()->lockGlobalBegin(lockMode);
}
void Lock::GlobalLock::waitForLock(unsigned timeoutMs) {
if (_result == LOCK_WAITING) {
- _result = _locker->lockGlobalComplete(timeoutMs);
+ _result = _opCtx->lockState()->lockGlobalComplete(timeoutMs);
}
- if (_result != LOCK_OK && _locker->shouldConflictWithSecondaryBatchApplication()) {
+ if (_result != LOCK_OK && _opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
_pbwm.unlock();
}
}
void Lock::GlobalLock::_unlock() {
if (isLocked()) {
- _locker->unlockGlobal();
+ _opCtx->lockState()->unlockGlobal();
_result = LOCK_INVALID;
}
}
-Lock::DBLock::DBLock(Locker* locker, StringData db, LockMode mode)
+Lock::DBLock::DBLock(OperationContext* opCtx, StringData db, LockMode mode)
: _id(RESOURCE_DATABASE, db),
- _locker(locker),
+ _opCtx(opCtx),
_mode(mode),
- _globalLock(locker, isSharedLockMode(_mode) ? MODE_IS : MODE_IX, UINT_MAX) {
+ _globalLock(opCtx, isSharedLockMode(_mode) ? MODE_IS : MODE_IX, UINT_MAX) {
massert(28539, "need a valid database name", !db.empty() && nsIsDbOnly(db));
// Need to acquire the flush lock
- _locker->lockMMAPV1Flush();
+ _opCtx->lockState()->lockMMAPV1Flush();
// The check for the admin db is to ensure direct writes to auth collections
// are serialized (see SERVER-16092).
@@ -187,24 +189,24 @@ Lock::DBLock::DBLock(Locker* locker, StringData db, LockMode mode)
_mode = MODE_X;
}
- invariant(LOCK_OK == _locker->lock(_id, _mode));
+ invariant(LOCK_OK == _opCtx->lockState()->lock(_id, _mode));
}
Lock::DBLock::~DBLock() {
- _locker->unlock(_id);
+ _opCtx->lockState()->unlock(_id);
}
void Lock::DBLock::relockWithMode(LockMode newMode) {
// 2PL would delay the unlocking
- invariant(!_locker->inAWriteUnitOfWork());
+ invariant(!_opCtx->lockState()->inAWriteUnitOfWork());
// Not allowed to change global intent
invariant(!isSharedLockMode(_mode) || isSharedLockMode(newMode));
- _locker->unlock(_id);
+ _opCtx->lockState()->unlock(_id);
_mode = newMode;
- invariant(LOCK_OK == _locker->lock(_id, _mode));
+ invariant(LOCK_OK == _opCtx->lockState()->lock(_id, _mode));
}
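
The DBLock constructor above keeps the pre-existing special case for the "admin" database (SERVER-16092): an intent-write (MODE_IX) request is upgraded to a full MODE_X lock. A two-line sketch of the observable effect, assuming an OperationContext* opCtx from the calling code; the DBLockTakesXForAdminIX test later in this patch exercises the same behaviour:

    Lock::DBLock adminLock(opCtx, "admin", MODE_IX);
    // The intent request was upgraded, so the admin database is now locked in MODE_X.
    // resourceIdAdminDB is the same constant the tests below use.
    invariant(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_X);
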
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index 0d93b3ef2d0..5eb5de3a5e2 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -31,6 +31,7 @@
#include <climits> // For UINT_MAX
#include "mongo/db/concurrency/locker.h"
+#include "mongo/db/operation_context.h"
#include "mongo/util/timer.h"
namespace mongo {
@@ -161,23 +162,30 @@ public:
* Global lock.
*
* Grabs global resource lock. Allows further (recursive) acquisition of the global lock
- * in any mode, see LockMode.
+ * in any mode, see LockMode. An outermost GlobalLock calls abandonSnapshot() on destruction, so
+ * that the storage engine can release resources, such as snapshots or locks, that it may have
+ * acquired during the transaction. Note that any writes are committed in nested WriteUnitOfWork
+ * scopes, so write conflicts cannot happen when releasing the GlobalLock.
+ *
* NOTE: Does not acquire flush lock.
*/
class GlobalLock {
public:
class EnqueueOnly {};
- GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs);
+ GlobalLock(OperationContext* opCtx, LockMode lockMode, unsigned timeoutMs);
/**
* Enqueues lock but does not block on lock acquisition.
* Call waitForLock() to complete locking process.
*/
- GlobalLock(Locker* locker, LockMode lockMode, EnqueueOnly enqueueOnly);
+ GlobalLock(OperationContext* opCtx, LockMode lockMode, EnqueueOnly enqueueOnly);
~GlobalLock() {
_unlock();
+ if (!_opCtx->lockState()->isLocked()) {
+ _opCtx->recoveryUnit()->abandonSnapshot();
+ }
}
/**
@@ -193,7 +201,7 @@ public:
void _enqueue(LockMode lockMode);
void _unlock();
- Locker* const _locker;
+ OperationContext* const _opCtx;
LockResult _result;
ResourceLock _pbwm;
};
@@ -208,10 +216,10 @@ public:
*/
class GlobalWrite : public GlobalLock {
public:
- explicit GlobalWrite(Locker* locker, unsigned timeoutMs = UINT_MAX)
- : GlobalLock(locker, MODE_X, timeoutMs) {
+ explicit GlobalWrite(OperationContext* opCtx, unsigned timeoutMs = UINT_MAX)
+ : GlobalLock(opCtx, MODE_X, timeoutMs) {
if (isLocked()) {
- locker->lockMMAPV1Flush();
+ opCtx->lockState()->lockMMAPV1Flush();
}
}
};
@@ -226,10 +234,10 @@ public:
*/
class GlobalRead : public GlobalLock {
public:
- explicit GlobalRead(Locker* locker, unsigned timeoutMs = UINT_MAX)
- : GlobalLock(locker, MODE_S, timeoutMs) {
+ explicit GlobalRead(OperationContext* opCtx, unsigned timeoutMs = UINT_MAX)
+ : GlobalLock(opCtx, MODE_S, timeoutMs) {
if (isLocked()) {
- locker->lockMMAPV1Flush();
+ opCtx->lockState()->lockMMAPV1Flush();
}
}
};
@@ -251,7 +259,7 @@ public:
*/
class DBLock {
public:
- DBLock(Locker* locker, StringData db, LockMode mode);
+ DBLock(OperationContext* opCtx, StringData db, LockMode mode);
~DBLock();
/**
@@ -264,7 +272,7 @@ public:
private:
const ResourceId _id;
- Locker* const _locker;
+ OperationContext* const _opCtx;
// May be changed through relockWithMode. The global lock mode won't change though,
// because we never change from IS/S to IX/X or vice versa, just convert locks from
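
The destructor behaviour documented above is easiest to see at a call site: once the outermost global lock guard goes out of scope and the Locker holds nothing else, the OperationContext's recovery unit abandons its snapshot. A minimal sketch, assuming an OperationContext* opCtx supplied by the surrounding code:

    {
        Lock::GlobalLock globalLock(opCtx, MODE_IS, UINT_MAX);
        invariant(globalLock.isLocked());
        // ... read from the storage engine under the snapshot owned by
        // opCtx->recoveryUnit() ...
    }
    // globalLock has been destroyed: the global lock was released and, because no locks
    // remain held by this operation's Locker, opCtx->recoveryUnit()->abandonSnapshot()
    // was called so the storage engine can release the snapshot.
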
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 1058d8a0163..2c497e4fa84 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/lock_manager_test_help.h"
#include "mongo/stdx/functional.h"
+#include "mongo/stdx/memory.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/debug_util.h"
@@ -69,53 +70,94 @@ private:
bool _oldSupportsDocLocking;
};
-/**
- * Calls fn the given number of iterations, spread out over up to maxThreads threads.
- * The threadNr passed is an integer between 0 and maxThreads exclusive. Logs timing
- * statistics for for all power-of-two thread counts from 1 up to maxThreds.
- */
-void perfTest(stdx::function<void(int threadNr)> fn, int maxThreads) {
- for (int numThreads = 1; numThreads <= maxThreads; numThreads *= 2) {
- std::vector<stdx::thread> threads;
-
- AtomicInt32 ready{0};
- AtomicInt64 elapsedNanos{0};
- AtomicInt64 timedIters{0};
-
- for (int threadId = 0; threadId < numThreads; threadId++)
- threads.emplace_back([&, threadId]() {
- // Busy-wait until everybody is ready
- ready.fetchAndAdd(1);
- while (ready.load() < numThreads) {
- }
- uint64_t micros = 0;
- int iters;
- // Ensure at least 16 iterations are done and at least 25 milliseconds is timed
- for (iters = 16; iters < (1 << 30) && micros < kMinPerfMillis * 1000; iters *= 2) {
- // Measure the number of loops
- Timer t;
+class DConcurrencyTestFixture : public unittest::Test {
+public:
+ DConcurrencyTestFixture() : _client(getGlobalServiceContext()->makeClient("testClient")) {}
+
+ /**
+ * Constructs and returns a new OperationContext.
+ */
+ ServiceContext::UniqueOperationContext makeOpCtx() const {
+ auto opCtx = _client->makeOperationContext();
+ opCtx->releaseLockState();
+ return opCtx;
+ }
+
+ /**
+ * Returns a vector of 'k' (Client, OperationContext) pairs, where each OperationContext has
+ * its lockState set to an instance of 'LockerType'.
+ */
+ template <typename LockerType>
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ makeKClientsWithLockers(int k) {
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients;
+ clients.reserve(k);
+ for (int i = 0; i < k; ++i) {
+ auto client = getGlobalServiceContext()->makeClient(
+ str::stream() << "test client for thread " << i);
+ auto opCtx = client->makeOperationContext();
+ opCtx->releaseLockState();
+ opCtx->setLockState(stdx::make_unique<LockerType>());
+ clients.emplace_back(std::move(client), std::move(opCtx));
+ }
+ return clients;
+ }
+
+ /**
+ * Calls fn the given number of iterations, spread out over up to maxThreads threads.
+ * The threadNr passed is an integer between 0 and maxThreads exclusive. Logs timing
+ * statistics for all power-of-two thread counts from 1 up to maxThreads.
+ */
+ void perfTest(stdx::function<void(int threadNr)> fn, int maxThreads) {
+ for (int numThreads = 1; numThreads <= maxThreads; numThreads *= 2) {
+ std::vector<stdx::thread> threads;
+
+ AtomicInt32 ready{0};
+ AtomicInt64 elapsedNanos{0};
+ AtomicInt64 timedIters{0};
+
+ for (int threadId = 0; threadId < numThreads; threadId++)
+ threads.emplace_back([&, threadId]() {
+ // Busy-wait until everybody is ready
+ ready.fetchAndAdd(1);
+ while (ready.load() < numThreads) {
+ }
- for (int i = 0; i < iters; i++)
- fn(threadId);
+ uint64_t micros = 0;
+ int iters;
+ // Ensure at least 16 iterations are done and at least 25 milliseconds is timed
+ for (iters = 16; iters < (1 << 30) && micros < kMinPerfMillis * 1000;
+ iters *= 2) {
+ // Measure the number of loops
+ Timer t;
- micros = t.micros();
- }
+ for (int i = 0; i < iters; i++)
+ fn(threadId);
- elapsedNanos.fetchAndAdd(micros * 1000);
- timedIters.fetchAndAdd(iters);
- });
+ micros = t.micros();
+ }
+
+ elapsedNanos.fetchAndAdd(micros * 1000);
+ timedIters.fetchAndAdd(iters);
+ });
- for (auto& thread : threads)
- thread.join();
+ for (auto& thread : threads)
+ thread.join();
- log() << numThreads
- << " threads took: " << elapsedNanos.load() / static_cast<double>(timedIters.load())
- << " ns per call" << (kDebugBuild ? " (DEBUG BUILD!)" : "");
+ log() << numThreads << " threads took: "
+ << elapsedNanos.load() / static_cast<double>(timedIters.load()) << " ns per call"
+ << (kDebugBuild ? " (DEBUG BUILD!)" : "");
+ }
}
-}
-TEST(DConcurrency, ResourceMutex) {
+private:
+ ServiceContext::UniqueClient _client;
+};
+
+
+TEST_F(DConcurrencyTestFixture, ResourceMutex) {
Lock::ResourceMutex mtx("testMutex");
DefaultLockerImpl locker1;
DefaultLockerImpl locker2;
@@ -197,284 +239,292 @@ TEST(DConcurrency, ResourceMutex) {
t3.join();
}
-TEST(DConcurrency, GlobalRead) {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead globalRead(&ls);
- ASSERT(ls.isR());
+TEST_F(DConcurrencyTestFixture, GlobalRead) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::GlobalRead globalRead(opCtx.get());
+ ASSERT(opCtx->lockState()->isR());
}
-TEST(DConcurrency, GlobalWrite) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
- ASSERT(ls.isW());
+TEST_F(DConcurrencyTestFixture, GlobalWrite) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::GlobalWrite globalWrite(opCtx.get());
+ ASSERT(opCtx->lockState()->isW());
}
-TEST(DConcurrency, GlobalWriteAndGlobalRead) {
- MMAPV1LockerImpl ls;
+TEST_F(DConcurrencyTestFixture, GlobalWriteAndGlobalRead) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
- Lock::GlobalWrite globalWrite(&ls);
- ASSERT(ls.isW());
+ Lock::GlobalWrite globalWrite(opCtx.get());
+ ASSERT(lockState->isW());
{
- Lock::GlobalRead globalRead(&ls);
- ASSERT(ls.isW());
+ Lock::GlobalRead globalRead(opCtx.get());
+ ASSERT(lockState->isW());
}
- ASSERT(ls.isW());
+ ASSERT(lockState->isW());
}
-TEST(DConcurrency, GlobalLockS_Timeout) {
- MMAPV1LockerImpl ls;
- Lock::GlobalLock globalWrite(&ls, MODE_X, 0);
+TEST_F(DConcurrencyTestFixture, GlobalLockS_Timeout) {
+ auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+
+ Lock::GlobalLock globalWrite(clients[0].second.get(), MODE_X, 0);
ASSERT(globalWrite.isLocked());
- {
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
- ASSERT(!globalReadTry.isLocked());
- }
+ Lock::GlobalLock globalReadTry(clients[1].second.get(), MODE_S, 1);
+ ASSERT(!globalReadTry.isLocked());
}
-TEST(DConcurrency, GlobalLockX_Timeout) {
- MMAPV1LockerImpl ls;
- Lock::GlobalLock globalWrite(&ls, MODE_X, 0);
+TEST_F(DConcurrencyTestFixture, GlobalLockX_Timeout) {
+ auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ Lock::GlobalLock globalWrite(clients[0].second.get(), MODE_X, 0);
ASSERT(globalWrite.isLocked());
- {
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
- ASSERT(!globalWriteTry.isLocked());
- }
+ Lock::GlobalLock globalWriteTry(clients[1].second.get(), MODE_X, 1);
+ ASSERT(!globalWriteTry.isLocked());
}
-TEST(DConcurrency, GlobalLockS_NoTimeoutDueToGlobalLockS) {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead globalRead(&ls);
+TEST_F(DConcurrencyTestFixture, GlobalLockS_NoTimeoutDueToGlobalLockS) {
+ auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
+ Lock::GlobalRead globalRead(clients[0].second.get());
+ Lock::GlobalLock globalReadTry(clients[1].second.get(), MODE_S, 1);
ASSERT(globalReadTry.isLocked());
}
-TEST(DConcurrency, GlobalLockX_TimeoutDueToGlobalLockS) {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead globalRead(&ls);
+TEST_F(DConcurrencyTestFixture, GlobalLockX_TimeoutDueToGlobalLockS) {
+ auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
+ Lock::GlobalRead globalRead(clients[0].second.get());
+ Lock::GlobalLock globalWriteTry(clients[1].second.get(), MODE_X, 1);
ASSERT(!globalWriteTry.isLocked());
}
-TEST(DConcurrency, GlobalLockS_TimeoutDueToGlobalLockX) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
+TEST_F(DConcurrencyTestFixture, GlobalLockS_TimeoutDueToGlobalLockX) {
+ auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
+ Lock::GlobalWrite globalWrite(clients[0].second.get());
+ Lock::GlobalLock globalReadTry(clients[1].second.get(), MODE_S, 1);
ASSERT(!globalReadTry.isLocked());
}
-TEST(DConcurrency, GlobalLockX_TimeoutDueToGlobalLockX) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
+TEST_F(DConcurrencyTestFixture, GlobalLockX_TimeoutDueToGlobalLockX) {
+ auto clients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
+ Lock::GlobalWrite globalWrite(clients[0].second.get());
+ Lock::GlobalLock globalWriteTry(clients[1].second.get(), MODE_X, 1);
ASSERT(!globalWriteTry.isLocked());
}
-TEST(DConcurrency, TempReleaseGlobalWrite) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
+TEST_F(DConcurrencyTestFixture, TempReleaseGlobalWrite) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
+ Lock::GlobalWrite globalWrite(opCtx.get());
{
- Lock::TempRelease tempRelease(&ls);
- ASSERT(!ls.isLocked());
+ Lock::TempRelease tempRelease(lockState);
+ ASSERT(!lockState->isLocked());
}
- ASSERT(ls.isW());
+ ASSERT(lockState->isW());
}
-TEST(DConcurrency, TempReleaseRecursive) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
- Lock::DBLock lk(&ls, "SomeDBName", MODE_X);
+TEST_F(DConcurrencyTestFixture, TempReleaseRecursive) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
+ Lock::GlobalWrite globalWrite(opCtx.get());
+ Lock::DBLock lk(opCtx.get(), "SomeDBName", MODE_X);
{
- Lock::TempRelease tempRelease(&ls);
- ASSERT(ls.isW());
- ASSERT(ls.isDbLockedForMode("SomeDBName", MODE_X));
+ Lock::TempRelease tempRelease(lockState);
+ ASSERT(lockState->isW());
+ ASSERT(lockState->isDbLockedForMode("SomeDBName", MODE_X));
}
- ASSERT(ls.isW());
+ ASSERT(lockState->isW());
}
-TEST(DConcurrency, DBLockTakesS) {
- MMAPV1LockerImpl ls;
-
- Lock::DBLock dbRead(&ls, "db", MODE_S);
+TEST_F(DConcurrencyTestFixture, DBLockTakesS) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock dbRead(opCtx.get(), "db", MODE_S);
const ResourceId resIdDb(RESOURCE_DATABASE, std::string("db"));
- ASSERT(ls.getLockMode(resIdDb) == MODE_S);
+ ASSERT(opCtx->lockState()->getLockMode(resIdDb) == MODE_S);
}
-TEST(DConcurrency, DBLockTakesX) {
- MMAPV1LockerImpl ls;
-
- Lock::DBLock dbWrite(&ls, "db", MODE_X);
+TEST_F(DConcurrencyTestFixture, DBLockTakesX) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock dbWrite(opCtx.get(), "db", MODE_X);
const ResourceId resIdDb(RESOURCE_DATABASE, std::string("db"));
- ASSERT(ls.getLockMode(resIdDb) == MODE_X);
+ ASSERT(opCtx->lockState()->getLockMode(resIdDb) == MODE_X);
}
-TEST(DConcurrency, DBLockTakesISForAdminIS) {
- DefaultLockerImpl ls;
-
- Lock::DBLock dbRead(&ls, "admin", MODE_IS);
+TEST_F(DConcurrencyTestFixture, DBLockTakesISForAdminIS) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock dbRead(opCtx.get(), "admin", MODE_IS);
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_IS);
+ ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_IS);
}
-TEST(DConcurrency, DBLockTakesSForAdminS) {
- DefaultLockerImpl ls;
+TEST_F(DConcurrencyTestFixture, DBLockTakesSForAdminS) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock dbRead(opCtx.get(), "admin", MODE_S);
- Lock::DBLock dbRead(&ls, "admin", MODE_S);
-
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_S);
+ ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_S);
}
-TEST(DConcurrency, DBLockTakesXForAdminIX) {
- DefaultLockerImpl ls;
-
- Lock::DBLock dbWrite(&ls, "admin", MODE_IX);
+TEST_F(DConcurrencyTestFixture, DBLockTakesXForAdminIX) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock dbWrite(opCtx.get(), "admin", MODE_IX);
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_X);
+ ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_X);
}
-TEST(DConcurrency, DBLockTakesXForAdminX) {
- DefaultLockerImpl ls;
+TEST_F(DConcurrencyTestFixture, DBLockTakesXForAdminX) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock dbWrite(opCtx.get(), "admin", MODE_X);
- Lock::DBLock dbWrite(&ls, "admin", MODE_X);
-
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_X);
+ ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_X);
}
-TEST(DConcurrency, MultipleWriteDBLocksOnSameThread) {
- MMAPV1LockerImpl ls;
-
- Lock::DBLock r1(&ls, "db1", MODE_X);
- Lock::DBLock r2(&ls, "db1", MODE_X);
+TEST_F(DConcurrencyTestFixture, MultipleWriteDBLocksOnSameThread) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ Lock::DBLock r1(opCtx.get(), "db1", MODE_X);
+ Lock::DBLock r2(opCtx.get(), "db1", MODE_X);
- ASSERT(ls.isDbLockedForMode("db1", MODE_X));
+ ASSERT(opCtx->lockState()->isDbLockedForMode("db1", MODE_X));
}
-TEST(DConcurrency, MultipleConflictingDBLocksOnSameThread) {
- MMAPV1LockerImpl ls;
-
- Lock::DBLock r1(&ls, "db1", MODE_X);
- Lock::DBLock r2(&ls, "db1", MODE_S);
+TEST_F(DConcurrencyTestFixture, MultipleConflictingDBLocksOnSameThread) {
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
+ Lock::DBLock r1(opCtx.get(), "db1", MODE_X);
+ Lock::DBLock r2(opCtx.get(), "db1", MODE_S);
- ASSERT(ls.isDbLockedForMode("db1", MODE_X));
- ASSERT(ls.isDbLockedForMode("db1", MODE_S));
+ ASSERT(lockState->isDbLockedForMode("db1", MODE_X));
+ ASSERT(lockState->isDbLockedForMode("db1", MODE_S));
}
-TEST(DConcurrency, IsDbLockedForSMode) {
+TEST_F(DConcurrencyTestFixture, IsDbLockedForSMode) {
const std::string dbName("db");
- MMAPV1LockerImpl ls;
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
+ Lock::DBLock dbLock(opCtx.get(), dbName, MODE_S);
- Lock::DBLock dbLock(&ls, dbName, MODE_S);
-
- ASSERT(ls.isDbLockedForMode(dbName, MODE_IS));
- ASSERT(!ls.isDbLockedForMode(dbName, MODE_IX));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_S));
- ASSERT(!ls.isDbLockedForMode(dbName, MODE_X));
+ ASSERT(lockState->isDbLockedForMode(dbName, MODE_IS));
+ ASSERT(!lockState->isDbLockedForMode(dbName, MODE_IX));
+ ASSERT(lockState->isDbLockedForMode(dbName, MODE_S));
+ ASSERT(!lockState->isDbLockedForMode(dbName, MODE_X));
}
-TEST(DConcurrency, IsDbLockedForXMode) {
+TEST_F(DConcurrencyTestFixture, IsDbLockedForXMode) {
const std::string dbName("db");
- MMAPV1LockerImpl ls;
-
- Lock::DBLock dbLock(&ls, dbName, MODE_X);
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
+ Lock::DBLock dbLock(opCtx.get(), dbName, MODE_X);
- ASSERT(ls.isDbLockedForMode(dbName, MODE_IS));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_IX));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_S));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_X));
+ ASSERT(lockState->isDbLockedForMode(dbName, MODE_IS));
+ ASSERT(lockState->isDbLockedForMode(dbName, MODE_IX));
+ ASSERT(lockState->isDbLockedForMode(dbName, MODE_S));
+ ASSERT(lockState->isDbLockedForMode(dbName, MODE_X));
}
-TEST(DConcurrency, IsCollectionLocked_DB_Locked_IS) {
+TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IS) {
const std::string ns("db1.coll");
- MMAPV1LockerImpl ls;
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
- Lock::DBLock dbLock(&ls, "db1", MODE_IS);
+ Lock::DBLock dbLock(opCtx.get(), "db1", MODE_IS);
{
- Lock::CollectionLock collLock(&ls, ns, MODE_IS);
+ Lock::CollectionLock collLock(lockState, ns, MODE_IS);
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_IX));
// TODO: This is TRUE because Lock::CollectionLock converts IS lock to S
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_S));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_X));
+ ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_X));
}
{
- Lock::CollectionLock collLock(&ls, ns, MODE_S);
+ Lock::CollectionLock collLock(lockState, ns, MODE_S);
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_X));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(!lockState->isCollectionLockedForMode(ns, MODE_X));
}
}
-TEST(DConcurrency, IsCollectionLocked_DB_Locked_IX) {
+TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IX) {
const std::string ns("db1.coll");
- MMAPV1LockerImpl ls;
+ auto opCtx = makeOpCtx();
+ opCtx->setLockState(stdx::make_unique<MMAPV1LockerImpl>());
+ auto lockState = opCtx->lockState();
- Lock::DBLock dbLock(&ls, "db1", MODE_IX);
+ Lock::DBLock dbLock(opCtx.get(), "db1", MODE_IX);
{
- Lock::CollectionLock collLock(&ls, ns, MODE_IX);
+ Lock::CollectionLock collLock(lockState, ns, MODE_IX);
// TODO: This is TRUE because Lock::CollectionLock converts IX lock to X
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_X));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_X));
}
{
- Lock::CollectionLock collLock(&ls, ns, MODE_X);
+ Lock::CollectionLock collLock(lockState, ns, MODE_X);
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_X));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(lockState->isCollectionLockedForMode(ns, MODE_X));
}
}
-TEST(DConcurrency, Stress) {
+TEST_F(DConcurrencyTestFixture, Stress) {
const int kNumIterations = 5000;
ProgressMeter progressMeter(kNumIterations * kMaxStressThreads);
- std::array<DefaultLockerImpl, kMaxStressThreads> locker;
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxStressThreads);
AtomicInt32 ready{0};
std::vector<stdx::thread> threads;
- for (int threadId = 0; threadId < kMaxStressThreads; threadId++)
+
+ for (int threadId = 0; threadId < kMaxStressThreads; threadId++) {
threads.emplace_back([&, threadId]() {
// Busy-wait until everybody is ready
ready.fetchAndAdd(1);
@@ -485,119 +535,119 @@ TEST(DConcurrency, Stress) {
const bool sometimes = (std::rand() % 15 == 0);
if (i % 7 == 0 && threadId == 0 /* Only one upgrader legal */) {
- Lock::GlobalWrite w(&locker[threadId]);
+ Lock::GlobalWrite w(clients[threadId].second.get());
if (i % 7 == 2) {
- Lock::TempRelease t(&locker[threadId]);
+ Lock::TempRelease t(clients[threadId].second->lockState());
}
- ASSERT(locker[threadId].isW());
+ ASSERT(clients[threadId].second->lockState()->isW());
} else if (i % 7 == 1) {
- Lock::GlobalRead r(&locker[threadId]);
- ASSERT(locker[threadId].isReadLocked());
+ Lock::GlobalRead r(clients[threadId].second.get());
+ ASSERT(clients[threadId].second->lockState()->isReadLocked());
} else if (i % 7 == 2) {
- Lock::GlobalWrite w(&locker[threadId]);
+ Lock::GlobalWrite w(clients[threadId].second.get());
if (sometimes) {
- Lock::TempRelease t(&locker[threadId]);
+ Lock::TempRelease t(clients[threadId].second->lockState());
}
- ASSERT(locker[threadId].isW());
+ ASSERT(clients[threadId].second->lockState()->isW());
} else if (i % 7 == 3) {
- Lock::GlobalWrite w(&locker[threadId]);
- { Lock::TempRelease t(&locker[threadId]); }
+ Lock::GlobalWrite w(clients[threadId].second.get());
+ { Lock::TempRelease t(clients[threadId].second->lockState()); }
- Lock::GlobalRead r(&locker[threadId]);
+ Lock::GlobalRead r(clients[threadId].second.get());
if (sometimes) {
- Lock::TempRelease t(&locker[threadId]);
+ Lock::TempRelease t(clients[threadId].second->lockState());
}
- ASSERT(locker[threadId].isW());
+ ASSERT(clients[threadId].second->lockState()->isW());
} else if (i % 7 == 4) {
- Lock::GlobalRead r(&locker[threadId]);
- Lock::GlobalRead r2(&locker[threadId]);
- ASSERT(locker[threadId].isReadLocked());
+ Lock::GlobalRead r(clients[threadId].second.get());
+ Lock::GlobalRead r2(clients[threadId].second.get());
+ ASSERT(clients[threadId].second->lockState()->isReadLocked());
} else if (i % 7 == 5) {
- { Lock::DBLock r(&locker[threadId], "foo", MODE_S); }
- { Lock::DBLock r(&locker[threadId], "bar", MODE_S); }
+ { Lock::DBLock r(clients[threadId].second.get(), "foo", MODE_S); }
+ { Lock::DBLock r(clients[threadId].second.get(), "bar", MODE_S); }
} else if (i % 7 == 6) {
if (i > kNumIterations / 2) {
int q = i % 11;
if (q == 0) {
- Lock::DBLock r(&locker[threadId], "foo", MODE_S);
- ASSERT(locker[threadId].isDbLockedForMode("foo", MODE_S));
-
- Lock::DBLock r2(&locker[threadId], "foo", MODE_S);
- ASSERT(locker[threadId].isDbLockedForMode("foo", MODE_S));
-
- Lock::DBLock r3(&locker[threadId], "local", MODE_S);
- ASSERT(locker[threadId].isDbLockedForMode("foo", MODE_S));
- ASSERT(locker[threadId].isDbLockedForMode("local", MODE_S));
+ Lock::DBLock r(clients[threadId].second.get(), "foo", MODE_S);
+ ASSERT(clients[threadId].second->lockState()->isDbLockedForMode(
+ "foo", MODE_S));
+
+ Lock::DBLock r2(clients[threadId].second.get(), "foo", MODE_S);
+ ASSERT(clients[threadId].second->lockState()->isDbLockedForMode(
+ "foo", MODE_S));
+
+ Lock::DBLock r3(clients[threadId].second.get(), "local", MODE_S);
+ ASSERT(clients[threadId].second->lockState()->isDbLockedForMode(
+ "foo", MODE_S));
+ ASSERT(clients[threadId].second->lockState()->isDbLockedForMode(
+ "local", MODE_S));
} else if (q == 1) {
// test locking local only -- with no preceding lock
- { Lock::DBLock x(&locker[threadId], "local", MODE_S); }
+ { Lock::DBLock x(clients[threadId].second.get(), "local", MODE_S); }
- Lock::DBLock x(&locker[threadId], "local", MODE_X);
+ Lock::DBLock x(clients[threadId].second.get(), "local", MODE_X);
if (sometimes) {
- Lock::TempRelease t(&locker[threadId]);
+ Lock::TempRelease t(clients[threadId].second.get()->lockState());
}
} else if (q == 2) {
- { Lock::DBLock x(&locker[threadId], "admin", MODE_S); }
- { Lock::DBLock x(&locker[threadId], "admin", MODE_X); }
+ { Lock::DBLock x(clients[threadId].second.get(), "admin", MODE_S); }
+ { Lock::DBLock x(clients[threadId].second.get(), "admin", MODE_X); }
} else if (q == 3) {
- Lock::DBLock x(&locker[threadId], "foo", MODE_X);
- Lock::DBLock y(&locker[threadId], "admin", MODE_S);
+ Lock::DBLock x(clients[threadId].second.get(), "foo", MODE_X);
+ Lock::DBLock y(clients[threadId].second.get(), "admin", MODE_S);
} else if (q == 4) {
- Lock::DBLock x(&locker[threadId], "foo2", MODE_S);
- Lock::DBLock y(&locker[threadId], "admin", MODE_S);
+ Lock::DBLock x(clients[threadId].second.get(), "foo2", MODE_S);
+ Lock::DBLock y(clients[threadId].second.get(), "admin", MODE_S);
} else if (q == 5) {
- Lock::DBLock x(&locker[threadId], "foo", MODE_IS);
+ Lock::DBLock x(clients[threadId].second.get(), "foo", MODE_IS);
} else if (q == 6) {
- Lock::DBLock x(&locker[threadId], "foo", MODE_IX);
- Lock::DBLock y(&locker[threadId], "local", MODE_IX);
+ Lock::DBLock x(clients[threadId].second.get(), "foo", MODE_IX);
+ Lock::DBLock y(clients[threadId].second.get(), "local", MODE_IX);
} else {
- Lock::DBLock w(&locker[threadId], "foo", MODE_X);
+ Lock::DBLock w(clients[threadId].second.get(), "foo", MODE_X);
- { Lock::TempRelease t(&locker[threadId]); }
+ { Lock::TempRelease t(clients[threadId].second->lockState()); }
- Lock::DBLock r2(&locker[threadId], "foo", MODE_S);
- Lock::DBLock r3(&locker[threadId], "local", MODE_S);
+ Lock::DBLock r2(clients[threadId].second.get(), "foo", MODE_S);
+ Lock::DBLock r3(clients[threadId].second.get(), "local", MODE_S);
}
} else {
- Lock::DBLock r(&locker[threadId], "foo", MODE_S);
- Lock::DBLock r2(&locker[threadId], "foo", MODE_S);
- Lock::DBLock r3(&locker[threadId], "local", MODE_S);
+ Lock::DBLock r(clients[threadId].second.get(), "foo", MODE_S);
+ Lock::DBLock r2(clients[threadId].second.get(), "foo", MODE_S);
+ Lock::DBLock r3(clients[threadId].second.get(), "local", MODE_S);
}
}
progressMeter.hit();
}
});
+ }
for (auto& thread : threads)
thread.join();
- {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite w(&ls);
- }
-
- {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead r(&ls);
- }
+ auto newClients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ { Lock::GlobalWrite w(newClients[0].second.get()); }
+ { Lock::GlobalRead r(newClients[1].second.get()); }
}
-TEST(DConcurrency, StressPartitioned) {
+TEST_F(DConcurrencyTestFixture, StressPartitioned) {
const int kNumIterations = 5000;
ProgressMeter progressMeter(kNumIterations * kMaxStressThreads);
- std::array<DefaultLockerImpl, kMaxStressThreads> locker;
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxStressThreads);
AtomicInt32 ready{0};
std::vector<stdx::thread> threads;
- for (int threadId = 0; threadId < kMaxStressThreads; threadId++)
+ for (int threadId = 0; threadId < kMaxStressThreads; threadId++) {
threads.emplace_back([&, threadId]() {
// Busy-wait until everybody is ready
ready.fetchAndAdd(1);
@@ -607,10 +657,10 @@ TEST(DConcurrency, StressPartitioned) {
for (int i = 0; i < kNumIterations; i++) {
if (threadId == 0) {
if (i % 100 == 0) {
- Lock::GlobalWrite w(&locker[threadId]);
+ Lock::GlobalWrite w(clients[threadId].second.get());
continue;
} else if (i % 100 == 1) {
- Lock::GlobalRead w(&locker[threadId]);
+ Lock::GlobalRead w(clients[threadId].second.get());
continue;
}
@@ -618,31 +668,26 @@ TEST(DConcurrency, StressPartitioned) {
}
if (i % 2 == 0) {
- Lock::DBLock x(&locker[threadId], "foo", MODE_IS);
+ Lock::DBLock x(clients[threadId].second.get(), "foo", MODE_IS);
} else {
- Lock::DBLock x(&locker[threadId], "foo", MODE_IX);
- Lock::DBLock y(&locker[threadId], "local", MODE_IX);
+ Lock::DBLock x(clients[threadId].second.get(), "foo", MODE_IX);
+ Lock::DBLock y(clients[threadId].second.get(), "local", MODE_IX);
}
progressMeter.hit();
}
});
+ }
for (auto& thread : threads)
thread.join();
- {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite w(&ls);
- }
-
- {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead r(&ls);
- }
+ auto newClients = makeKClientsWithLockers<MMAPV1LockerImpl>(2);
+ { Lock::GlobalWrite w(newClients[0].second.get()); }
+ { Lock::GlobalRead r(newClients[1].second.get()); }
}
-TEST(DConcurrency, ResourceMutexLabels) {
+TEST_F(DConcurrencyTestFixture, ResourceMutexLabels) {
Lock::ResourceMutex mutex("label");
ASSERT(mutex.getName() == "label");
Lock::ResourceMutex mutex2("label2");
@@ -653,64 +698,68 @@ TEST(DConcurrency, ResourceMutexLabels) {
// These tests exercise single- and multi-threaded performance of uncontended lock acquisition. It
// is neither practical nor useful to run them on debug builds.
-TEST(Locker, PerformanceStdMutex) {
+TEST_F(DConcurrencyTestFixture, PerformanceStdMutex) {
stdx::mutex mtx;
perfTest([&](int threadId) { stdx::unique_lock<stdx::mutex> lk(mtx); }, kMaxPerfThreads);
}
-TEST(Locker, PerformanceResourceMutexShared) {
+TEST_F(DConcurrencyTestFixture, PerformanceResourceMutexShared) {
Lock::ResourceMutex mtx("testMutex");
std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
perfTest([&](int threadId) { Lock::SharedLock lk(&locker[threadId], mtx); }, kMaxPerfThreads);
}
-TEST(Locker, PerformanceResourceMutexExclusive) {
+TEST_F(DConcurrencyTestFixture, PerformanceResourceMutexExclusive) {
Lock::ResourceMutex mtx("testMutex");
std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
perfTest([&](int threadId) { Lock::ExclusiveLock lk(&locker[threadId], mtx); },
kMaxPerfThreads);
}
-TEST(Locker, PerformanceCollectionIntentSharedLock) {
- std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
+TEST_F(DConcurrencyTestFixture, PerformanceCollectionIntentSharedLock) {
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxPerfThreads);
ForceSupportsDocLocking supported(true);
perfTest(
[&](int threadId) {
- Lock::DBLock dlk(&locker[threadId], "test", MODE_IS);
- Lock::CollectionLock clk(&locker[threadId], "test.coll", MODE_IS);
+ Lock::DBLock dlk(clients[threadId].second.get(), "test", MODE_IS);
+ Lock::CollectionLock clk(clients[threadId].second->lockState(), "test.coll", MODE_IS);
},
kMaxPerfThreads);
}
-TEST(Locker, PerformanceCollectionIntentExclusiveLock) {
- std::array<DefaultLockerImpl, kMaxPerfThreads> locker;
+TEST_F(DConcurrencyTestFixture, PerformanceCollectionIntentExclusiveLock) {
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxPerfThreads);
ForceSupportsDocLocking supported(true);
perfTest(
[&](int threadId) {
- Lock::DBLock dlk(&locker[threadId], "test", MODE_IX);
- Lock::CollectionLock clk(&locker[threadId], "test.coll", MODE_IX);
+ Lock::DBLock dlk(clients[threadId].second.get(), "test", MODE_IX);
+ Lock::CollectionLock clk(clients[threadId].second->lockState(), "test.coll", MODE_IX);
},
kMaxPerfThreads);
}
-TEST(Locker, PerformanceMMAPv1CollectionSharedLock) {
- std::array<MMAPV1LockerImpl, kMaxPerfThreads> locker;
+TEST_F(DConcurrencyTestFixture, PerformanceMMAPv1CollectionSharedLock) {
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxPerfThreads);
ForceSupportsDocLocking supported(false);
perfTest(
[&](int threadId) {
- Lock::DBLock dlk(&locker[threadId], "test", MODE_IS);
- Lock::CollectionLock clk(&locker[threadId], "test.coll", MODE_S);
+ Lock::DBLock dlk(clients[threadId].second.get(), "test", MODE_IS);
+ Lock::CollectionLock clk(clients[threadId].second->lockState(), "test.coll", MODE_S);
},
kMaxPerfThreads);
}
-TEST(Locker, PerformanceMMAPv1CollectionExclusive) {
- std::array<MMAPV1LockerImpl, kMaxPerfThreads> locker;
+TEST_F(DConcurrencyTestFixture, PerformanceMMAPv1CollectionExclusive) {
+ std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
+ clients = makeKClientsWithLockers<DefaultLockerImpl>(kMaxPerfThreads);
ForceSupportsDocLocking supported(false);
perfTest(
[&](int threadId) {
- Lock::DBLock dlk(&locker[threadId], "test", MODE_IX);
- Lock::CollectionLock clk(&locker[threadId], "test.coll", MODE_X);
+ Lock::DBLock dlk(clients[threadId].second.get(), "test", MODE_IX);
+ Lock::CollectionLock clk(clients[threadId].second->lockState(), "test.coll", MODE_X);
},
kMaxPerfThreads);
}
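
The converted tests follow a single recipe: build a Client/OperationContext pair through the fixture, give the OperationContext the desired Locker implementation, and hand the OperationContext (rather than a raw Locker) to the lock guards. A hypothetical extra test in the same style (the test name and lock mode are illustrative only):

    TEST_F(DConcurrencyTestFixture, DBLockTakesIXSketch) {
        auto clients = makeKClientsWithLockers<DefaultLockerImpl>(1);
        auto opCtx = clients[0].second.get();

        Lock::DBLock dbLock(opCtx, "db", MODE_IX);
        ASSERT(opCtx->lockState()->isDbLockedForMode("db", MODE_IX));
    }
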
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 02294c9c71a..e6ae63880e3 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -197,8 +197,7 @@ void logStartup(OperationContext* opCtx) {
BSONObj o = toLog.obj();
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
AutoGetOrCreateDb autoDb(opCtx, startupLogCollectionName.db(), mongo::MODE_X);
Database* db = autoDb.getDb();
Collection* collection = db->getCollection(startupLogCollectionName);
@@ -255,6 +254,8 @@ void checkForIdIndexes(OperationContext* opCtx, Database* db) {
* --replset.
*/
unsigned long long checkIfReplMissingFromCommandLine(OperationContext* opCtx) {
+ // Take a global write lock so the query below can run; database files cannot be opened while read-locked
+ Lock::GlobalWrite lk(opCtx);
if (!repl::getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
DBDirectClient c(opCtx);
return c.count(kSystemReplSetCollection.ns());
@@ -280,8 +281,7 @@ void checkForCappedOplog(OperationContext* opCtx, Database* db) {
void repairDatabasesAndCheckVersion(OperationContext* opCtx) {
LOG(1) << "enter repairDatabases (to check pdfile version #)";
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
vector<string> dbNames;
@@ -308,7 +308,7 @@ void repairDatabasesAndCheckVersion(OperationContext* opCtx) {
// yet, then it will be created. If the mongod is running in a read-only mode, then it is
// fine to not open the "local" database and populate the catalog entries because we won't
// attempt to drop the temporary collections anyway.
- Lock::DBLock dbLock(opCtx->lockState(), kSystemReplSetCollection.db(), MODE_X);
+ Lock::DBLock dbLock(opCtx, kSystemReplSetCollection.db(), MODE_X);
dbHolder().openDb(opCtx, kSystemReplSetCollection.db());
}
@@ -697,8 +697,7 @@ ExitCode _initAndListen(int listenPort) {
if (!replSettings.usingReplSets() && !replSettings.isSlave() &&
storageGlobalParams.engine != "devnull") {
- ScopedTransaction transaction(startupOpCtx.get(), MODE_X);
- Lock::GlobalWrite lk(startupOpCtx.get()->lockState());
+ Lock::GlobalWrite lk(startupOpCtx.get());
FeatureCompatibilityVersion::setIfCleanStartup(
startupOpCtx.get(), repl::StorageInterface::get(getGlobalServiceContext()));
}
@@ -1020,8 +1019,7 @@ static void shutdownTask() {
//
// TODO: This call chain uses the locker directly, because we do not want to start an
// operation context, which also instantiates a recovery unit. Also, using the
- // lockGlobalBegin/lockGlobalComplete sequence, we avoid taking the flush lock. This will
- // all go away if we start acquiring the global/flush lock as part of ScopedTransaction.
+ // lockGlobalBegin/lockGlobalComplete sequence, we avoid taking the flush lock.
//
// For a Windows service, dbexit does not call exit(), so we must leak the lock outside
// of this function to prevent any operations from running that need a lock.
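
The shutdown comment above describes driving the Locker directly, without an OperationContext, and using the two-step global acquisition so the flush lock is never taken. A hedged sketch of that sequence, using only the Locker calls that appear in d_concurrency.cpp above; the concrete Locker instance and the error handling are assumptions, not part of this patch:

    DefaultLockerImpl globalLocker;  // assumption: the real code reuses the thread's Locker
    LockResult result = globalLocker.lockGlobalBegin(MODE_X);
    if (result == LOCK_WAITING) {
        result = globalLocker.lockGlobalComplete(UINT_MAX);
    }
    invariant(result == LOCK_OK);
    // Intentionally not unlocked here: the lock is leaked past dbexit(), as the comment
    // above explains for the Windows service case.
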
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index 051d9141ebf..84b6584b540 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -42,7 +42,7 @@
namespace mongo {
AutoGetDb::AutoGetDb(OperationContext* opCtx, StringData ns, LockMode mode)
- : _dbLock(opCtx->lockState(), ns, mode), _db(dbHolder().get(opCtx, ns)) {}
+ : _dbLock(opCtx, ns, mode), _db(dbHolder().get(opCtx, ns)) {}
AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
const NamespaceString& nss,
@@ -62,9 +62,7 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
}
AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* opCtx, StringData ns, LockMode mode)
- : _transaction(opCtx, MODE_IX),
- _dbLock(opCtx->lockState(), ns, mode),
- _db(dbHolder().get(opCtx, ns)) {
+ : _dbLock(opCtx, ns, mode), _db(dbHolder().get(opCtx, ns)) {
invariant(mode == MODE_IX || mode == MODE_X);
_justCreated = false;
// If the database didn't exist, relock in MODE_X
@@ -79,36 +77,14 @@ AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* opCtx, StringData ns, Loc
AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* opCtx,
const NamespaceString& nss,
- AutoGetCollection::ViewMode viewMode)
- : _opCtx(opCtx), _transaction(opCtx, MODE_IS) {
- {
- _autoColl.emplace(opCtx, nss, MODE_IS, MODE_IS, viewMode);
-
- auto curOp = CurOp::get(_opCtx);
- stdx::lock_guard<Client> lk(*_opCtx->getClient());
-
- // TODO: OldClientContext legacy, needs to be removed
- curOp->ensureStarted();
- curOp->setNS_inlock(nss.ns());
-
- // At this point, we are locked in shared mode for the database by the DB lock in the
- // constructor, so it is safe to load the DB pointer.
- if (_autoColl->getDb()) {
- // TODO: OldClientContext legacy, needs to be removed
- curOp->enter_inlock(nss.ns().c_str(), _autoColl->getDb()->getProfilingLevel());
- }
- }
+ AutoGetCollection::ViewMode viewMode) {
+ _autoColl.emplace(opCtx, nss, MODE_IS, MODE_IS, viewMode);
// Note: this can yield.
- _ensureMajorityCommittedSnapshotIsValid(nss);
-
- // We have both the DB and collection locked, which is the prerequisite to do a stable shard
- // version check, but we'd like to do the check after we have a satisfactory snapshot.
- auto css = CollectionShardingState::get(opCtx, nss);
- css->checkShardVersionOrThrow(opCtx);
+ _ensureMajorityCommittedSnapshotIsValid(nss, opCtx);
}
-AutoGetCollectionForRead::~AutoGetCollectionForRead() {
+AutoGetCollectionForReadCommand::~AutoGetCollectionForReadCommand() {
// Report time spent in read lock
auto currentOp = CurOp::get(_opCtx);
Top::get(_opCtx->getClient()->getServiceContext())
@@ -121,7 +97,8 @@ AutoGetCollectionForRead::~AutoGetCollectionForRead() {
currentOp->getReadWriteType());
}
-void AutoGetCollectionForRead::_ensureMajorityCommittedSnapshotIsValid(const NamespaceString& nss) {
+void AutoGetCollectionForRead::_ensureMajorityCommittedSnapshotIsValid(const NamespaceString& nss,
+ OperationContext* opCtx) {
while (true) {
auto coll = _autoColl->getCollection();
if (!coll) {
@@ -131,7 +108,7 @@ void AutoGetCollectionForRead::_ensureMajorityCommittedSnapshotIsValid(const Nam
if (!minSnapshot) {
return;
}
- auto mySnapshot = _opCtx->recoveryUnit()->getMajorityCommittedSnapshot();
+ auto mySnapshot = opCtx->recoveryUnit()->getMajorityCommittedSnapshot();
if (!mySnapshot) {
return;
}
@@ -142,31 +119,58 @@ void AutoGetCollectionForRead::_ensureMajorityCommittedSnapshotIsValid(const Nam
// Yield locks.
_autoColl = boost::none;
- repl::ReplicationCoordinator::get(_opCtx)->waitUntilSnapshotCommitted(_opCtx, *minSnapshot);
+ repl::ReplicationCoordinator::get(opCtx)->waitUntilSnapshotCommitted(opCtx, *minSnapshot);
- uassertStatusOK(_opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
+ uassertStatusOK(opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot());
{
- stdx::lock_guard<Client> lk(*_opCtx->getClient());
- CurOp::get(_opCtx)->yielded();
+ stdx::lock_guard<Client> lk(*opCtx->getClient());
+ CurOp::get(opCtx)->yielded();
}
// Relock.
- _autoColl.emplace(_opCtx, nss, MODE_IS);
+ _autoColl.emplace(opCtx, nss, MODE_IS);
+ }
+}
+
+AutoGetCollectionForReadCommand::AutoGetCollectionForReadCommand(
+ OperationContext* opCtx, const NamespaceString& nss, AutoGetCollection::ViewMode viewMode)
+ : _opCtx(opCtx) {
+ {
+ _autoCollForRead.emplace(opCtx, nss, viewMode);
+
+ auto curOp = CurOp::get(_opCtx);
+ stdx::lock_guard<Client> lk(*_opCtx->getClient());
+
+ // TODO: OldClientContext legacy, needs to be removed
+ curOp->ensureStarted();
+ curOp->setNS_inlock(nss.ns());
+
+ // At this point, we are locked in shared mode for the database by the DB lock in the
+ // constructor, so it is safe to load the DB pointer.
+ if (_autoCollForRead->getDb()) {
+ // TODO: OldClientContext legacy, needs to be removed
+ curOp->enter_inlock(nss.ns().c_str(), _autoCollForRead->getDb()->getProfilingLevel());
+ }
}
+
+ // We have both the DB and collection locked, which is the prerequisite to do a stable shard
+ // version check, but we'd like to do the check after we have a satisfactory snapshot.
+ auto css = CollectionShardingState::get(opCtx, nss);
+ css->checkShardVersionOrThrow(opCtx);
}
-AutoGetCollectionOrViewForRead::AutoGetCollectionOrViewForRead(OperationContext* opCtx,
- const NamespaceString& nss)
- : AutoGetCollectionForRead(opCtx, nss, AutoGetCollection::ViewMode::kViewsPermitted),
- _view(_autoColl->getDb() && !getCollection()
- ? _autoColl->getDb()->getViewCatalog()->lookup(opCtx, nss.ns())
+AutoGetCollectionOrViewForReadCommand::AutoGetCollectionOrViewForReadCommand(
+ OperationContext* opCtx, const NamespaceString& nss)
+ : AutoGetCollectionForReadCommand(opCtx, nss, AutoGetCollection::ViewMode::kViewsPermitted),
+ _view(_autoCollForRead->getDb() && !getCollection()
+ ? _autoCollForRead->getDb()->getViewCatalog()->lookup(opCtx, nss.ns())
: nullptr) {}
-void AutoGetCollectionOrViewForRead::releaseLocksForView() noexcept {
+void AutoGetCollectionOrViewForReadCommand::releaseLocksForView() noexcept {
invariant(_view);
_view = nullptr;
- _autoColl = boost::none;
+ _autoCollForRead = boost::none;
}
OldClientContext::OldClientContext(OperationContext* opCtx,
diff --git a/src/mongo/db/db_raii.h b/src/mongo/db/db_raii.h
index 04d1658b24d..8060d06f000 100644
--- a/src/mongo/db/db_raii.h
+++ b/src/mongo/db/db_raii.h
@@ -46,6 +46,9 @@ class Collection;
* RAII-style class, which acquires a lock on the specified database in the requested mode and
* obtains a reference to the database. Used as a shortcut for calls to dbHolder().get().
*
+ * Use this when you want to do a database-level operation, like read a list of all collections, or
+ * drop a collection.
+ *
* It is guaranteed that the lock will be released when this object goes out of scope, therefore
* the database reference returned by this class should not be retained.
*/
@@ -68,6 +71,10 @@ private:
* RAII-style class, which acquires locks on the specified database and collection in the
* requested mode and obtains references to both.
*
+ * Use this when you want to access something at the collection level, but do not want to do any of
+ * the tasks associated with the 'ForRead' variants below. For example, you can use this to access a
+ * Collection's CursorManager, or to remove a document.
+ *
* It is guaranteed that locks will be released when this object goes out of scope, therefore
* the database and the collection references returned by this class should not be retained.
*/
@@ -87,10 +94,10 @@ public:
: AutoGetCollection(opCtx, nss, modeDB, modeColl, ViewMode::kViewsForbidden) {}
/**
- * This constructor is inteded for internal use and should not be used outside this file.
- * AutoGetCollectionForRead and AutoGetCollectionOrViewForRead use ViewMode to determine whether
- * or not it is permissible to obtain a handle on a view namespace. Use another constructor or
- * another AutoGet class instead.
+ * This constructor is intended for internal use and should not be used outside this file.
+ * AutoGetCollectionForReadCommand and AutoGetCollectionOrViewForReadCommand use 'viewMode' to
+ * determine whether or not it is permissible to obtain a handle on a view namespace. Use
+ * another constructor or another 'AutoGet' class instead.
*/
AutoGetCollection(OperationContext* opCtx,
const NamespaceString& nss,
@@ -98,10 +105,16 @@ public:
LockMode modeColl,
ViewMode viewMode);
+ /**
+ * Returns nullptr if the database does not exist.
+ */
Database* getDb() const {
return _autoDb.getDb();
}
+ /**
+ * Returns nullptr if the collection does not exist.
+ */
Collection* getCollection() const {
return _coll;
}
@@ -115,7 +128,8 @@ private:
Collection* const _coll;
friend class AutoGetCollectionForRead;
- friend class AutoGetCollectionOrViewForRead;
+ friend class AutoGetCollectionForReadCommand;
+ friend class AutoGetCollectionOrViewForReadCommand;
};
/**
@@ -125,6 +139,9 @@ private:
* MODE_IX or MODE_X. If the database needs to be created, the lock will automatically be
* reacquired as MODE_X.
*
+ * Use this when you are about to perform a write, and want to create the database if it doesn't
+ * already exist.
+ *
* It is guaranteed that locks will be released when this object goes out of scope, therefore
* the database reference returned by this class should not be retained.
*/
@@ -147,17 +164,20 @@ public:
}
private:
- ScopedTransaction _transaction;
Lock::DBLock _dbLock; // not const, as we may need to relock for implicit create
Database* _db;
bool _justCreated;
};
/**
- * RAII-style class, which would acquire the appropritate hierarchy of locks for obtaining
+ * RAII-style class, which would acquire the appropriate hierarchy of locks for obtaining
* a particular collection and would retrieve a reference to the collection. In addition, this
- * utility validates the shard version for the specified namespace and sets the current operation's
- * namespace for the duration while this object is alive.
+ * utility will ensure that the read will be performed against an appropriately committed snapshot
+ * if the operation is using a readConcern of 'majority'.
+ *
+ * Use this when you want to read the contents of a collection, but you are not at the top-level of
+ * some command. This will ensure your reads obey any requested readConcern, but will not update the
+ * status of CurrentOp, or add a Top entry.
*
* It is guaranteed that locks will be released when this object goes out of scope, therefore
* database and collection references returned by this class should not be retained.
@@ -169,7 +189,15 @@ public:
AutoGetCollectionForRead(OperationContext* opCtx, const NamespaceString& nss)
: AutoGetCollectionForRead(opCtx, nss, AutoGetCollection::ViewMode::kViewsForbidden) {}
- ~AutoGetCollectionForRead();
+ /**
+ * This constructor is intended for internal use and should not be used outside this file.
+ * AutoGetCollectionForReadCommand and AutoGetCollectionOrViewForReadCommand use 'viewMode' to
+ * determine whether or not it is permissible to obtain a handle on a view namespace. Use
+ * another constructor or another 'AutoGet' class instead.
+ */
+ AutoGetCollectionForRead(OperationContext* opCtx,
+ const NamespaceString& nss,
+ AutoGetCollection::ViewMode viewMode);
Database* getDb() const {
return _autoColl->getDb();
@@ -180,34 +208,73 @@ public:
}
private:
- void _ensureMajorityCommittedSnapshotIsValid(const NamespaceString& nss);
+ void _ensureMajorityCommittedSnapshotIsValid(const NamespaceString& nss,
+ OperationContext* opCtx);
- const Timer _timer;
+ boost::optional<AutoGetCollection> _autoColl;
+};
+
+/**
+ * RAII-style class, which would acquire the appropriate hierarchy of locks for obtaining
+ * a particular collection and would retrieve a reference to the collection. In addition, this
+ * utility validates the shard version for the specified namespace and sets the current operation's
+ * namespace for as long as this object is alive.
+ *
+ * Use this when you are a read-only command and you know that your target namespace is a collection
+ * (not a view). In addition to ensuring your read obeys any requested readConcern, this will add a
+ * Top entry upon destruction and ensure the CurrentOp object has the right namespace and has
+ * started its timer.
+ *
+ * It is guaranteed that locks will be released when this object goes out of scope, therefore
+ * database and collection references returned by this class should not be retained.
+ */
+class AutoGetCollectionForReadCommand {
+ MONGO_DISALLOW_COPYING(AutoGetCollectionForReadCommand);
+
+public:
+ AutoGetCollectionForReadCommand(OperationContext* opCtx, const NamespaceString& nss)
+ : AutoGetCollectionForReadCommand(
+ opCtx, nss, AutoGetCollection::ViewMode::kViewsForbidden) {}
+
+ ~AutoGetCollectionForReadCommand();
+
+ Database* getDb() const {
+ return _autoCollForRead->getDb();
+ }
+
+ Collection* getCollection() const {
+ return _autoCollForRead->getCollection();
+ }
+
+private:
OperationContext* const _opCtx;
- const ScopedTransaction _transaction;
+ const Timer _timer;
protected:
- AutoGetCollectionForRead(OperationContext* opCtx,
- const NamespaceString& nss,
- AutoGetCollection::ViewMode viewMode);
+ AutoGetCollectionForReadCommand(OperationContext* opCtx,
+ const NamespaceString& nss,
+ AutoGetCollection::ViewMode viewMode);
/**
* This protected section must come after the private section because
* AutoGetCollectionOrViewForRead needs access to _autoColl, but _autoColl must be initialized
* after _transaction.
*/
- boost::optional<AutoGetCollection> _autoColl;
+ boost::optional<AutoGetCollectionForRead> _autoCollForRead;
};
/**
* RAII-style class for obtaining a collection or view for reading. The pointer to a view definition
* is nullptr if it does not exist.
+ *
+ * Use this when you are a read-only command, but have not yet determined if the namespace is a view
+ * or a collection.
*/
-class AutoGetCollectionOrViewForRead final : public AutoGetCollectionForRead {
- MONGO_DISALLOW_COPYING(AutoGetCollectionOrViewForRead);
+class AutoGetCollectionOrViewForReadCommand final : public AutoGetCollectionForReadCommand {
+ MONGO_DISALLOW_COPYING(AutoGetCollectionOrViewForReadCommand);
public:
- AutoGetCollectionOrViewForRead(OperationContext* opCtx, const NamespaceString& nss);
+ AutoGetCollectionOrViewForReadCommand(OperationContext* opCtx, const NamespaceString& nss);
ViewDefinition* getView() const {
return _view.get();
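For illustration, a minimal usage sketch of the two read helpers declared above. It is not part of the patch; the function names (countForCommand, findOneInternal) are hypothetical, and only calls that appear elsewhere in this patch (getCollection(), getRecordStore()->numRecords(), Helpers::findOne()) are used.

// Hypothetical top-level read command body: wants CurrentOp's namespace set, its timer started,
// and a Top entry recorded on destruction, so it uses the *ForReadCommand variant.
long long countForCommand(OperationContext* opCtx, const NamespaceString& nss) {
    AutoGetCollectionForReadCommand ctx(opCtx, nss);
    Collection* coll = ctx.getCollection();
    // Do not retain 'coll' beyond this scope: the locks are released when 'ctx' is destroyed.
    return coll ? coll->getRecordStore()->numRecords(opCtx) : 0;
}

// Hypothetical internal reader: same locking and readConcern behavior, but no CurrentOp or Top
// side effects, so it uses the plain AutoGetCollectionForRead added by this patch.
bool findOneInternal(OperationContext* opCtx, const NamespaceString& nss, BSONObj* out) {
    AutoGetCollectionForRead autoColl(opCtx, nss);
    if (!autoColl.getCollection()) {
        return false;
    }
    return Helpers::findOne(opCtx, autoColl.getCollection(), BSONObj(), *out);
}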
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 265687697ff..ae26b591272 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -209,7 +209,7 @@ RecordId Helpers::findById(OperationContext* opCtx,
}
bool Helpers::getSingleton(OperationContext* opCtx, const char* ns, BSONObj& result) {
- AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
+ AutoGetCollectionForReadCommand ctx(opCtx, NamespaceString(ns));
unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
opCtx, ns, ctx.getCollection(), PlanExecutor::YIELD_MANUAL));
PlanExecutor::ExecState state = exec->getNext(&result, NULL);
@@ -228,7 +228,7 @@ bool Helpers::getSingleton(OperationContext* opCtx, const char* ns, BSONObj& res
}
bool Helpers::getLast(OperationContext* opCtx, const char* ns, BSONObj& result) {
- AutoGetCollectionForRead autoColl(opCtx, NamespaceString(ns));
+ AutoGetCollectionForReadCommand autoColl(opCtx, NamespaceString(ns));
unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(opCtx,
ns,
autoColl.getCollection(),
@@ -319,7 +319,7 @@ long long Helpers::removeRange(OperationContext* opCtx,
BSONObj max;
{
- AutoGetCollectionForRead ctx(opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection) {
warning(LogComponent::kSharding)
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 8fd48430e7a..82642010760 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -164,7 +164,6 @@ public:
// TODO A write lock is currently taken here to accommodate stages that perform writes
// (e.g. DeleteStage). This should be changed to use a read lock for read-only
// execution trees.
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
// Make sure the collection is valid.
diff --git a/src/mongo/db/ftdc/collector.cpp b/src/mongo/db/ftdc/collector.cpp
index 4441f2ef126..78e4f0de60a 100644
--- a/src/mongo/db/ftdc/collector.cpp
+++ b/src/mongo/db/ftdc/collector.cpp
@@ -83,10 +83,7 @@ std::tuple<BSONObj, Date_t> FTDCCollectorCollection::collect(Client* client) {
subObjBuilder.appendDate(kFTDCCollectStartField, now);
- {
- ScopedTransaction st(opCtx.get(), MODE_IS);
- collector->collect(opCtx.get(), subObjBuilder);
- }
+ collector->collect(opCtx.get(), subObjBuilder);
end = client->getServiceContext()->getPreciseClockSource()->now();
subObjBuilder.appendDate(kFTDCCollectEndField, end);
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index 6cbfd6e4ae6..599f81bac74 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -94,8 +94,7 @@ void IndexBuilder::run() {
}
NamespaceString ns(_index["ns"].String());
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock dlk(opCtx.lockState(), ns.db(), MODE_X);
+ Lock::DBLock dlk(&opCtx, ns.db(), MODE_X);
OldClientContext ctx(&opCtx, ns.getSystemIndexesCollection());
Database* db = dbHolder().get(&opCtx, ns.db().toString());
diff --git a/src/mongo/db/index_builder.h b/src/mongo/db/index_builder.h
index 926fd32a5cc..02c11695eb8 100644
--- a/src/mongo/db/index_builder.h
+++ b/src/mongo/db/index_builder.h
@@ -32,6 +32,7 @@
#include "mongo/base/status.h"
#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/util/background.h"
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 17db2f26b64..94663701223 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -63,10 +63,8 @@ void checkNS(OperationContext* opCtx, const std::list<std::string>& nsToCheck) {
LOG(3) << "IndexRebuilder::checkNS: " << ns;
- // This write lock is held throughout the index building process
- // for this namespace.
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock lk(opCtx->lockState(), nsToDatabaseSubstring(ns), MODE_X);
+ // This write lock is held throughout the index building process for this namespace.
+ Lock::DBLock lk(opCtx, nsToDatabaseSubstring(ns), MODE_X);
OldClientContext ctx(opCtx, ns);
Collection* collection = ctx.db()->getCollection(ns);
@@ -155,7 +153,6 @@ void restartInProgressIndexesFromLastShutdown(OperationContext* opCtx) {
for (std::vector<std::string>::const_iterator dbName = dbNames.begin();
dbName < dbNames.end();
++dbName) {
- ScopedTransaction scopedXact(opCtx, MODE_IS);
AutoGetDb autoDb(opCtx, *dbName, MODE_S);
Database* db = autoDb.getDb();
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 9fe9dd117cd..f1e94735d5e 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -118,8 +118,6 @@ void profile(OperationContext* opCtx, NetworkOp op) {
try {
bool acquireDbXLock = false;
while (true) {
- ScopedTransaction scopedXact(opCtx, MODE_IX);
-
std::unique_ptr<AutoGetDb> autoGetDb;
if (acquireDbXLock) {
autoGetDb.reset(new AutoGetDb(opCtx, dbName, MODE_X));
diff --git a/src/mongo/db/matcher/expression_text.cpp b/src/mongo/db/matcher/expression_text.cpp
index 9dcf093e591..a3d100bda9d 100644
--- a/src/mongo/db/matcher/expression_text.cpp
+++ b/src/mongo/db/matcher/expression_text.cpp
@@ -52,7 +52,6 @@ Status TextMatchExpression::init(OperationContext* opCtx,
fts::TextIndexVersion version;
{
// Find text index.
- ScopedTransaction transaction(opCtx, MODE_IS);
AutoGetDb autoDb(opCtx, nss.db(), MODE_IS);
Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
Database* db = autoDb.getDb();
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index ac558d668ab..87c0f159a7f 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -33,7 +33,8 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h"
#include "mongo/db/client.h"
-#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/concurrency/locker.h"
+#include "mongo/db/storage/recovery_unit.h"
#include "mongo/db/storage/recovery_unit.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/db/write_concern_options.h"
@@ -48,7 +49,6 @@ namespace mongo {
class Client;
class CurOp;
-class Locker;
class ProgressMeter;
class ServiceContext;
class StringData;
@@ -504,37 +504,6 @@ private:
bool _toplevel;
};
-
-/**
- * RAII-style class to mark the scope of a transaction. ScopedTransactions may be nested.
- * An outermost ScopedTransaction calls abandonSnapshot() on destruction, so that the storage
- * engine can release resources, such as snapshots or locks, that it may have acquired during
- * the transaction. Note that any writes are committed in nested WriteUnitOfWork scopes,
- * so write conflicts cannot happen on completing a ScopedTransaction.
- *
- * TODO: The ScopedTransaction should hold the global lock
- */
-class ScopedTransaction {
- MONGO_DISALLOW_COPYING(ScopedTransaction);
-
-public:
- /**
- * The mode for the transaction indicates whether the transaction will write (MODE_IX) or
- * only read (MODE_IS), or needs to run without other writers (MODE_S) or any other
- * operations (MODE_X) on the server.
- */
- ScopedTransaction(OperationContext* opCtx, LockMode mode) : _opCtx(opCtx) {}
-
- ~ScopedTransaction() {
- if (!_opCtx->lockState()->isLocked()) {
- _opCtx->recoveryUnit()->abandonSnapshot();
- }
- }
-
-private:
- OperationContext* _opCtx;
-};
-
namespace repl {
/**
* RAII-style class to turn off replicated writes. Writes do not create oplog entries while the
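The deletion of ScopedTransaction above is the pattern repeated throughout the rest of this patch. A minimal before/after sketch, with a placeholder body and assuming an exclusive lock on the "local" database; the assumption is that the snapshot bookkeeping ScopedTransaction's destructor performed now happens inside the lock classes themselves.

// Before (sketch): a ScopedTransaction was paired with a lock taken through lockState(), and the
// outermost ScopedTransaction abandoned the storage engine snapshot in its destructor.
{
    ScopedTransaction transaction(opCtx, MODE_IX);
    Lock::DBLock lk(opCtx->lockState(), "local", MODE_X);
    // ... read or write under the lock ...
}

// After (sketch): the lock helper takes the OperationContext directly, and no separate
// ScopedTransaction is needed.
{
    Lock::DBLock lk(opCtx, "local", MODE_X);
    // ... read or write under the lock ...
}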
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 4a02a753a78..9540b4f7ac3 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -87,8 +87,7 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
locker->isLockHeldForMode(ResourceId(RESOURCE_DATABASE, nsString.db()), MODE_X));
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock lk(opCtx->lockState(), nsString.db(), MODE_X);
+ Lock::DBLock lk(opCtx, nsString.db(), MODE_X);
const bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nsString);
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index a0d0f067333..60f0b7c79b6 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -506,7 +506,6 @@ static WriteResult::SingleResult performSingleUpdateOp(OperationContext* opCtx,
ParsedUpdate parsedUpdate(opCtx, &request);
uassertStatusOK(parsedUpdate.parseRequest());
- ScopedTransaction scopedXact(opCtx, MODE_IX);
boost::optional<AutoGetCollection> collection;
while (true) {
opCtx->checkForInterrupt();
@@ -631,7 +630,6 @@ static WriteResult::SingleResult performSingleDeleteOp(OperationContext* opCtx,
uasserted(ErrorCodes::InternalError, "failAllRemoves failpoint active!");
}
- ScopedTransaction scopedXact(opCtx, MODE_IX);
AutoGetCollection collection(opCtx,
ns,
MODE_IX, // DB is always IX, even if collection is X.
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index c1d6dc743ac..262deeb140b 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -96,7 +96,7 @@ public:
}
bool isSharded(const NamespaceString& nss) final {
- AutoGetCollectionForRead autoColl(_ctx->opCtx, nss);
+ AutoGetCollectionForReadCommand autoColl(_ctx->opCtx, nss);
// TODO SERVER-24960: Use CollectionShardingState::collectionIsSharded() to confirm sharding
// state.
auto css = CollectionShardingState::get(_ctx->opCtx, nss);
@@ -114,7 +114,7 @@ public:
CollectionIndexUsageMap getIndexStats(OperationContext* opCtx,
const NamespaceString& ns) final {
- AutoGetCollectionForRead autoColl(opCtx, ns);
+ AutoGetCollectionForReadCommand autoColl(opCtx, ns);
Collection* collection = autoColl.getCollection();
if (!collection) {
@@ -149,7 +149,7 @@ public:
const NamespaceString& targetNs,
const BSONObj& originalCollectionOptions,
const std::list<BSONObj>& originalIndexes) final {
- Lock::GlobalWrite globalLock(_ctx->opCtx->lockState());
+ Lock::GlobalWrite globalLock(_ctx->opCtx);
if (SimpleBSONObjComparator::kInstance.evaluate(originalCollectionOptions !=
getCollectionOptions(targetNs))) {
@@ -193,7 +193,7 @@ public:
pipeline.getValue()->optimizePipeline();
- AutoGetCollectionForRead autoColl(expCtx->opCtx, expCtx->ns);
+ AutoGetCollectionForReadCommand autoColl(expCtx->opCtx, expCtx->ns);
// makePipeline() is only called to perform secondary aggregation requests and expects the
// collection representing the document source to be not-sharded. We confirm sharding state
@@ -269,7 +269,7 @@ StatusWith<unique_ptr<PlanExecutor>> createRandomCursorExecutor(Collection* coll
}
{
- AutoGetCollection autoColl(opCtx, collection->ns(), MODE_IS);
+ AutoGetCollectionForRead autoColl(opCtx, collection->ns());
// If we're in a sharded environment, we need to filter out documents we don't own.
if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, collection->ns().ns())) {
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index 48d36f427d6..043f9bdd478 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/commands/server_status_metric.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 9f709bd8485..44f69823096 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -256,7 +256,7 @@ Message getMore(OperationContext* opCtx,
// Note that we declare our locks before our ClientCursorPin, in order to ensure that the
// pin's destructor is called before the lock destructors (so that the unpin occurs under
// the lock).
- unique_ptr<AutoGetCollectionForRead> ctx;
+ unique_ptr<AutoGetCollectionForReadCommand> ctx;
unique_ptr<Lock::DBLock> unpinDBLock;
unique_ptr<Lock::CollectionLock> unpinCollLock;
@@ -267,8 +267,8 @@ Message getMore(OperationContext* opCtx,
// the data within a collection.
cursorManager = CursorManager::getGlobalCursorManager();
} else {
- ctx = stdx::make_unique<AutoGetCollectionOrViewForRead>(opCtx, nss);
- auto viewCtx = static_cast<AutoGetCollectionOrViewForRead*>(ctx.get());
+ ctx = stdx::make_unique<AutoGetCollectionOrViewForReadCommand>(opCtx, nss);
+ auto viewCtx = static_cast<AutoGetCollectionOrViewForReadCommand*>(ctx.get());
if (viewCtx->getView()) {
uasserted(
ErrorCodes::CommandNotSupportedOnView,
@@ -414,7 +414,7 @@ Message getMore(OperationContext* opCtx,
curOp.setExpectedLatencyMs(durationCount<Milliseconds>(timeout));
// Reacquiring locks.
- ctx = make_unique<AutoGetCollectionForRead>(opCtx, nss);
+ ctx = make_unique<AutoGetCollectionForReadCommand>(opCtx, nss);
exec->restoreState();
// We woke up because either the timed_wait expired, or there was more data. Either
@@ -449,7 +449,7 @@ Message getMore(OperationContext* opCtx,
// if the cursor is aggregation, we release these locks.
if (cc->isAggCursor()) {
invariant(NULL == ctx.get());
- unpinDBLock = make_unique<Lock::DBLock>(opCtx->lockState(), nss.db(), MODE_IS);
+ unpinDBLock = make_unique<Lock::DBLock>(opCtx, nss.db(), MODE_IS);
unpinCollLock =
make_unique<Lock::CollectionLock>(opCtx->lockState(), nss.ns(), MODE_IS);
}
@@ -531,7 +531,7 @@ std::string runQuery(OperationContext* opCtx,
LOG(2) << "Running query: " << redact(cq->toStringShort());
// Parse, canonicalize, plan, transcribe, and get a plan executor.
- AutoGetCollectionOrViewForRead ctx(opCtx, nss);
+ AutoGetCollectionOrViewForReadCommand ctx(opCtx, nss);
Collection* collection = ctx.getCollection();
if (ctx.getView()) {
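The ordering comment in getMore() above (locks declared before the ClientCursorPin so that the unpin runs under the lock) relies only on C++ destruction order of locals. A self-contained sketch with stand-in types, purely for illustration:

#include <iostream>

// Stand-ins for the real lock and cursor-pin types; only the destructor order matters here.
struct FakeLock {
    ~FakeLock() { std::cout << "lock released\n"; }
};
struct FakePin {
    ~FakePin() { std::cout << "cursor unpinned\n"; }
};

int main() {
    FakeLock lock;  // declared first, destroyed last
    FakePin pin;    // declared second, destroyed first
    return 0;
    // Output: "cursor unpinned" then "lock released" -- the unpin happens while the lock is held.
}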
diff --git a/src/mongo/db/read_concern.cpp b/src/mongo/db/read_concern.cpp
index 2ae0cc45aee..80602ff2fce 100644
--- a/src/mongo/db/read_concern.cpp
+++ b/src/mongo/db/read_concern.cpp
@@ -35,6 +35,7 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
#include "mongo/db/commands.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/curop.h"
#include "mongo/db/op_observer.h"
@@ -136,8 +137,7 @@ Status waitForLinearizableReadConcern(OperationContext* opCtx) {
repl::ReplicationCoordinator::get(opCtx->getClient()->getServiceContext());
{
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock lk(opCtx->lockState(), "local", MODE_IX);
+ Lock::DBLock lk(opCtx, "local", MODE_IX);
Lock::CollectionLock lock(opCtx->lockState(), "local.oplog.rs", MODE_IX);
if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) {
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index c0211a21022..86cc962c926 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -660,8 +660,7 @@ OpTimeWithHash BackgroundSync::_readLastAppliedOpTimeWithHash(OperationContext*
BSONObj oplogEntry;
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock lk(opCtx->lockState(), "local", MODE_X);
+ Lock::DBLock lk(opCtx, "local", MODE_X);
bool success = Helpers::getLast(opCtx, rsOplogName.c_str(), oplogEntry);
if (!success) {
// This can happen when we are to do an initial sync. lastHash will be set
diff --git a/src/mongo/db/repl/database_task.cpp b/src/mongo/db/repl/database_task.cpp
index 5c4f9422ea9..41f29aff342 100644
--- a/src/mongo/db/repl/database_task.cpp
+++ b/src/mongo/db/repl/database_task.cpp
@@ -46,8 +46,7 @@ DatabaseTask::Task DatabaseTask::makeGlobalExclusiveLockTask(const Task& task) {
return task(opCtx, status);
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lock(opCtx->lockState());
+ Lock::GlobalWrite lock(opCtx);
return task(opCtx, status);
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "globalExclusiveLockTask", "global");
@@ -66,9 +65,7 @@ DatabaseTask::Task DatabaseTask::makeDatabaseLockTask(const Task& task,
return task(opCtx, status);
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- LockMode permissiveLockMode = isSharedLockMode(mode) ? MODE_IS : MODE_IX;
- ScopedTransaction transaction(opCtx, permissiveLockMode);
- Lock::DBLock lock(opCtx->lockState(), databaseName, mode);
+ Lock::DBLock lock(opCtx, databaseName, mode);
return task(opCtx, status);
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "databaseLockTask", databaseName);
@@ -88,8 +85,7 @@ DatabaseTask::Task DatabaseTask::makeCollectionLockTask(const Task& task,
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
LockMode permissiveLockMode = isSharedLockMode(mode) ? MODE_IS : MODE_IX;
- ScopedTransaction transaction(opCtx, permissiveLockMode);
- Lock::DBLock lock(opCtx->lockState(), nss.db(), permissiveLockMode);
+ Lock::DBLock lock(opCtx, nss.db(), permissiveLockMode);
Lock::CollectionLock collectionLock(opCtx->lockState(), nss.toString(), mode);
return task(opCtx, status);
}
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index f76856d2802..8b11676d9b1 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -196,8 +196,7 @@ void ReplSource::ensureMe(OperationContext* opCtx) {
bool exists = Helpers::getSingleton(opCtx, "local.me", _me);
if (!exists || !_me.hasField("host") || _me["host"].String() != myname) {
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dblk(opCtx->lockState(), "local", MODE_X);
+ Lock::DBLock dblk(opCtx, "local", MODE_X);
WriteUnitOfWork wunit(opCtx);
// clean out local.me
Helpers::emptyCollection(opCtx, "local.me");
@@ -771,7 +770,7 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* opCtx,
}
}
- unique_ptr<Lock::GlobalWrite> lk(alreadyLocked ? 0 : new Lock::GlobalWrite(opCtx->lockState()));
+ unique_ptr<Lock::GlobalWrite> lk(alreadyLocked ? 0 : new Lock::GlobalWrite(opCtx));
if (replAllDead) {
// hmmm why is this check here and not at top of this function? does it get set between top
@@ -914,8 +913,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* opCtx, int& nApplied) {
}
// obviously global isn't ideal, but non-repl set is old so
// keeping it simple
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
save(opCtx);
}
@@ -977,8 +975,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* opCtx, int& nApplied) {
log() << ns << " oplog is empty" << endl;
}
{
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
save(opCtx);
}
return okResultCode;
@@ -1045,8 +1042,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* opCtx, int& nApplied) {
const bool moreInitialSyncsPending = !addDbNextPass.empty() && n;
if (moreInitialSyncsPending || !oplogReader.more()) {
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
if (tailing) {
okResultCode = restartSync; // don't sleep
@@ -1060,8 +1056,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* opCtx, int& nApplied) {
OCCASIONALLY if (n > 0 && (n > 100000 || time(0) - saveLast > 60)) {
// periodically note our progress, in case we are doing a lot of work and crash
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
syncedTo = nextOpTime;
// can't update local log ts since there are pending operations from our peer
save(opCtx);
@@ -1075,8 +1070,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* opCtx, int& nApplied) {
int b = replApplyBatchSize.load();
bool justOne = b == 1;
- unique_ptr<Lock::GlobalWrite> lk(justOne ? 0
- : new Lock::GlobalWrite(opCtx->lockState()));
+ unique_ptr<Lock::GlobalWrite> lk(justOne ? 0 : new Lock::GlobalWrite(opCtx));
while (1) {
BSONElement ts = op.getField("ts");
if (!(ts.type() == Date || ts.type() == bsonTimestamp)) {
@@ -1108,8 +1102,7 @@ int ReplSource::_sync_pullOpLog(OperationContext* opCtx, int& nApplied) {
oplogReader.putBack(op);
_sleepAdviceTime = nextOpTime.getSecs() +
durationCount<Seconds>(replSettings.getSlaveDelaySecs()) + 1;
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
if (n > 0) {
syncedTo = last;
save(opCtx);
@@ -1191,8 +1184,7 @@ _ reuse that cursor when we can
int _replMain(OperationContext* opCtx, ReplSource::SourceVector& sources, int& nApplied) {
{
ReplInfo r("replMain load sources");
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
ReplSource::loadAll(opCtx, sources);
// only need this param for initial reset
@@ -1254,8 +1246,7 @@ static void replMain(OperationContext* opCtx) {
while (1) {
auto s = restartSync;
{
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
if (replAllDead) {
// throttledForceResyncDead can throw
if (!getGlobalReplicationCoordinator()->getSettings().isAutoResyncEnabled() ||
@@ -1288,8 +1279,7 @@ static void replMain(OperationContext* opCtx) {
}
{
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
invariant(syncing.swap(0) == 1);
}
@@ -1324,7 +1314,7 @@ static void replMasterThread() {
OperationContext& opCtx = *opCtxPtr;
AuthorizationSession::get(opCtx.getClient())->grantInternalAuthorization();
- Lock::GlobalWrite globalWrite(opCtx.lockState(), 1);
+ Lock::GlobalWrite globalWrite(&opCtx, 1);
if (globalWrite.isLocked()) {
toSleep = 10;
@@ -1405,9 +1395,8 @@ void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
Client::initThreadIfNotAlready("pretouchN");
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
- OperationContext& opCtx = *opCtxPtr;
- ScopedTransaction transaction(&opCtx, MODE_S);
- Lock::GlobalRead lk(opCtx.lockState());
+ OperationContext& opCtx = *opCtxPtr; // XXX
+ Lock::GlobalRead lk(&opCtx);
for (unsigned i = a; i <= b; i++) {
const BSONObj& op = v[i];
@@ -1464,7 +1453,7 @@ void pretouchOperation(OperationContext* opCtx, const BSONObj& op) {
BSONObjBuilder b;
b.append(_id);
BSONObj result;
- AutoGetCollectionForRead ctx(opCtx, NamespaceString(ns));
+ AutoGetCollectionForReadCommand ctx(opCtx, NamespaceString(ns));
if (Helpers::findById(opCtx, ctx.getDb(), ns, b.done(), result)) {
_dummy_z += result.objsize(); // touch
}
diff --git a/src/mongo/db/repl/noop_writer.cpp b/src/mongo/db/repl/noop_writer.cpp
index f127b236614..926db4aff07 100644
--- a/src/mongo/db/repl/noop_writer.cpp
+++ b/src/mongo/db/repl/noop_writer.cpp
@@ -31,6 +31,7 @@
#include "mongo/platform/basic.h"
#include "mongo/db/commands.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/curop.h"
#include "mongo/db/op_observer.h"
@@ -136,10 +137,9 @@ void NoopWriter::stopWritingPeriodicNoops() {
}
void NoopWriter::_writeNoop(OperationContext* opCtx) {
- ScopedTransaction transaction(opCtx, MODE_IX);
// Use GlobalLock + lockMMAPV1Flush instead of DBLock to allow return when the lock is not
// available. It may happen when the primary steps down and a shared global lock is acquired.
- Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ Lock::GlobalLock lock(opCtx, MODE_IX, 1);
if (!lock.isLocked()) {
LOG(1) << "Global lock is not available skipping noopWrite";
return;
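The comment above explains why _writeNoop() takes a global lock with a timeout instead of a DBLock. A minimal sketch of that acquire-or-give-up pattern, assuming (as in the call above) that GlobalLock's third argument is a timeout; the function name is illustrative.

void tryPeriodicWrite(OperationContext* opCtx) {
    // Wait only briefly for the global IX lock; if it cannot be acquired (for example while a
    // stepdown holds a shared global lock), skip this round instead of blocking.
    Lock::GlobalLock lock(opCtx, MODE_IX, 1);
    if (!lock.isLocked()) {
        return;
    }
    // ... perform the periodic write under the global IX lock ...
}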
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 3d5547ed526..258180af98a 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -298,7 +298,6 @@ OplogDocWriter _logOpWriter(OperationContext* opCtx,
// Truncates the oplog after and including the "truncateTimestamp" entry.
void truncateOplogTo(OperationContext* opCtx, Timestamp truncateTimestamp) {
const NamespaceString oplogNss(rsOplogName);
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetDb autoDb(opCtx, oplogNss.db(), MODE_IX);
Lock::CollectionLock oplogCollectionLoc(opCtx->lockState(), oplogNss.ns(), MODE_X);
Collection* oplogCollection = autoDb.getDb()->getCollection(oplogNss);
@@ -402,7 +401,7 @@ void logOp(OperationContext* opCtx,
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
Collection* oplog = getLocalOplogCollection(opCtx, _oplogCollectionName);
- Lock::DBLock lk(opCtx->lockState(), "local", MODE_IX);
+ Lock::DBLock lk(opCtx, "local", MODE_IX);
Lock::CollectionLock lock(opCtx->lockState(), _oplogCollectionName, MODE_IX);
OplogSlot slot;
getNextOpTime(opCtx, oplog, replCoord, replMode, 1, &slot);
@@ -428,7 +427,7 @@ void logOps(OperationContext* opCtx,
std::vector<OplogDocWriter> writers;
writers.reserve(count);
Collection* oplog = getLocalOplogCollection(opCtx, _oplogCollectionName);
- Lock::DBLock lk(opCtx->lockState(), "local", MODE_IX);
+ Lock::DBLock lk(opCtx, "local", MODE_IX);
Lock::CollectionLock lock(opCtx->lockState(), _oplogCollectionName, MODE_IX);
std::unique_ptr<OplogSlot[]> slots(new OplogSlot[count]);
getNextOpTime(opCtx, oplog, replCoord, replMode, count, slots.get());
@@ -490,8 +489,7 @@ long long getNewOplogSizeBytes(OperationContext* opCtx, const ReplSettings& repl
} // namespace
void createOplog(OperationContext* opCtx, const std::string& oplogCollectionName, bool isReplSet) {
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
const ReplSettings& replSettings = ReplicationCoordinator::get(opCtx)->getSettings();
@@ -1221,7 +1219,7 @@ void SnapshotThread::run() {
try {
auto opCtx = client.makeOperationContext();
- Lock::GlobalLock globalLock(opCtx->lockState(), MODE_IS, UINT_MAX);
+ Lock::GlobalLock globalLock(opCtx.get(), MODE_IS, UINT_MAX);
if (!replCoord->getMemberState().readable()) {
// If our MemberState isn't readable, we may not be in a consistent state so don't
@@ -1253,7 +1251,7 @@ void SnapshotThread::run() {
auto opTimeOfSnapshot = OpTime();
{
- AutoGetCollectionForRead oplog(opCtx.get(), NamespaceString(rsOplogName));
+ AutoGetCollectionForReadCommand oplog(opCtx.get(), NamespaceString(rsOplogName));
invariant(oplog.getCollection());
// Read the latest op from the oplog.
auto cursor = oplog.getCollection()->getCursor(opCtx.get(), /*forward*/ false);
diff --git a/src/mongo/db/repl/oplog_buffer_collection_test.cpp b/src/mongo/db/repl/oplog_buffer_collection_test.cpp
index 1ee2c3ccc8c..3e7d172b7c8 100644
--- a/src/mongo/db/repl/oplog_buffer_collection_test.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection_test.cpp
@@ -71,8 +71,8 @@ void OplogBufferCollectionTest::setUp() {
ServiceContextMongoDTest::setUp();
auto service = getServiceContext();
- // AutoGetCollectionForRead requires a valid replication coordinator in order to check the shard
- // version.
+ // AutoGetCollectionForReadCommand requires a valid replication coordinator in order to check
+ // the shard version.
ReplSettings replSettings;
replSettings.setOplogSizeBytes(5 * 1024 * 1024);
ReplicationCoordinator::set(
@@ -137,10 +137,10 @@ void testStartupCreatesCollection(OperationContext* opCtx,
OplogBufferCollection oplogBuffer(storageInterface, nss);
// Collection should not exist until startup() is called.
- ASSERT_FALSE(AutoGetCollectionForRead(opCtx, nss).getCollection());
+ ASSERT_FALSE(AutoGetCollectionForReadCommand(opCtx, nss).getCollection());
oplogBuffer.startup(opCtx);
- ASSERT_TRUE(AutoGetCollectionForRead(opCtx, nss).getCollection());
+ ASSERT_TRUE(AutoGetCollectionForReadCommand(opCtx, nss).getCollection());
}
TEST_F(OplogBufferCollectionTest, StartupWithDefaultNamespaceCreatesCollection) {
@@ -158,7 +158,7 @@ TEST_F(OplogBufferCollectionTest, StartupDropsExistingCollectionBeforeCreatingNe
ASSERT_OK(_storageInterface->createCollection(_opCtx.get(), nss, CollectionOptions()));
OplogBufferCollection oplogBuffer(_storageInterface, nss);
oplogBuffer.startup(_opCtx.get());
- ASSERT_TRUE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection());
+ ASSERT_TRUE(AutoGetCollectionForReadCommand(_opCtx.get(), nss).getCollection());
}
DEATH_TEST_F(OplogBufferCollectionTest,
@@ -172,9 +172,9 @@ TEST_F(OplogBufferCollectionTest, ShutdownDropsCollection) {
OplogBufferCollection oplogBuffer(_storageInterface, nss);
oplogBuffer.startup(_opCtx.get());
- ASSERT_TRUE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection());
+ ASSERT_TRUE(AutoGetCollectionForReadCommand(_opCtx.get(), nss).getCollection());
oplogBuffer.shutdown(_opCtx.get());
- ASSERT_FALSE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection());
+ ASSERT_FALSE(AutoGetCollectionForReadCommand(_opCtx.get(), nss).getCollection());
}
TEST_F(OplogBufferCollectionTest, extractEmbeddedOplogDocumentChangesIdToTimestamp) {
@@ -542,7 +542,7 @@ TEST_F(OplogBufferCollectionTest, ClearClearsCollection) {
_assertDocumentsInCollectionEquals(_opCtx.get(), nss, {oplog, sentinel, oplog2});
oplogBuffer.clear(_opCtx.get());
- ASSERT_TRUE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection());
+ ASSERT_TRUE(AutoGetCollectionForReadCommand(_opCtx.get(), nss).getCollection());
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
ASSERT_EQUALS(oplogBuffer.getSize(), 0UL);
ASSERT_EQUALS(0U, oplogBuffer.getSentinelCount_forTest());
diff --git a/src/mongo/db/repl/oplog_interface_local.cpp b/src/mongo/db/repl/oplog_interface_local.cpp
index a68fd8db0d6..c27e8ea798e 100644
--- a/src/mongo/db/repl/oplog_interface_local.cpp
+++ b/src/mongo/db/repl/oplog_interface_local.cpp
@@ -48,7 +48,6 @@ public:
StatusWith<Value> next() override;
private:
- ScopedTransaction _transaction;
Lock::DBLock _dbLock;
Lock::CollectionLock _collectionLock;
OldClientContext _ctx;
@@ -56,8 +55,7 @@ private:
};
OplogIteratorLocal::OplogIteratorLocal(OperationContext* opCtx, const std::string& collectionName)
- : _transaction(opCtx, MODE_IS),
- _dbLock(opCtx->lockState(), nsToDatabase(collectionName), MODE_IS),
+ : _dbLock(opCtx, nsToDatabase(collectionName), MODE_IS),
_collectionLock(opCtx->lockState(), collectionName, MODE_S),
_ctx(opCtx, collectionName),
_exec(InternalPlanner::collectionScan(opCtx,
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index ab756559784..dec36e4d1af 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -429,8 +429,7 @@ public:
status =
getGlobalReplicationCoordinator()->processReplSetReconfig(opCtx, parsedArgs, &result);
- ScopedTransaction scopedXact(opCtx, MODE_X);
- Lock::GlobalWrite globalWrite(opCtx->lockState());
+ Lock::GlobalWrite globalWrite(opCtx);
WriteUnitOfWork wuow(opCtx);
if (status.isOK() && !parsedArgs.force) {
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index c2619816034..e85ec3c8095 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -365,9 +365,8 @@ OldThreadPool* ReplicationCoordinatorExternalStateImpl::getDbWorkThreadPool() co
Status ReplicationCoordinatorExternalStateImpl::runRepairOnLocalDB(OperationContext* opCtx) {
try {
- ScopedTransaction scopedXact(opCtx, MODE_X);
- Lock::GlobalWrite globalWrite(opCtx->lockState());
- StorageEngine* engine = _service->getGlobalStorageEngine();
+ Lock::GlobalWrite globalWrite(opCtx);
+ StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
if (!engine->isMmapV1()) {
return Status::OK();
@@ -390,8 +389,7 @@ Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(Operati
createOplog(opCtx);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction scopedXact(opCtx, MODE_X);
- Lock::GlobalWrite globalWrite(opCtx->lockState());
+ Lock::GlobalWrite globalWrite(opCtx);
WriteUnitOfWork wuow(opCtx);
Helpers::putSingleton(opCtx, configCollectionName, config);
@@ -436,8 +434,6 @@ OpTime ReplicationCoordinatorExternalStateImpl::onTransitionToPrimary(OperationC
if (isV1ElectionProtocol) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction scopedXact(opCtx, MODE_X);
-
WriteUnitOfWork wuow(opCtx);
opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage(
opCtx,
@@ -466,8 +462,7 @@ OID ReplicationCoordinatorExternalStateImpl::ensureMe(OperationContext* opCtx) {
std::string myname = getHostName();
OID myRID;
{
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock lock(opCtx->lockState(), meDatabaseName, MODE_X);
+ Lock::DBLock lock(opCtx, meDatabaseName, MODE_X);
BSONObj me;
// local.me is an identifier for a server for getLastError w:2+
@@ -514,8 +509,7 @@ Status ReplicationCoordinatorExternalStateImpl::storeLocalConfigDocument(Operati
const BSONObj& config) {
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbWriteLock(opCtx->lockState(), configDatabaseName, MODE_X);
+ Lock::DBLock dbWriteLock(opCtx, configDatabaseName, MODE_X);
Helpers::putSingleton(opCtx, configCollectionName, config);
return Status::OK();
}
@@ -550,8 +544,7 @@ Status ReplicationCoordinatorExternalStateImpl::storeLocalLastVoteDocument(
BSONObj lastVoteObj = lastVote.toBSON();
try {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbWriteLock(opCtx->lockState(), lastVoteDatabaseName, MODE_X);
+ Lock::DBLock dbWriteLock(opCtx, lastVoteDatabaseName, MODE_X);
// If there is no last vote document, we want to store one. Otherwise, we only want to
// replace it if the new last vote document would have a higher term. We both check
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 18a25b08f26..963672cc67e 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -938,8 +938,7 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx,
}
}
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite globalWriteLock(opCtx->lockState());
+ Lock::GlobalWrite globalWriteLock(opCtx);
lk.lock();
// Exit drain mode when the buffer is empty in the current term and we're in Draining mode.
@@ -1806,7 +1805,7 @@ Status ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
return {ErrorCodes::NotMaster, "not primary so can't step down"};
}
- Lock::GlobalLock globalReadLock(opCtx->lockState(), MODE_S, Lock::GlobalLock::EnqueueOnly());
+ Lock::GlobalLock globalReadLock(opCtx, MODE_S, Lock::GlobalLock::EnqueueOnly());
// We've requested the global shared lock which will stop new writes from coming in,
// but existing writes could take a long time to finish, so kill all user operations
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index be2370b7108..34bb0a9c101 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -1431,7 +1431,7 @@ TEST_F(PrimaryCatchUpTest, PrimaryDoNotNeedToCatchUp) {
ASSERT_EQUALS(1, countLogLinesContaining("My optime is most up-to-date, skipping catch-up"));
auto opCtx = makeOperationContext();
getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm());
- Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, 1);
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -1455,7 +1455,7 @@ TEST_F(PrimaryCatchUpTest, PrimaryFreshnessScanTimeout) {
ASSERT_EQUALS(1, countLogLinesContaining("Could not access any nodes within timeout"));
auto opCtx = makeOperationContext();
getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm());
- Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, 1);
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -1485,7 +1485,7 @@ TEST_F(PrimaryCatchUpTest, PrimaryCatchUpSucceeds) {
ASSERT_EQUALS(1, countLogLinesContaining("Finished catch-up oplog after becoming primary."));
auto opCtx = makeOperationContext();
getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm());
- Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, 1);
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -1509,7 +1509,7 @@ TEST_F(PrimaryCatchUpTest, PrimaryCatchUpTimeout) {
ASSERT_EQUALS(1, countLogLinesContaining("Cannot catch up oplog after becoming primary"));
auto opCtx = makeOperationContext();
getReplCoord()->signalDrainComplete(opCtx.get(), getReplCoord()->getTerm());
- Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, 1);
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -1537,7 +1537,7 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringFreshnessScan) {
stopCapturingLogMessages();
ASSERT_EQUALS(1, countLogLinesContaining("Stopped transition to primary"));
auto opCtx = makeOperationContext();
- Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, 1);
ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -1571,7 +1571,7 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringCatchUp) {
ASSERT(getReplCoord()->getApplierState() == ApplierState::Running);
stopCapturingLogMessages();
ASSERT_EQUALS(1, countLogLinesContaining("Cannot catch up oplog after becoming primary"));
- Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, 1);
ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
@@ -1620,11 +1620,11 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringDrainMode) {
ASSERT(replCoord->getApplierState() == ApplierState::Draining);
auto opCtx = makeOperationContext();
{
- Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, 1);
ASSERT_FALSE(replCoord->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
replCoord->signalDrainComplete(opCtx.get(), replCoord->getTerm());
- Lock::GlobalLock lock(opCtx->lockState(), MODE_IX, 1);
+ Lock::GlobalLock lock(opCtx.get(), MODE_IX, 1);
ASSERT(replCoord->getApplierState() == ApplierState::Stopped);
ASSERT_TRUE(replCoord->canAcceptWritesForDatabase(opCtx.get(), "test"));
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index ae8d5b5fe92..e69c85d57d1 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -1605,7 +1605,7 @@ TEST_F(StepDownTest,
const auto opCtx = makeOperationContext();
// Make sure stepDown cannot grab the global shared lock
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx.get());
Status status =
getReplCoord()->stepDown(opCtx.get(), false, Milliseconds(0), Milliseconds(1000));
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index e0a49f5849f..28bc80d04c8 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -95,7 +95,7 @@ void appendReplicationInfo(OperationContext* opCtx, BSONObjBuilder& result, int
list<BSONObj> src;
{
const NamespaceString localSources{"local.sources"};
- AutoGetCollectionForRead ctx(opCtx, localSources);
+ AutoGetCollectionForReadCommand ctx(opCtx, localSources);
unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
opCtx, localSources.ns(), ctx.getCollection(), PlanExecutor::YIELD_MANUAL));
BSONObj obj;
diff --git a/src/mongo/db/repl/resync.cpp b/src/mongo/db/repl/resync.cpp
index 848700d215a..67b230af0af 100644
--- a/src/mongo/db/repl/resync.cpp
+++ b/src/mongo/db/repl/resync.cpp
@@ -105,8 +105,7 @@ public:
}
// Master/Slave resync.
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite globalWriteLock(opCtx->lockState());
+ Lock::GlobalWrite globalWriteLock(opCtx);
// below this comment pertains only to master/slave replication
if (cmdObj.getBoolField("force")) {
if (!waitForSyncToFinish(opCtx, errmsg))
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 32d03e31902..7c882dbe3c4 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -144,8 +144,7 @@ bool _initialSyncClone(OperationContext* opCtx,
options.createCollections = false;
// Make database stable
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbWrite(opCtx->lockState(), db, MODE_X);
+ Lock::DBLock dbWrite(opCtx, db, MODE_X);
Status status = cloner.copyDb(opCtx, db, host, options, nullptr, collections);
if (!status.isOK()) {
@@ -360,8 +359,7 @@ Status _initialSync(OperationContext* opCtx, BackgroundSync* bgsync) {
createCollectionParams.push_back(params);
}
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbWrite(opCtx->lockState(), db, MODE_X);
+ Lock::DBLock dbWrite(opCtx, db, MODE_X);
auto createStatus = cloner.createCollectionsForDb(opCtx, createCollectionParams, db);
if (!createStatus.isOK()) {
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 88fbf63f036..d32404132e4 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -438,8 +438,7 @@ void syncFixUp(OperationContext* opCtx,
{
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbLock(opCtx, nss.db(), MODE_X);
Database* db = dbHolder().openDb(opCtx, nss.db().toString());
invariant(db);
WriteUnitOfWork wunit(opCtx);
@@ -454,8 +453,7 @@ void syncFixUp(OperationContext* opCtx,
log() << "rollback 4.1.2 coll metadata resync " << ns;
const NamespaceString nss(ns);
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbLock(opCtx, nss.db(), MODE_X);
auto db = dbHolder().openDb(opCtx, nss.db().toString());
invariant(db);
auto collection = db->getCollection(ns);
@@ -534,9 +532,8 @@ void syncFixUp(OperationContext* opCtx,
invariant(!fixUpInfo.indexesToDrop.count(*it));
- ScopedTransaction transaction(opCtx, MODE_IX);
const NamespaceString nss(*it);
- Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbLock(opCtx, nss.db(), MODE_X);
Database* db = dbHolder().get(opCtx, nsToDatabaseSubstring(*it));
if (db) {
Helpers::RemoveSaver removeSaver("rollback", "", *it);
@@ -580,8 +577,7 @@ void syncFixUp(OperationContext* opCtx,
const string& indexName = it->second;
log() << "rollback drop index: collection: " << nss.toString() << ". index: " << indexName;
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbLock(opCtx, nss.db(), MODE_X);
auto db = dbHolder().get(opCtx, nss.db());
if (!db) {
continue;
@@ -641,8 +637,7 @@ void syncFixUp(OperationContext* opCtx,
// TODO: Lots of overhead in context. This can be faster.
const NamespaceString docNss(doc.ns);
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock docDbLock(opCtx->lockState(), docNss.db(), MODE_X);
+ Lock::DBLock docDbLock(opCtx, docNss.db(), MODE_X);
OldClientContext ctx(opCtx, doc.ns);
Collection* collection = ctx.db()->getCollection(doc.ns);
@@ -761,8 +756,7 @@ void syncFixUp(OperationContext* opCtx,
LOG(2) << "rollback truncate oplog after " << fixUpInfo.commonPoint.toString();
{
const NamespaceString oplogNss(rsOplogName);
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock oplogDbLock(opCtx->lockState(), oplogNss.db(), MODE_IX);
+ Lock::DBLock oplogDbLock(opCtx, oplogNss.db(), MODE_IX);
Lock::CollectionLock oplogCollectionLoc(opCtx->lockState(), oplogNss.ns(), MODE_X);
OldClientContext ctx(opCtx, rsOplogName);
Collection* oplogCollection = ctx.db()->getCollection(rsOplogName);
@@ -895,7 +889,7 @@ void rollback(OperationContext* opCtx,
// then.
{
log() << "rollback 0";
- Lock::GlobalWrite globalWrite(opCtx->lockState());
+ Lock::GlobalWrite globalWrite(opCtx);
if (!replCoord->setFollowerMode(MemberState::RS_ROLLBACK)) {
log() << "Cannot transition from " << replCoord->getMemberState().toString() << " to "
<< MemberState(MemberState::RS_ROLLBACK).toString();
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index 5df25a37427..90843d9a445 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -292,7 +292,7 @@ TEST_F(RSRollbackTest, BothOplogsAtCommonPoint) {
Collection* _createCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) {
- Lock::DBLock dbLock(opCtx->lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbLock(opCtx, nss.db(), MODE_X);
mongo::WriteUnitOfWork wuow(opCtx);
auto db = dbHolder().openDb(opCtx, nss.db());
ASSERT_TRUE(db);
@@ -355,7 +355,7 @@ int _testRollbackDelete(OperationContext* opCtx,
storageInterface));
ASSERT_TRUE(rollbackSource.called);
- Lock::DBLock dbLock(opCtx->lockState(), "test", MODE_S);
+ Lock::DBLock dbLock(opCtx, "test", MODE_S);
Lock::CollectionLock collLock(opCtx->lockState(), "test.t", MODE_S);
auto db = dbHolder().get(opCtx, "test");
ASSERT_TRUE(db);
@@ -451,7 +451,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
<< "v"
<< static_cast<int>(kIndexVersion));
{
- Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_X);
+ Lock::DBLock dbLock(_opCtx.get(), "test", MODE_X);
MultiIndexBlock indexer(_opCtx.get(), collection);
ASSERT_OK(indexer.init(indexSpec).getStatus());
WriteUnitOfWork wunit(_opCtx.get());
@@ -502,7 +502,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
countLogLinesContaining("rollback drop index: collection: test.t. index: a_1"));
ASSERT_FALSE(rollbackSource.called);
{
- Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S);
+ Lock::DBLock dbLock(_opCtx.get(), "test", MODE_S);
auto indexCatalog = collection->getIndexCatalog();
ASSERT(indexCatalog);
ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_opCtx.get()));
@@ -520,7 +520,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
<< "a_1");
// Skip index creation to trigger warning during rollback.
{
- Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S);
+ Lock::DBLock dbLock(_opCtx.get(), "test", MODE_S);
auto indexCatalog = collection->getIndexCatalog();
ASSERT(indexCatalog);
ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_opCtx.get()));
@@ -564,7 +564,7 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
ASSERT_EQUALS(1, countLogLinesContaining("rollback failed to drop index a_1 in test.t"));
ASSERT_FALSE(rollbackSource.called);
{
- Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S);
+ Lock::DBLock dbLock(_opCtx.get(), "test", MODE_S);
auto indexCatalog = collection->getIndexCatalog();
ASSERT(indexCatalog);
ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_opCtx.get()));
@@ -619,7 +619,7 @@ TEST_F(RSRollbackTest, RollbackDropIndexCommandWithOneIndex) {
createOplog(_opCtx.get());
auto collection = _createCollection(_opCtx.get(), "test.t", CollectionOptions());
{
- Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S);
+ Lock::DBLock dbLock(_opCtx.get(), "test", MODE_S);
auto indexCatalog = collection->getIndexCatalog();
ASSERT(indexCatalog);
ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_opCtx.get()));
@@ -677,7 +677,7 @@ TEST_F(RSRollbackTest, RollbackDropIndexCommandWithMultipleIndexes) {
createOplog(_opCtx.get());
auto collection = _createCollection(_opCtx.get(), "test.t", CollectionOptions());
{
- Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S);
+ Lock::DBLock dbLock(_opCtx.get(), "test", MODE_S);
auto indexCatalog = collection->getIndexCatalog();
ASSERT(indexCatalog);
ASSERT_EQUALS(1, indexCatalog->numIndexesReady(_opCtx.get()));
@@ -860,7 +860,7 @@ TEST_F(RSRollbackTest, RollbackUnknownCommand) {
<< "t")),
RecordId(2));
{
- Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_X);
+ Lock::DBLock dbLock(_opCtx.get(), "test", MODE_X);
mongo::WriteUnitOfWork wuow(_opCtx.get());
auto db = dbHolder().openDb(_opCtx.get(), "test");
ASSERT_TRUE(db);
@@ -1083,7 +1083,7 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
ASSERT_EQUALS(1U, rollbackSource.searchedIds.count(3));
ASSERT_EQUALS(1U, rollbackSource.searchedIds.count(4));
- AutoGetCollectionForRead acr(_opCtx.get(), NamespaceString("test.t"));
+ AutoGetCollectionForReadCommand acr(_opCtx.get(), NamespaceString("test.t"));
BSONObj result;
ASSERT(Helpers::findOne(_opCtx.get(), acr.getCollection(), BSON("_id" << 1), result));
ASSERT_EQUALS(1, result["v"].numberInt()) << result;
@@ -1119,7 +1119,7 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
_coordinator,
&_storageInterface));
{
- Lock::DBLock dbLock(_opCtx->lockState(), "test", MODE_S);
+ Lock::DBLock dbLock(_opCtx.get(), "test", MODE_S);
auto db = dbHolder().get(_opCtx.get(), "test");
ASSERT_TRUE(db);
ASSERT_FALSE(db->getCollection("test.t"));
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index bece9cedeb6..b98af9c8537 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -104,8 +104,7 @@ NamespaceString StorageInterfaceImpl::getMinValidNss() const {
BSONObj StorageInterfaceImpl::getMinValidDocument(OperationContext* opCtx) const {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IS);
- Lock::DBLock dblk(opCtx->lockState(), _minValidNss.db(), MODE_IS);
+ Lock::DBLock dblk(opCtx, _minValidNss.db(), MODE_IS);
Lock::CollectionLock lk(opCtx->lockState(), _minValidNss.ns(), MODE_IS);
BSONObj doc;
bool found = Helpers::getSingleton(opCtx, _minValidNss.ns().c_str(), doc);
@@ -121,9 +120,8 @@ BSONObj StorageInterfaceImpl::getMinValidDocument(OperationContext* opCtx) const
void StorageInterfaceImpl::updateMinValidDocument(OperationContext* opCtx,
const BSONObj& updateSpec) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
// For now this needs to be MODE_X because it sometimes creates the collection.
- Lock::DBLock dblk(opCtx->lockState(), _minValidNss.db(), MODE_X);
+ Lock::DBLock dblk(opCtx, _minValidNss.db(), MODE_X);
Helpers::putSingleton(opCtx, _minValidNss.ns().c_str(), updateSpec);
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
@@ -263,7 +261,6 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
// Retry if WCE.
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
// Get locks and create the collection.
- ScopedTransaction transaction(opCtx, MODE_IX);
auto db = stdx::make_unique<AutoGetOrCreateDb>(opCtx, nss.db(), MODE_IX);
auto coll = stdx::make_unique<AutoGetCollection>(opCtx, nss, MODE_X);
collection = coll->getCollection();
@@ -321,7 +318,6 @@ Status insertDocumentsSingleBatch(OperationContext* opCtx,
const NamespaceString& nss,
std::vector<BSONObj>::const_iterator begin,
std::vector<BSONObj>::const_iterator end) {
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
auto collection = autoColl.getCollection();
if (!collection) {
@@ -384,7 +380,7 @@ Status StorageInterfaceImpl::createOplog(OperationContext* opCtx, const Namespac
StatusWith<size_t> StorageInterfaceImpl::getOplogMaxSize(OperationContext* opCtx,
const NamespaceString& nss) {
- AutoGetCollectionForRead collection(opCtx, nss);
+ AutoGetCollectionForReadCommand collection(opCtx, nss);
if (!collection.getCollection()) {
return {ErrorCodes::NamespaceNotFound,
str::stream() << "Your oplog doesn't exist: " << nss.ns()};
@@ -401,7 +397,6 @@ Status StorageInterfaceImpl::createCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetOrCreateDb databaseWriteGuard(opCtx, nss.db(), MODE_X);
auto db = databaseWriteGuard.getDb();
invariant(db);
@@ -424,7 +419,6 @@ Status StorageInterfaceImpl::createCollection(OperationContext* opCtx,
Status StorageInterfaceImpl::dropCollection(OperationContext* opCtx, const NamespaceString& nss) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetDb autoDB(opCtx, nss.db(), MODE_X);
if (!autoDB.getDb()) {
// Database does not exist - nothing to do.
@@ -470,7 +464,6 @@ StatusWith<std::vector<BSONObj>> _findOrDeleteDocuments(
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
auto collectionAccessMode = isFind ? MODE_IS : MODE_IX;
- ScopedTransaction transaction(opCtx, collectionAccessMode);
AutoGetCollection collectionGuard(opCtx, nss, collectionAccessMode);
auto collection = collectionGuard.getCollection();
if (!collection) {
@@ -607,7 +600,6 @@ StatusWith<std::vector<BSONObj>> StorageInterfaceImpl::deleteDocuments(
}
Status StorageInterfaceImpl::isAdminDbValid(OperationContext* opCtx) {
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetDb autoDB(opCtx, "admin", MODE_X);
return checkAdminDatabase(opCtx, autoDB.getDb());
}
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index e94761e5d35..6ca1058cf5c 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -89,8 +89,7 @@ NamespaceString makeNamespace(const T& t, const char* suffix = "") {
*/
BSONObj getMinValidDocument(OperationContext* opCtx, const NamespaceString& minValidNss) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IS);
- Lock::DBLock dblk(opCtx->lockState(), minValidNss.db(), MODE_IS);
+ Lock::DBLock dblk(opCtx, minValidNss.db(), MODE_IS);
Lock::CollectionLock lk(opCtx->lockState(), minValidNss.ns(), MODE_IS);
BSONObj mv;
if (Helpers::getSingleton(opCtx, minValidNss.ns().c_str(), mv)) {
@@ -120,8 +119,7 @@ void createCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options = CollectionOptions()) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dblk(opCtx->lockState(), nss.db(), MODE_X);
+ Lock::DBLock dblk(opCtx, nss.db(), MODE_X);
OldClientContext ctx(opCtx, nss.ns());
auto db = ctx.db();
ASSERT_TRUE(db);
@@ -449,7 +447,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, InsertMissingDocWorksOnExistingCap
opts.cappedSize = 1024 * 1024;
createCollection(opCtx, nss, opts);
ASSERT_OK(storage.insertDocument(opCtx, nss, BSON("_id" << 1)));
- AutoGetCollectionForRead autoColl(opCtx, nss);
+ AutoGetCollectionForReadCommand autoColl(opCtx, nss);
ASSERT_TRUE(autoColl.getCollection());
}
@@ -459,7 +457,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, InsertMissingDocWorksOnExistingCol
NamespaceString nss("foo.bar");
createCollection(opCtx, nss);
ASSERT_OK(storage.insertDocument(opCtx, nss, BSON("_id" << 1)));
- AutoGetCollectionForRead autoColl(opCtx, nss);
+ AutoGetCollectionForReadCommand autoColl(opCtx, nss);
ASSERT_TRUE(autoColl.getCollection());
}
@@ -487,7 +485,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionWithIDIndexCommits
ASSERT_OK(loader->insertDocuments(docs.begin(), docs.end()));
ASSERT_OK(loader->commit());
- AutoGetCollectionForRead autoColl(opCtx, nss);
+ AutoGetCollectionForReadCommand autoColl(opCtx, nss);
auto coll = autoColl.getCollection();
ASSERT(coll);
ASSERT_EQ(coll->getRecordStore()->numRecords(opCtx), 2LL);
@@ -516,7 +514,7 @@ void _testDestroyUncommitedCollectionBulkLoader(
// Collection and ID index should not exist after 'loader' is destroyed.
destroyLoaderFn(std::move(loader));
- AutoGetCollectionForRead autoColl(opCtx, nss);
+ AutoGetCollectionForReadCommand autoColl(opCtx, nss);
auto coll = autoColl.getCollection();
// Bulk loader is used to create indexes. The collection is not dropped when the bulk loader is
@@ -590,12 +588,12 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, CreateOplogCreateCappedCollection)
StorageInterfaceImpl storage;
NamespaceString nss("local.oplog.X");
{
- AutoGetCollectionForRead autoColl(opCtx, nss);
+ AutoGetCollectionForReadCommand autoColl(opCtx, nss);
ASSERT_FALSE(autoColl.getCollection());
}
ASSERT_OK(storage.createOplog(opCtx, nss));
{
- AutoGetCollectionForRead autoColl(opCtx, nss);
+ AutoGetCollectionForReadCommand autoColl(opCtx, nss);
ASSERT_TRUE(autoColl.getCollection());
ASSERT_EQ(nss.toString(), autoColl.getCollection()->ns().toString());
ASSERT_TRUE(autoColl.getCollection()->isCapped());
@@ -608,7 +606,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
StorageInterfaceImpl storage;
NamespaceString nss("local.oplog.Y");
{
- AutoGetCollectionForRead autoColl(opCtx, nss);
+ AutoGetCollectionForReadCommand autoColl(opCtx, nss);
ASSERT_FALSE(autoColl.getCollection());
}
@@ -622,12 +620,12 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, CreateCollectionFailsIfCollectionE
StorageInterfaceImpl storage;
auto nss = makeNamespace(_agent);
{
- AutoGetCollectionForRead autoColl(opCtx, nss);
+ AutoGetCollectionForReadCommand autoColl(opCtx, nss);
ASSERT_FALSE(autoColl.getCollection());
}
ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions()));
{
- AutoGetCollectionForRead autoColl(opCtx, nss);
+ AutoGetCollectionForReadCommand autoColl(opCtx, nss);
ASSERT_TRUE(autoColl.getCollection());
ASSERT_EQ(nss.toString(), autoColl.getCollection()->ns().toString());
}
@@ -652,7 +650,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, DropCollectionWorksWithExistingEmp
NamespaceString nss("foo.bar");
createCollection(opCtx, nss);
ASSERT_OK(storage.dropCollection(opCtx, nss));
- AutoGetCollectionForRead autoColl(opCtx, nss);
+ AutoGetCollectionForReadCommand autoColl(opCtx, nss);
ASSERT_FALSE(autoColl.getCollection());
}
@@ -662,7 +660,7 @@ TEST_F(StorageInterfaceImplWithReplCoordTest, DropCollectionWorksWithMissingColl
NamespaceString nss("foo.bar");
ASSERT_FALSE(AutoGetDb(opCtx, nss.db(), MODE_IS).getDb());
ASSERT_OK(storage.dropCollection(opCtx, nss));
- ASSERT_FALSE(AutoGetCollectionForRead(opCtx, nss).getCollection());
+ ASSERT_FALSE(AutoGetCollectionForReadCommand(opCtx, nss).getCollection());
// Database should not be created after running dropCollection.
ASSERT_FALSE(AutoGetDb(opCtx, nss.db(), MODE_IS).getDb());
}
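The test changes above exercise the two lock constructors touched most often by this patch: Lock::DBLock now takes the OperationContext directly, while Lock::CollectionLock still takes opCtx->lockState(). A sketch of a read helper along the lines of getMinValidDocument(); the helper name is hypothetical and the write-conflict retry loop is omitted for brevity, but the lock signatures and the Helpers::getSingleton() call match the diff:

// Hypothetical single-document read; locking mirrors the hunk above.
BSONObj readSingletonDocument(OperationContext* opCtx, const NamespaceString& nss) {
    Lock::DBLock dbLock(opCtx, nss.db(), MODE_IS);                         // new: takes opCtx
    Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);  // unchanged: takes the Locker
    BSONObj doc;
    if (Helpers::getSingleton(opCtx, nss.ns().c_str(), doc)) {
        return doc.getOwned();
    }
    return BSONObj();
}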
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index c42b51905b2..105f40bcd5a 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -312,7 +312,7 @@ Status SyncTail::syncApply(OperationContext* opCtx,
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
// a command may need a global write lock. so we will conservatively go
// ahead and grab one here. suboptimal. :-(
- Lock::GlobalWrite globalWriteLock(opCtx->lockState());
+ Lock::GlobalWrite globalWriteLock(opCtx);
// special case apply for commands to avoid implicit database creation
Status status = applyCommandInLock(opCtx, op, inSteadyStateReplication);
@@ -339,7 +339,7 @@ Status SyncTail::syncApply(OperationContext* opCtx,
if (isNoOp || (opType[0] == 'i' && nsToCollectionSubstring(ns) == "system.indexes")) {
auto opStr = isNoOp ? "syncApply_noop" : "syncApply_indexBuild";
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- Lock::DBLock dbLock(opCtx->lockState(), nsToDatabaseSubstring(ns), MODE_X);
+ Lock::DBLock dbLock(opCtx, nsToDatabaseSubstring(ns), MODE_X);
OldClientContext ctx(opCtx, ns);
return applyOp(ctx.db());
}
@@ -361,7 +361,7 @@ Status SyncTail::syncApply(OperationContext* opCtx,
// drop the DB lock before acquiring
// the upgraded one.
dbLock.reset();
- dbLock.reset(new Lock::DBLock(opCtx->lockState(), dbName, mode));
+ dbLock.reset(new Lock::DBLock(opCtx, dbName, mode));
collectionLock.reset(new Lock::CollectionLock(opCtx->lockState(), ns, mode));
};
@@ -418,7 +418,7 @@ void prefetchOp(const BSONObj& op) {
// for multiple prefetches if they are for the same database.
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- AutoGetCollectionForRead ctx(&opCtx, NamespaceString(ns));
+ AutoGetCollectionForReadCommand ctx(&opCtx, NamespaceString(ns));
Database* db = ctx.getDb();
if (db) {
prefetchPagesForReplicatedOp(&opCtx, db, op);
@@ -478,8 +478,8 @@ void scheduleWritesToOplog(OperationContext* opCtx,
// guarantees that 'ops' will stay in scope until the spawned threads complete.
return [&ops, begin, end] {
initializeWriterThread();
- const auto txnHolder = cc().makeOperationContext();
- const auto opCtx = txnHolder.get();
+ const auto opCtxHolder = cc().makeOperationContext();
+ const auto opCtx = opCtxHolder.get();
opCtx->lockState()->setShouldConflictWithSecondaryBatchApplication(false);
UnreplicatedWritesBlock uwb(opCtx);
@@ -552,7 +552,7 @@ private:
CollectionProperties getCollectionPropertiesImpl(OperationContext* opCtx, StringData ns) {
CollectionProperties collProperties;
- Lock::DBLock dbLock(opCtx->lockState(), nsToDatabaseSubstring(ns), MODE_IS);
+ Lock::DBLock dbLock(opCtx, nsToDatabaseSubstring(ns), MODE_IS);
auto db = dbHolder().get(opCtx, ns);
if (!db) {
return collProperties;
@@ -640,8 +640,7 @@ void tryToGoLiveAsASecondary(OperationContext* opCtx, ReplicationCoordinator* re
// This needs to happen after the attempt so readers can be sure we've already tried.
ON_BLOCK_EXIT([] { attemptsToBecomeSecondary.increment(); });
- ScopedTransaction transaction(opCtx, MODE_S);
- Lock::GlobalRead readLock(opCtx->lockState());
+ Lock::GlobalRead readLock(opCtx);
if (replCoord->getMaintenanceMode()) {
LOG(1) << "Can't go live (tryToGoLiveAsASecondary) as maintenance mode is active.";
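The sync_tail.cpp hunks show the global-lock constructors after the change: Lock::GlobalWrite and Lock::GlobalRead are built from the OperationContext rather than from opCtx->lockState(), with no ScopedTransaction in front of them. A compressed sketch of the two forms (both wrapper functions are hypothetical; the constructor calls are the ones in the hunks):

// Hypothetical wrappers illustrating the constructors used above.
void withGlobalWriteLock(OperationContext* opCtx) {
    Lock::GlobalWrite globalWriteLock(opCtx);  // was: Lock::GlobalWrite(opCtx->lockState())
    // ... work that needs the global exclusive lock ...
}

void withGlobalReadLock(OperationContext* opCtx) {
    Lock::GlobalRead readLock(opCtx);  // was: ScopedTransaction(opCtx, MODE_S) + Lock::GlobalRead(lockState)
    // ... read-only work under the global shared lock ...
}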
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index b1e62a2efa5..7e2cda18fe9 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -177,8 +177,7 @@ void createCollection(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionOptions& options) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dblk(opCtx->lockState(), nss.db(), MODE_X);
+ Lock::DBLock dblk(opCtx, nss.db(), MODE_X);
OldClientContext ctx(opCtx, nss.ns());
auto db = ctx.db();
ASSERT_TRUE(db);
@@ -363,7 +362,7 @@ TEST_F(SyncTailTest, SyncApplyInsertDocumentDatabaseMissing) {
TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionMissing) {
{
- Lock::GlobalWrite globalLock(_opCtx->lockState());
+ Lock::GlobalWrite globalLock(_opCtx.get());
bool justCreated = false;
Database* db = dbHolder().openDb(_opCtx.get(), "test", &justCreated);
ASSERT_TRUE(db);
@@ -374,7 +373,7 @@ TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionMissing) {
TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionExists) {
{
- Lock::GlobalWrite globalLock(_opCtx->lockState());
+ Lock::GlobalWrite globalLock(_opCtx.get());
bool justCreated = false;
Database* db = dbHolder().openDb(_opCtx.get(), "test", &justCreated);
ASSERT_TRUE(db);
@@ -614,7 +613,7 @@ TEST_F(SyncTailTest, MultiSyncApplyUsesSyncApplyToApplyOperation) {
multiSyncApply(&ops, nullptr);
// Collection should be created after SyncTail::syncApply() processes operation.
_opCtx = cc().makeOperationContext();
- ASSERT_TRUE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection());
+ ASSERT_TRUE(AutoGetCollectionForReadCommand(_opCtx.get(), nss).getCollection());
}
TEST_F(SyncTailTest, MultiSyncApplyDisablesDocumentValidationWhileApplyingOperations) {
@@ -866,7 +865,7 @@ TEST_F(SyncTailTest,
// Since the missing document is not found on the sync source, the collection referenced by
// the failed operation should not be automatically created.
- ASSERT_FALSE(AutoGetCollectionForRead(_opCtx.get(), nss).getCollection());
+ ASSERT_FALSE(AutoGetCollectionForReadCommand(_opCtx.get(), nss).getCollection());
ASSERT_EQUALS(fetchCount.load(), 1U);
}
@@ -1007,14 +1006,14 @@ OplogEntry IdempotencyTest::dropIndex(const std::string& indexName) {
}
std::string IdempotencyTest::validate() {
- auto collection = AutoGetCollectionForRead(_opCtx.get(), nss).getCollection();
+ auto collection = AutoGetCollectionForReadCommand(_opCtx.get(), nss).getCollection();
if (!collection) {
return "CollectionNotFound";
}
ValidateResults validateResults;
BSONObjBuilder bob;
- Lock::DBLock lk(_opCtx->lockState(), nss.db(), MODE_IS);
+ Lock::DBLock lk(_opCtx.get(), nss.db(), MODE_IS);
Lock::CollectionLock lock(_opCtx->lockState(), nss.ns(), MODE_IS);
ASSERT_OK(collection->validate(_opCtx.get(), kValidateFull, &validateResults, &bob));
ASSERT_TRUE(validateResults.valid);
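In the test files the rename is mechanical: every construction of AutoGetCollectionForRead becomes AutoGetCollectionForReadCommand with the same (opCtx, nss) arguments and the same getCollection() accessor. A sketch of the assertion pattern these tests repeat (the wrapper function is hypothetical; the accessors match the diff):

// Hypothetical test helper; accessor calls are taken from the hunks above.
void assertCollectionExists(OperationContext* opCtx, const NamespaceString& nss) {
    AutoGetCollectionForReadCommand autoColl(opCtx, nss);
    ASSERT_TRUE(autoColl.getCollection());
    ASSERT_EQ(nss.toString(), autoColl.getCollection()->ns().toString());
}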
diff --git a/src/mongo/db/restapi.cpp b/src/mongo/db/restapi.cpp
index 532f0c81213..1869858f223 100644
--- a/src/mongo/db/restapi.cpp
+++ b/src/mongo/db/restapi.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/auth/user_name.h"
#include "mongo/db/background.h"
#include "mongo/db/clientcursor.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/dbwebserver.h"
@@ -306,7 +307,7 @@ public:
virtual void run(OperationContext* opCtx, stringstream& ss) {
Timer t;
- Lock::GlobalLock globalSLock(opCtx->lockState(), MODE_S, 300);
+ Lock::GlobalLock globalSLock(opCtx, MODE_S, 300);
if (globalSLock.isLocked()) {
_gotLock(t.millis(), ss);
} else {
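restapi.cpp uses the timed variant: Lock::GlobalLock is now constructed from the OperationContext plus a mode and a timeout, and the caller still checks isLocked() to find out whether the lock was acquired before the deadline. A sketch (the probe function is hypothetical; the argument list mirrors the hunk):

// Hypothetical probe; returns whether the global S lock could be taken in time.
bool tryGlobalSharedLock(OperationContext* opCtx) {
    Lock::GlobalLock globalSLock(opCtx, MODE_S, 300);  // 300 is the timeout used in the hunk above
    return globalSLock.isLocked();
}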
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index fda5fffdcfb..ff635fb59ff 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -472,7 +472,6 @@ void MigrationChunkClonerSourceLegacy::_cleanup(OperationContext* opCtx) {
}
if (_deleteNotifyExec) {
- ScopedTransaction scopedXact(opCtx, MODE_IS);
AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS);
_deleteNotifyExec.reset();
@@ -510,7 +509,6 @@ StatusWith<BSONObj> MigrationChunkClonerSourceLegacy::_callRecipient(const BSONO
}
Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opCtx) {
- ScopedTransaction scopedXact(opCtx, MODE_IS);
AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS);
Collection* const collection = autoColl.getCollection();
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index a51ef083521..955d834941d 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -57,8 +57,7 @@ class AutoGetActiveCloner {
MONGO_DISALLOW_COPYING(AutoGetActiveCloner);
public:
- AutoGetActiveCloner(OperationContext* opCtx, const MigrationSessionId& migrationSessionId)
- : _scopedXact(opCtx, MODE_IS) {
+ AutoGetActiveCloner(OperationContext* opCtx, const MigrationSessionId& migrationSessionId) {
ShardingState* const gss = ShardingState::get(opCtx);
const auto nss = gss->getActiveDonateChunkNss();
@@ -105,9 +104,6 @@ public:
}
private:
- // Scoped transaction to reset the WT snapshot
- ScopedTransaction _scopedXact;
-
// Scoped database + collection lock
boost::optional<AutoGetCollection> _autoColl;
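AutoGetActiveCloner shows the class-member side of the cleanup: the ScopedTransaction data member and its constructor initializer are removed, leaving the boost::optional<AutoGetCollection> as the only lock-holding member. A reduced sketch of such a guard class (the class name is hypothetical; MONGO_DISALLOW_COPYING, the optional member and the MODE_IS collection lock follow the hunks):

// Hypothetical RAII guard, reduced from the pattern above.
class HypotheticalCollectionGuard {
    MONGO_DISALLOW_COPYING(HypotheticalCollectionGuard);

public:
    HypotheticalCollectionGuard(OperationContext* opCtx, const NamespaceString& nss) {
        // No ScopedTransaction member to initialize; constructing the
        // AutoGetCollection is the only locking step left.
        _autoColl.emplace(opCtx, nss, MODE_IS);
    }

private:
    // Scoped database + collection lock
    boost::optional<AutoGetCollection> _autoColl;
};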
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 8239842289f..f013b1baa91 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -527,8 +527,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx,
{
// 1. copy indexes
- ScopedTransaction scopedXact(opCtx, MODE_IX);
- Lock::DBLock lk(opCtx->lockState(), _nss.db(), MODE_X);
+ Lock::DBLock lk(opCtx, _nss.db(), MODE_X);
OldClientContext ctx(opCtx, _nss.ns());
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, _nss)) {
@@ -888,8 +887,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx,
bool didAnything = false;
if (xfer["deleted"].isABSONObj()) {
- ScopedTransaction scopedXact(opCtx, MODE_IX);
- Lock::DBLock dlk(opCtx->lockState(), nsToDatabaseSubstring(ns), MODE_IX);
+ Lock::DBLock dlk(opCtx, nsToDatabaseSubstring(ns), MODE_IX);
Helpers::RemoveSaver rs("moveChunk", ns, "removedDuring");
BSONObjIterator i(xfer["deleted"].Obj()); // deleted documents
@@ -985,8 +983,7 @@ bool MigrationDestinationManager::_flushPendingWrites(OperationContext* opCtx,
{
// Get global lock to wait for write to be committed to journal.
- ScopedTransaction scopedXact(opCtx, MODE_S);
- Lock::GlobalRead lk(opCtx->lockState());
+ Lock::GlobalRead lk(opCtx);
// if durability is on, force a write to journal
if (getDur().commitNow(opCtx)) {
@@ -1003,7 +1000,6 @@ Status MigrationDestinationManager::_notePending(OperationContext* opCtx,
const BSONObj& min,
const BSONObj& max,
const OID& epoch) {
- ScopedTransaction scopedXact(opCtx, MODE_IX);
AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
auto css = CollectionShardingState::get(opCtx, nss);
@@ -1046,7 +1042,6 @@ Status MigrationDestinationManager::_forgetPending(OperationContext* opCtx,
_chunkMarkedPending = false;
}
- ScopedTransaction scopedXact(opCtx, MODE_IX);
AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
auto css = CollectionShardingState::get(opCtx, nss);
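Several of the sharding hunks construct AutoGetCollection with two lock modes, one for the database and one for the collection (here MODE_IX and MODE_X), again dropping the leading ScopedTransaction. A sketch of that four-argument form (the helper function is hypothetical; AutoGetCollection and CollectionShardingState::get are used as in the hunks):

// Hypothetical metadata helper; the four-argument AutoGetCollection form
// (db mode first, then collection mode) matches the hunks above.
void updateShardingMetadata(OperationContext* opCtx, const NamespaceString& nss) {
    AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
    auto css = CollectionShardingState::get(opCtx, nss);
    (void)css;  // placeholder for the actual metadata mutation under the collection X lock
}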
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 5fb64445a75..fdb92230763 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -117,7 +117,6 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
// Snapshot the committed metadata from the time the migration starts
{
- ScopedTransaction scopedXact(opCtx, MODE_IS);
AutoGetCollection autoColl(opCtx, getNss(), MODE_IS);
_collectionMetadata = CollectionShardingState::get(opCtx, getNss())->getMetadata();
@@ -183,7 +182,6 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
{
// Register for notifications from the replication subsystem
- ScopedTransaction scopedXact(opCtx, MODE_IX);
AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
auto css = CollectionShardingState::get(opCtx, getNss().ns());
@@ -232,7 +230,6 @@ Status MigrationSourceManager::enterCriticalSection(OperationContext* opCtx) {
// The critical section must be entered with collection X lock in order to ensure there are
// no writes which could have entered and passed the version check just before we entered
// the critical section, but managed to complete after we left it.
- ScopedTransaction scopedXact(opCtx, MODE_IX);
AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
// Check that the collection has not been dropped or recreated since the migration began.
@@ -379,7 +376,6 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
ShardingState::get(opCtx)->refreshMetadataNow(opCtx, getNss(), &unusedShardVersion);
if (refreshStatus.isOK()) {
- ScopedTransaction scopedXact(opCtx, MODE_IS);
AutoGetCollection autoColl(opCtx, getNss(), MODE_IS);
auto refreshedMetadata = CollectionShardingState::get(opCtx, getNss())->getMetadata();
@@ -402,7 +398,6 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
log() << "Migration succeeded and updated collection version to "
<< refreshedMetadata->getCollVersion();
} else {
- ScopedTransaction scopedXact(opCtx, MODE_IX);
AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
CollectionShardingState::get(opCtx, getNss())->refreshMetadata(opCtx, nullptr);
@@ -458,7 +453,6 @@ void MigrationSourceManager::_cleanup(OperationContext* opCtx) {
auto cloneDriver = [&]() {
// Unregister from the collection's sharding state
- ScopedTransaction scopedXact(opCtx, MODE_IX);
AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
auto css = CollectionShardingState::get(opCtx, getNss().ns());
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 5bbb6d9c925..1320ee15465 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -552,7 +552,6 @@ StatusWith<ChunkVersion> ShardingState::_refreshMetadata(
}
// Exclusive collection lock needed since we're now changing the metadata
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
auto css = CollectionShardingState::get(opCtx, nss);
diff --git a/src/mongo/db/service_context_d_test_fixture.cpp b/src/mongo/db/service_context_d_test_fixture.cpp
index 7c24ce66ff5..efa54fdd1a4 100644
--- a/src/mongo/db/service_context_d_test_fixture.cpp
+++ b/src/mongo/db/service_context_d_test_fixture.cpp
@@ -87,8 +87,7 @@ ServiceContext* ServiceContextMongoDTest::getServiceContext() {
void ServiceContextMongoDTest::_dropAllDBs(OperationContext* opCtx) {
dropAllDatabasesExceptLocal(opCtx);
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx);
AutoGetDb autoDBLocal(opCtx, "local", MODE_X);
const auto localDB = autoDBLocal.getDb();
if (localDB) {
diff --git a/src/mongo/db/stats/storage_stats.cpp b/src/mongo/db/stats/storage_stats.cpp
index 8a9a343f31a..fca73b4a473 100644
--- a/src/mongo/db/stats/storage_stats.cpp
+++ b/src/mongo/db/stats/storage_stats.cpp
@@ -54,7 +54,7 @@ Status appendCollectionStorageStats(OperationContext* opCtx,
bool verbose = param["verbose"].trueValue();
- AutoGetCollectionForRead ctx(opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(opCtx, nss);
if (!ctx.getDb()) {
return {ErrorCodes::BadValue,
str::stream() << "Database [" << nss.db().toString() << "] not found."};
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index 7a2b05a379f..0fcd359dd0d 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -41,6 +41,7 @@
#include <sys/stat.h>
#include "mongo/db/client.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/compress.h"
#include "mongo/db/storage/mmap_v1/dur_commitjob.h"
@@ -647,8 +648,7 @@ void replayJournalFilesAtStartup() {
// we use a lock so that exitCleanly will wait for us
// to finish (or at least to notice what is up and stop)
auto opCtx = cc().makeOperationContext();
- ScopedTransaction transaction(opCtx.get(), MODE_X);
- Lock::GlobalWrite lk(opCtx->lockState());
+ Lock::GlobalWrite lk(opCtx.get());
_recover(opCtx.get()); // throws on interruption
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 29d7952bf22..7fc0c855dcc 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -37,6 +37,7 @@
#include "mongo/base/counter.h"
#include "mongo/db/audit.h"
#include "mongo/db/client.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/mmap_v1/data_file.h"
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
index ef85a84bb4f..954317fb017 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
@@ -81,8 +81,6 @@ public:
OperationContext& opCtx = *opCtxPtr;
try {
- ScopedTransaction transaction(&opCtx, MODE_IX);
-
AutoGetDb autoDb(&opCtx, _ns.db(), MODE_IX);
Database* db = autoDb.getDb();
if (!db) {
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index c5ce7c49101..f9c1d9459d6 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -130,7 +130,6 @@ private:
// Get all TTL indexes from every collection.
for (const std::string& collectionNS : ttlCollections) {
- ScopedTransaction st(&opCtx, MODE_IS);
NamespaceString collectionNSS(collectionNS);
AutoGetCollection autoGetCollection(&opCtx, collectionNSS, MODE_IS);
Collection* coll = autoGetCollection.getCollection();
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index 3034a82a453..757952ec598 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index e20410a3f6c..72eef6f6097 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -51,8 +51,7 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
class Base {
public:
Base()
- : _scopedXact(&_opCtx, MODE_IX),
- _lk(_opCtx.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
+ : _lk(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X),
_context(&_opCtx, ns()),
_client(&_opCtx) {
_database = _context.db();
@@ -112,9 +111,8 @@ protected:
}
- const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _opCtx = *_txnPtr;
- ScopedTransaction _scopedXact;
+ const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
+ OperationContext& _opCtx = *_opCtxPtr;
Lock::DBLock _lk;
OldClientContext _context;
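In the dbtests fixtures the same edit appears in the member lists: the UniqueOperationContext member is renamed from _txnPtr to _opCtxPtr, the ScopedTransaction member disappears, and the Lock::DBLock member is now initialized from &_opCtx. A trimmed sketch of such a fixture (the class name and ns() value are hypothetical; the member types and initializer list follow the hunks):

// Hypothetical dbtests-style fixture, trimmed to the members touched above.
class HypotheticalFixtureBase {
public:
    HypotheticalFixtureBase()
        : _lk(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X), _context(&_opCtx, ns()) {}

protected:
    static const char* ns() {
        return "unittests.hypothetical";
    }

    // Declared in this order so the OperationContext outlives the lock and
    // the client context that are constructed from it.
    const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
    OperationContext& _opCtx = *_opCtxPtr;
    Lock::DBLock _lk;
    OldClientContext _context;
};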
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 0d2255ebea9..29d89c9cc08 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -70,8 +70,7 @@ public:
{
// Remove _id range [_min, _max).
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(ns), MODE_X);
+ Lock::DBLock lk(&opCtx, nsToDatabaseSubstring(ns), MODE_X);
OldClientContext ctx(&opCtx, ns);
KeyRange range(ns, BSON("_id" << _min), BSON("_id" << _max), BSON("_id" << 1));
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index fd1f10f8a55..a30fa5cc82f 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -40,8 +40,7 @@ public:
IndexIteratorTests() {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+ Lock::DBLock lk(&opCtx, nsToDatabaseSubstring(_ns), MODE_X);
OldClientContext ctx(&opCtx, _ns);
WriteUnitOfWork wuow(&opCtx);
@@ -54,8 +53,7 @@ public:
~IndexIteratorTests() {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+ Lock::DBLock lk(&opCtx, nsToDatabaseSubstring(_ns), MODE_X);
OldClientContext ctx(&opCtx, _ns);
WriteUnitOfWork wuow(&opCtx);
@@ -109,8 +107,7 @@ public:
RefreshEntry() {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+ Lock::DBLock lk(&opCtx, nsToDatabaseSubstring(_ns), MODE_X);
OldClientContext ctx(&opCtx, _ns);
WriteUnitOfWork wuow(&opCtx);
@@ -123,8 +120,7 @@ public:
~RefreshEntry() {
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock lk(opCtx.lockState(), nsToDatabaseSubstring(_ns), MODE_X);
+ Lock::DBLock lk(&opCtx, nsToDatabaseSubstring(_ns), MODE_X);
OldClientContext ctx(&opCtx, _ns);
WriteUnitOfWork wuow(&opCtx);
diff --git a/src/mongo/dbtests/matchertests.cpp b/src/mongo/dbtests/matchertests.cpp
index 9a762f017a0..9e60daa9eaf 100644
--- a/src/mongo/dbtests/matchertests.cpp
+++ b/src/mongo/dbtests/matchertests.cpp
@@ -234,7 +234,7 @@ public:
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
const NamespaceString nss("unittests.matchertests");
- AutoGetCollectionForRead ctx(&opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&opCtx, nss);
const CollatorInterface* collator = nullptr;
M m(BSON("$where"
diff --git a/src/mongo/dbtests/mmaptests.cpp b/src/mongo/dbtests/mmaptests.cpp
index b0afc3e916f..351019df663 100644
--- a/src/mongo/dbtests/mmaptests.cpp
+++ b/src/mongo/dbtests/mmaptests.cpp
@@ -75,9 +75,8 @@ public:
} catch (...) {
}
- MMAPV1LockerImpl lockState;
- Lock::GlobalWrite lk(&lockState);
auto opCtx = cc().makeOperationContext();
+ Lock::GlobalWrite lk(opCtx.get());
{
DurableMappedFile f(opCtx.get());
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 81c6b3125b0..5a861b35ae8 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -547,8 +547,7 @@ public:
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock lk(opCtx.lockState(), dbName, MODE_X);
+ Lock::DBLock lk(&opCtx, dbName, MODE_X);
bool justCreated;
Database* db = dbHolder().openDb(&opCtx, dbName, &justCreated);
@@ -592,8 +591,7 @@ public:
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock lk(opCtx.lockState(), dbName, MODE_X);
+ Lock::DBLock lk(&opCtx, dbName, MODE_X);
bool justCreated;
Database* db = dbHolder().openDb(&opCtx, dbName, &justCreated);
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index 4070d0b5440..de5c365f421 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -44,11 +44,7 @@ static const NamespaceString nss("unittests.oplogstarttests");
class Base {
public:
- Base()
- : _scopedXact(&_opCtx, MODE_X),
- _lk(_opCtx.lockState()),
- _context(&_opCtx, nss.ns()),
- _client(&_opCtx) {
+ Base() : _lk(&_opCtx), _context(&_opCtx, nss.ns()), _client(&_opCtx) {
Collection* c = _context.db()->getCollection(nss.ns());
if (!c) {
WriteUnitOfWork wuow(&_opCtx);
@@ -99,9 +95,8 @@ protected:
private:
// The order of these is important in order to ensure order of destruction
- const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _opCtx = *_txnPtr;
- ScopedTransaction _scopedXact;
+ const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
+ OperationContext& _opCtx = *_opCtxPtr;
Lock::GlobalWrite _lk;
OldClientContext _context;
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index 59eb024b5c5..65ed6a714cf 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -44,7 +44,7 @@ namespace PdfileTests {
namespace Insert {
class Base {
public:
- Base() : _scopedXact(&_opCtx, MODE_X), _lk(_opCtx.lockState()), _context(&_opCtx, ns()) {}
+ Base() : _lk(&_opCtx), _context(&_opCtx, ns()) {}
virtual ~Base() {
if (!collection())
@@ -62,9 +62,8 @@ protected:
return _context.db()->getCollection(ns());
}
- const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _opCtx = *_txnPtr;
- ScopedTransaction _scopedXact;
+ const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
+ OperationContext& _opCtx = *_opCtxPtr;
Lock::GlobalWrite _lk;
OldClientContext _context;
};
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index 44e2ddfba31..bbcd0b6a2d0 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -107,7 +107,7 @@ public:
* Does NOT take ownership of 'cq'. Caller DOES NOT own the returned QuerySolution*.
*/
QuerySolution* pickBestPlan(CanonicalQuery* cq) {
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
QueryPlannerParams plannerParams;
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 7265c0e8dbc..5fbc726b51a 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -175,7 +175,7 @@ public:
}
size_t numCursors() {
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
if (!collection)
return 0;
@@ -184,7 +184,7 @@ public:
void registerExec(PlanExecutor* exec) {
// TODO: This is not correct (create collection under S-lock)
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
WriteUnitOfWork wunit(&_opCtx);
Collection* collection = ctx.getDb()->getOrCreateCollection(&_opCtx, nss.ns());
collection->getCursorManager()->registerExecutor(exec);
@@ -193,7 +193,7 @@ public:
void deregisterExec(PlanExecutor* exec) {
// TODO: This is not correct (create collection under S-lock)
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
WriteUnitOfWork wunit(&_opCtx);
Collection* collection = ctx.getDb()->getOrCreateCollection(&_opCtx, nss.ns());
collection->getCursorManager()->deregisterExecutor(exec);
@@ -201,8 +201,8 @@ public:
}
protected:
- const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _opCtx = *_txnPtr;
+ const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
+ OperationContext& _opCtx = *_opCtxPtr;
private:
IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
@@ -515,7 +515,7 @@ public:
}
{
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index ed69f451117..48e9da129a1 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -78,8 +78,7 @@ public:
}
void dropCollection() {
- ScopedTransaction transaction(&_opCtx, MODE_X);
- Lock::DBLock dbLock(_opCtx.lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbLock(&_opCtx, nss.db(), MODE_X);
Database* database = dbHolder().get(&_opCtx, nss.db());
if (!database) {
return;
@@ -104,8 +103,8 @@ public:
}
protected:
- const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _opCtx = *_txnPtr;
+ const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
+ OperationContext& _opCtx = *_opCtxPtr;
WorkingSet _ws;
};
@@ -116,7 +115,7 @@ protected:
class QueryStageCachedPlanFailure : public QueryStageCachedPlanBase {
public:
void run() {
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
ASSERT(collection);
@@ -184,7 +183,7 @@ public:
class QueryStageCachedPlanHitMaxWorks : public QueryStageCachedPlanBase {
public:
void run() {
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
ASSERT(collection);
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index e74ba02f361..e2db62bc4f4 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -84,7 +84,7 @@ public:
}
int countResults(CollectionScanParams::Direction direction, const BSONObj& filterObj) {
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
// Configure the scan.
CollectionScanParams params;
@@ -206,7 +206,7 @@ public:
class QueryStageCollscanObjectsInOrderForward : public QueryStageCollectionScanBase {
public:
void run() {
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
// Configure the scan.
CollectionScanParams params;
@@ -242,7 +242,7 @@ public:
class QueryStageCollscanObjectsInOrderBackward : public QueryStageCollectionScanBase {
public:
void run() {
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
CollectionScanParams params;
params.collection = ctx.getCollection();
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 6a851dc9258..3f6e8fc3dc0 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -31,6 +31,7 @@
#include "mongo/platform/basic.h"
#include "mongo/db/client.h"
+#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/collection_scan.h"
#include "mongo/db/exec/collection_scan_common.h"
@@ -54,10 +55,7 @@ const int kInterjections = kDocuments;
class CountStageTest {
public:
CountStageTest()
- : _scopedXact(&_opCtx, MODE_IX),
- _dbLock(_opCtx.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
- _ctx(&_opCtx, ns()),
- _coll(NULL) {}
+ : _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X), _ctx(&_opCtx, ns()), _coll(NULL) {}
virtual ~CountStageTest() {}
@@ -233,9 +231,8 @@ public:
protected:
vector<RecordId> _recordIds;
- const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _opCtx = *_txnPtr;
- ScopedTransaction _scopedXact;
+ const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
+ OperationContext& _opCtx = *_opCtxPtr;
Lock::DBLock _dbLock;
OldClientContext _ctx;
Collection* _coll;
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index f5b2ac7bf48..e350e7cfd15 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -121,7 +121,7 @@ public:
// Make an index on a:1
addIndex(BSON("a" << 1));
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* coll = ctx.getCollection();
// Set up the distinct stage.
@@ -188,7 +188,7 @@ public:
// Make an index on a:1
addIndex(BSON("a" << 1));
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* coll = ctx.getCollection();
// Set up the distinct stage.
@@ -257,7 +257,7 @@ public:
addIndex(BSON("a" << 1 << "b" << 1));
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* coll = ctx.getCollection();
std::vector<IndexDescriptor*> indices;
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index 709e2d1b760..f44c03d7e26 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -82,8 +82,8 @@ public:
}
protected:
- const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _opCtx = *_txnPtr;
+ const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
+ OperationContext& _opCtx = *_opCtxPtr;
DBDirectClient _client;
};
@@ -158,8 +158,7 @@ public:
class FetchStageFilter : public QueryStageFetchBase {
public:
void run() {
- ScopedTransaction transaction(&_opCtx, MODE_IX);
- Lock::DBLock lk(_opCtx.lockState(), nsToDatabaseSubstring(ns()), MODE_X);
+ Lock::DBLock lk(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X);
OldClientContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index 7ee19d8f352..b2727d223d8 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -46,10 +46,7 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
class IndexScanTest {
public:
IndexScanTest()
- : _scopedXact(&_opCtx, MODE_IX),
- _dbLock(_opCtx.lockState(), nsToDatabaseSubstring(ns()), MODE_X),
- _ctx(&_opCtx, ns()),
- _coll(NULL) {}
+ : _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X), _ctx(&_opCtx, ns()), _coll(NULL) {}
virtual ~IndexScanTest() {}
@@ -147,10 +144,9 @@ public:
}
protected:
- const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _opCtx = *_txnPtr;
+ const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
+ OperationContext& _opCtx = *_opCtxPtr;
- ScopedTransaction _scopedXact;
Lock::DBLock _dbLock;
OldClientContext _ctx;
Collection* _coll;
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index 90185fd529d..59d06e93d05 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -133,7 +133,7 @@ public:
addIndex(BSON("foo" << 1));
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
const Collection* coll = ctx.getCollection();
// Plan 0: IXScan over foo == 7
@@ -227,7 +227,7 @@ public:
addIndex(BSON("a" << 1));
addIndex(BSON("b" << 1));
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
// Query for both 'a' and 'b' and sort on 'b'.
@@ -335,7 +335,7 @@ public:
secondPlan->pushBack(PlanStage::NEED_TIME);
}
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(BSON("x" << 1));
@@ -413,7 +413,7 @@ public:
addIndex(BSON("foo" << 1));
addIndex(BSON("foo" << -1 << "bar" << 1));
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* coll = ctx.getCollection();
// Create the executor (Matching all documents).
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index 55326de7c63..852058ac005 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -79,7 +79,7 @@ public:
}
int countResults(const IndexScanParams& params, BSONObj filterObj = BSONObj()) {
- AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns()));
+ AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(ns()));
const CollatorInterface* collator = nullptr;
StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(
@@ -117,7 +117,7 @@ public:
}
IndexDescriptor* getIndex(const BSONObj& obj) {
- AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns()));
+ AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(ns()));
Collection* collection = ctx.getCollection();
std::vector<IndexDescriptor*> indexes;
collection->getIndexCatalog()->findIndexesByKeyPattern(&_opCtx, obj, false, &indexes);
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index da49550c73a..3153037c9ed 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -227,7 +227,7 @@ public:
// Verify the contents of the resulting collection.
{
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
vector<BSONObj> objs;
@@ -336,7 +336,7 @@ public:
// Check the contents of the collection.
{
- AutoGetCollectionForRead ctx(&_opCtx, nss);
+ AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
Collection* collection = ctx.getCollection();
vector<BSONObj> objs;
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 83765c6ecd4..8171cb96cbe 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -65,7 +65,7 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
class Base {
public:
- Base() : _scopedXact(&_opCtx, MODE_X), _lk(_opCtx.lockState()), _context(&_opCtx, ns()) {
+ Base() : _lk(&_opCtx), _context(&_opCtx, ns()) {
{
WriteUnitOfWork wunit(&_opCtx);
_database = _context.db();
@@ -121,9 +121,8 @@ protected:
}
- const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _opCtx = *_txnPtr;
- ScopedTransaction _scopedXact;
+ const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
+ OperationContext& _opCtx = *_opCtxPtr;
Lock::GlobalWrite _lk;
OldClientContext _context;
@@ -188,8 +187,7 @@ public:
void run() {
// We don't normally allow empty objects in the database, but test that we can find
// an empty object (one might be allowed inside a reserved namespace at some point).
- ScopedTransaction transaction(&_opCtx, MODE_X);
- Lock::GlobalWrite lk(_opCtx.lockState());
+ Lock::GlobalWrite lk(&_opCtx);
OldClientContext ctx(&_opCtx, "unittests.querytests");
{
@@ -336,7 +334,7 @@ public:
// Check that the cursor has been removed.
{
- AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns));
+ AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(ns));
ASSERT(0 == ctx.getCollection()->getCursorManager()->numCursors());
}
@@ -384,7 +382,7 @@ public:
// Check that the cursor still exists
{
- AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns));
+ AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(ns));
ASSERT(1 == ctx.getCollection()->getCursorManager()->numCursors());
ASSERT_OK(ctx.getCollection()->getCursorManager()->pinCursor(cursorId).getStatus());
}
@@ -661,8 +659,7 @@ public:
_client.dropCollection(ns);
_client.createCollection(ns, 10, true);
- ScopedTransaction transaction(&_opCtx, MODE_IX);
- Lock::DBLock lk(_opCtx.lockState(), "unittests", MODE_X);
+ Lock::DBLock lk(&_opCtx, "unittests", MODE_X);
OldClientContext ctx(&_opCtx, ns);
BSONObj info;
@@ -1137,8 +1134,7 @@ private:
class DirectLocking : public ClientBase {
public:
void run() {
- ScopedTransaction transaction(&_opCtx, MODE_X);
- Lock::GlobalWrite lk(_opCtx.lockState());
+ Lock::GlobalWrite lk(&_opCtx);
OldClientContext ctx(&_opCtx, "unittests.DirectLocking");
_client.remove("a.b", BSONObj());
ASSERT_EQUALS("unittests", ctx.db()->name());
@@ -1252,7 +1248,7 @@ public:
}
size_t numCursorsOpen() {
- AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(_ns));
+ AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(_ns));
Collection* collection = ctx.getCollection();
if (!collection)
return 0;
@@ -1600,13 +1596,9 @@ public:
class CollectionInternalBase : public CollectionBase {
public:
CollectionInternalBase(const char* nsLeaf)
- : CollectionBase(nsLeaf),
- _scopedXact(&_opCtx, MODE_IX),
- _lk(_opCtx.lockState(), "unittests", MODE_X),
- _ctx(&_opCtx, ns()) {}
+ : CollectionBase(nsLeaf), _lk(&_opCtx, "unittests", MODE_X), _ctx(&_opCtx, ns()) {}
private:
- ScopedTransaction _scopedXact;
Lock::DBLock _lk;
OldClientContext _ctx;
};
@@ -1656,7 +1648,7 @@ public:
ClientCursor* clientCursor = 0;
{
- AutoGetCollectionForRead ctx(&_opCtx, NamespaceString(ns()));
+ AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(ns()));
auto clientCursorPin =
unittest::assertGet(ctx.getCollection()->getCursorManager()->pinCursor(cursorId));
clientCursor = clientCursorPin.getCursor();
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index cb2032f8ccc..637429b4921 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -68,8 +68,8 @@ BSONObj f(const char* s) {
class Base {
protected:
- const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
- OperationContext& _opCtx = *_txnPtr;
+ const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
+ OperationContext& _opCtx = *_opCtxPtr;
mutable DBDirectClient _client;
public:
@@ -145,8 +145,7 @@ protected:
return _client.findOne(cllNS(), BSONObj());
}
int count() const {
- ScopedTransaction transaction(&_opCtx, MODE_X);
- Lock::GlobalWrite lk(_opCtx.lockState());
+ Lock::GlobalWrite lk(&_opCtx);
OldClientContext ctx(&_opCtx, ns());
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
@@ -167,8 +166,7 @@ protected:
return DBDirectClient(&_opCtx).query(cllNS(), BSONObj())->itcount();
}
void applyAllOperations() {
- ScopedTransaction transaction(&_opCtx, MODE_X);
- Lock::GlobalWrite lk(_opCtx.lockState());
+ Lock::GlobalWrite lk(&_opCtx);
vector<BSONObj> ops;
{
DBDirectClient db(&_opCtx);
@@ -193,8 +191,7 @@ protected:
}
}
void printAll(const char* ns) {
- ScopedTransaction transaction(&_opCtx, MODE_X);
- Lock::GlobalWrite lk(_opCtx.lockState());
+ Lock::GlobalWrite lk(&_opCtx);
OldClientContext ctx(&_opCtx, ns);
Database* db = ctx.db();
@@ -213,8 +210,7 @@ protected:
}
// These deletes don't get logged.
void deleteAll(const char* ns) const {
- ScopedTransaction transaction(&_opCtx, MODE_X);
- Lock::GlobalWrite lk(_opCtx.lockState());
+ Lock::GlobalWrite lk(&_opCtx);
OldClientContext ctx(&_opCtx, ns);
WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
@@ -227,8 +223,7 @@ protected:
wunit.commit();
}
void insert(const BSONObj& o) const {
- ScopedTransaction transaction(&_opCtx, MODE_X);
- Lock::GlobalWrite lk(_opCtx.lockState());
+ Lock::GlobalWrite lk(&_opCtx);
OldClientContext ctx(&_opCtx, ns());
WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
@@ -1404,8 +1399,7 @@ public:
<< "foo"
<< "bar"));
- ScopedTransaction transaction(&_opCtx, MODE_X);
- Lock::GlobalWrite lk(_opCtx.lockState());
+ Lock::GlobalWrite lk(&_opCtx);
// this should fail because we can't connect
try {
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index d9e70651178..50a7440fdb8 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -51,8 +51,7 @@ namespace {
const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
void dropDatabase(OperationContext* opCtx, const NamespaceString& nss) {
- ScopedTransaction transaction(opCtx, MODE_X);
- Lock::GlobalWrite globalWriteLock(opCtx->lockState());
+ Lock::GlobalWrite globalWriteLock(opCtx);
Database* db = dbHolder().get(opCtx, nss.db());
if (db) {
@@ -66,8 +65,7 @@ bool collectionExists(OldClientContext* ctx, const string& ns) {
return std::find(names.begin(), names.end(), ns) != names.end();
}
void createCollection(OperationContext* opCtx, const NamespaceString& nss) {
- ScopedTransaction transaction(opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx->lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbXLock(opCtx, nss.db(), MODE_X);
OldClientContext ctx(opCtx, nss.ns());
{
WriteUnitOfWork uow(opCtx);
@@ -155,8 +153,7 @@ public:
NamespaceString nss(ns);
dropDatabase(&opCtx, nss);
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbXLock(&opCtx, nss.db(), MODE_X);
OldClientContext ctx(&opCtx, ns);
{
WriteUnitOfWork uow(&opCtx);
@@ -187,8 +184,7 @@ public:
NamespaceString nss(ns);
dropDatabase(&opCtx, nss);
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbXLock(&opCtx, nss.db(), MODE_X);
OldClientContext ctx(&opCtx, ns);
{
WriteUnitOfWork uow(&opCtx);
@@ -231,8 +227,7 @@ public:
dropDatabase(&opCtx, source);
dropDatabase(&opCtx, target);
- ScopedTransaction transaction(&opCtx, MODE_X);
- Lock::GlobalWrite globalWriteLock(opCtx.lockState());
+ Lock::GlobalWrite globalWriteLock(&opCtx);
OldClientContext ctx(&opCtx, source.ns());
{
@@ -283,8 +278,7 @@ public:
dropDatabase(&opCtx, source);
dropDatabase(&opCtx, target);
- ScopedTransaction transaction(&opCtx, MODE_X);
- Lock::GlobalWrite globalWriteLock(opCtx.lockState());
+ Lock::GlobalWrite globalWriteLock(&opCtx);
OldClientContext ctx(&opCtx, source.ns());
BSONObj sourceDoc = BSON("_id"
@@ -354,8 +348,7 @@ public:
OperationContext& opCtx = *opCtxPtr;
dropDatabase(&opCtx, nss);
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbXLock(&opCtx, nss.db(), MODE_X);
OldClientContext ctx(&opCtx, nss.ns());
BSONObj oldDoc = BSON("_id"
@@ -415,8 +408,7 @@ public:
OperationContext& opCtx = *opCtxPtr;
dropDatabase(&opCtx, nss);
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbXLock(&opCtx, nss.db(), MODE_X);
OldClientContext ctx(&opCtx, nss.ns());
BSONObj doc = BSON("_id"
@@ -456,8 +448,7 @@ public:
OperationContext& opCtx = *opCtxPtr;
dropDatabase(&opCtx, nss);
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbXLock(&opCtx, nss.db(), MODE_X);
OldClientContext ctx(&opCtx, nss.ns());
BSONObj doc = BSON("_id"
@@ -513,7 +504,6 @@ public:
dropDatabase(&opCtx, nss);
createCollection(&opCtx, nss);
- ScopedTransaction transaction(&opCtx, MODE_IX);
AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
Collection* coll = autoDb.getDb()->getCollection(ns);
@@ -555,7 +545,6 @@ public:
dropDatabase(&opCtx, nss);
createCollection(&opCtx, nss);
- ScopedTransaction transaction(&opCtx, MODE_IX);
AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
Collection* coll = autoDb.getDb()->getCollection(ns);
@@ -609,7 +598,6 @@ public:
dropDatabase(&opCtx, nss);
createCollection(&opCtx, nss);
- ScopedTransaction transaction(&opCtx, MODE_IX);
AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
Collection* coll = autoDb.getDb()->getCollection(ns);
@@ -654,7 +642,6 @@ public:
dropDatabase(&opCtx, nss);
createCollection(&opCtx, nss);
- ScopedTransaction transaction(&opCtx, MODE_IX);
AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);
Collection* coll = autoDb.getDb()->getCollection(ns);
@@ -717,8 +704,7 @@ public:
NamespaceString nss(ns);
dropDatabase(&opCtx, nss);
- ScopedTransaction transaction(&opCtx, MODE_IX);
- Lock::DBLock dbXLock(opCtx.lockState(), nss.db(), MODE_X);
+ Lock::DBLock dbXLock(&opCtx, nss.db(), MODE_X);
OldClientContext ctx(&opCtx, nss.ns());
string idxNameA = "indexA";
diff --git a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
index cb35f46c2df..6d5ee674f5c 100644
--- a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
@@ -221,7 +221,6 @@ TEST_F(ConfigInitializationTest, ReRunsIfDocRolledBackThenReElected) {
repl::UnreplicatedWritesBlock uwb(opCtx);
auto nss = NamespaceString(VersionType::ConfigNS);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(opCtx, MODE_IX);
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
auto coll = autoColl.getCollection();
ASSERT_TRUE(coll);