author     Charlie Swanson <cswanson310@gmail.com>    2016-12-07 08:33:46 -0500
committer  Charlie Swanson <charlie.swanson@mongodb.com>    2016-12-13 10:08:17 -0500
commit     68bf5e7dddcc17b4fce0e5c602ad0f1bb423324d (patch)
tree       d28ed13221d36df2a9e01a93d66a65b18f0aa933
parent     34a39c8f9d7ea9b06692978c6c7b1018b082a8cb (diff)
download   mongo-68bf5e7dddcc17b4fce0e5c602ad0f1bb423324d.tar.gz
SERVER-27042 Stabilize currentop.js and add unit tests for lock state reporting
(cherry picked from commit 0bca5d9fc70fe4178441eaf097324e6da814410a)
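
The rewritten jstests/core/currentop.js (see the diff below) blocks a write behind fsyncLock and then looks for it in db.currentOp(). As a quick orientation, here is a minimal shell sketch of the filter the new test relies on, assuming the server is already fsyncLocked and a parallel shell is inserting into test.jstests_currentop:

    // Find the single insert that is parked waiting on the global lock.
    var blocked = db.currentOp({
        ns: "test.jstests_currentop",  // namespace of the blocked write
        "locks.Global": "w",           // intent-exclusive global lock requested by the insert
        waitingForLock: true           // reported when the locker has a valid waitingResource
    }).inprog;
    assert.eq(1, blocked.length);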
-rw-r--r--  buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml | 4
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml | 3
-rw-r--r--  jstests/core/currentop.js | 115
-rw-r--r--  jstests/core/fsync.js | 2
-rw-r--r--  jstests/libs/parallelTester.js | 6
-rw-r--r--  src/mongo/db/concurrency/lock_state_test.cpp | 110
-rw-r--r--  src/mongo/db/stats/SConscript | 23
-rw-r--r--  src/mongo/db/stats/fill_locker_info_test.cpp | 136
8 files changed, 299 insertions, 100 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
index a9008670712..1d78d60e858 100644
--- a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
@@ -12,7 +12,7 @@ selector:
- jstests/core/check_shard_index.js # checkShardingIndex.
- jstests/core/collection_truncate.js # emptycapped.
- jstests/core/compact_keeps_indexes.js # compact.
- - jstests/core/capped_update.js # uses godinsert and can't run under replication.
+ - jstests/core/currentop.js # uses fsync.
- jstests/core/auth_copydb.js # copyDatabase.
- jstests/core/copydb.js # copyDatabase.
- jstests/core/dbadmin.js # "local" database.
@@ -21,7 +21,7 @@ selector:
- jstests/core/diagdata.js # Command not supported in mongos
- jstests/core/dropdb_race.js # syncdelay.
- jstests/core/evalb.js # profiling.
- - jstests/core/fsync.js # fsync.
+ - jstests/core/fsync.js # uses fsync.
- jstests/core/geo_haystack*.js # geoSearch.
- jstests/core/geo_s2cursorlimitskip.js # profiling.
- jstests/core/geo_update_btree2.js # notablescan.
diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
index 2bc519e5bd3..f2121d49d72 100644
--- a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
@@ -14,13 +14,14 @@ selector:
- jstests/core/check_shard_index.js # checkShardingIndex.
- jstests/core/collection_truncate.js # emptycapped.
- jstests/core/compact_keeps_indexes.js # compact.
+ - jstests/core/currentop.js # uses fsync.
- jstests/core/dbadmin.js # "local" database.
- jstests/core/dbhash.js # dbhash.
- jstests/core/dbhash2.js # dbhash.
- jstests/core/diagdata.js # Command not supported in mongos
- jstests/core/dropdb_race.js # syncdelay.
- jstests/core/evalb.js # profiling.
- - jstests/core/fsync.js # fsync.
+ - jstests/core/fsync.js # uses fsync.
- jstests/core/geo_haystack*.js # geoSearch.
- jstests/core/geo_s2cursorlimitskip.js # profiling.
- jstests/core/geo_update_btree2.js # notablescan.
diff --git a/jstests/core/currentop.js b/jstests/core/currentop.js
index ef948d415be..d1e360c12c0 100644
--- a/jstests/core/currentop.js
+++ b/jstests/core/currentop.js
@@ -1,82 +1,33 @@
-print("BEGIN currentop.js");
-
-// test basic currentop functionality + querying of nested documents
-t = db.jstests_currentop;
-t.drop();
-
-for (i = 0; i < 100; i++) {
- t.save({"num": i});
-}
-
-print("count:" + t.count());
-
-function ops(q) {
- printjson(db.currentOp().inprog);
- return db.currentOp(q).inprog;
-}
-
-print("start shell");
-
-// sleep for a second for each (of 100) documents; can be killed in between documents & test should
-// complete before 100 seconds
-s1 = startParallelShell("db.jstests_currentop.count( { '$where': function() { sleep(1000); } } )");
-
-print("sleep");
-sleep(1000);
-
-print("inprog:");
-printjson(db.currentOp().inprog);
-print();
-sleep(1);
-print("inprog:");
-printjson(db.currentOp().inprog);
-print();
-
-// need to wait for read to start
-print("wait have some ops");
-assert.soon(function() {
- return ops({"locks.Collection": "r", "ns": "test.jstests_currentop"}).length +
- ops({"locks.Collection": "R", "ns": "test.jstests_currentop"}).length >=
- 1;
-}, "have_some_ops");
-print("ok");
-
-s2 = startParallelShell("db.jstests_currentop.update({ '$where': function() { sleep(150); } }," +
- " { '$inc': {num: 1} }, false, true );");
-
-o = [];
-
-function f() {
- o = ops({"ns": "test.jstests_currentop"});
-
- printjson(o);
-
- var writes = ops({"locks.Collection": "w", "ns": "test.jstests_currentop"}).length;
-
- var readops = ops({"locks.Collection": "r", "ns": "test.jstests_currentop"});
- print("readops:");
- printjson(readops);
- var reads = readops.length;
-
- print("total: " + o.length + " w: " + writes + " r:" + reads);
-
- return o.length > writes && o.length > reads;
-}
-
-print("go");
-
-assert.soon(f, "f");
-
-// avoid waiting for the operations to complete (if soon succeeded)
-for (var i in o) {
- db.killOp(o[i].opid);
-}
-
-start = new Date();
-
-// The operations running in the parallel shells may or may not have been killed.
-s1({checkExitSuccess: false});
-s2({checkExitSuccess: false});
-
-// don't want to pass if timeout killed the js function
-assert((new Date()) - start < 30000);
+/**
+ * Tests that long-running operations show up in currentOp and report the locks they are holding.
+ */
+(function() {
+ "use strict";
+ const coll = db.jstests_currentop;
+ coll.drop();
+
+ // We fsync+lock the server to cause all subsequent write operations to block.
+ assert.commandWorked(db.fsyncLock());
+
+ const awaitInsertShell = startParallelShell(function() {
+ assert.writeOK(db.jstests_currentop.insert({}));
+ });
+
+ // Wait until the write appears in the currentOp output reporting that it is waiting for a lock.
+ assert.soon(
+ function() {
+ return db.currentOp({
+ ns: coll.getFullName(),
+ "locks.Global": "w",
+ "waitingForLock": true,
+ }).inprog.length === 1;
+ },
+ function() {
+ return "Failed to find blocked insert in currentOp() output: " + tojson(db.currentOp());
+ });
+
+ // Unlock the server and make sure the write finishes.
+ const fsyncResponse = assert.commandWorked(db.fsyncUnlock());
+ assert.eq(fsyncResponse.lockCount, 0);
+ awaitInsertShell();
+}());
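
The rewrite replaces the old timing-based approach ($where functions that sleep, polled until a matching lock shows up, then killOp on whatever was found) with a deterministic barrier: fsyncLock guarantees the insert blocks, so there is exactly one operation to wait for and nothing to kill. One consequence is that the join handle returned by startParallelShell can be used in its strict form; a short sketch contrasting the two usages touched by this patch:

    // New style: the insert is expected to succeed once fsyncUnlock runs, so a
    // non-zero exit code from the parallel shell should fail the test.
    awaitInsertShell();

    // Old style: the $where operation may or may not have been killed by killOp,
    // so the exit code was deliberately ignored.
    s1({checkExitSuccess: false});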
diff --git a/jstests/core/fsync.js b/jstests/core/fsync.js
index 9f622867ee1..13ff20e4177 100644
--- a/jstests/core/fsync.js
+++ b/jstests/core/fsync.js
@@ -89,7 +89,7 @@
assert(fsyncPseudoCommandRes.ok, "fsyncUnlock pseudo-command failed");
assert(db.currentOp().fsyncLock == null, "fsyncUnlock is not null in db.currentOp");
- // Make sure that insert attempts made during multiple fsynLock requests will not execute until
+ // Make sure that insert attempts made during multiple fsyncLock requests will not execute until
// all locks have been released.
fsyncLockRes = db.fsyncLock();
assert.commandWorked(fsyncLockRes);
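
Both currentop.js and fsync.js lean on the fact that fsyncLock requests nest: writes stay blocked until every outstanding lock has been released, and fsyncUnlock reports how many remain. A hedged sketch of that contract, assuming lockCount counts the locks still outstanding after each unlock:

    assert.commandWorked(db.fsyncLock());
    assert.commandWorked(db.fsyncLock());  // lock requests nest rather than fail
    assert.eq(1, assert.commandWorked(db.fsyncUnlock()).lockCount);  // one lock left; writes still blocked
    assert.eq(0, assert.commandWorked(db.fsyncUnlock()).lockCount);  // fully unlocked; writes may proceed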
diff --git a/jstests/libs/parallelTester.js b/jstests/libs/parallelTester.js
index ca964d86111..d4d8f96d7b4 100644
--- a/jstests/libs/parallelTester.js
+++ b/jstests/libs/parallelTester.js
@@ -173,7 +173,6 @@ if (typeof _threadInject != "undefined") {
// this has a chance to see the message
"connections_opened.js", // counts connections, globally
"opcounters_write_cmd.js",
- "currentop.js", // SERVER-8673, plus rwlock yielding issues
"set_param1.js", // changes global state
"geo_update_btree2.js", // SERVER-11132 test disables table scans
"update_setOnInsert.js", // SERVER-9982
@@ -192,11 +191,12 @@ if (typeof _threadInject != "undefined") {
// some tests can't be run in parallel with each other
var serialTestsArr = [
+ // These tests use fsyncLock.
parallelFilesDir + "/fsync.js",
- parallelFilesDir + "/auth1.js",
+ parallelFilesDir + "/currentop.js",
// These tests expect the profiler to be on or off at specific points. They should not
- // be run in parallel with tests that peform fsyncLock. User operations skip writing to
+ // be run in parallel with tests that perform fsyncLock. User operations skip writing to
// the system.profile collection while the server is fsyncLocked.
//
// The profiler tests can be run in parallel with each other as they use test-specific
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index 2a80c0b4a42..13bc6b58cbc 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -30,11 +30,13 @@
#include "mongo/platform/basic.h"
+#include <algorithm>
#include <string>
#include <vector>
#include "mongo/config.h"
#include "mongo/db/concurrency/lock_manager_test_help.h"
+#include "mongo/db/concurrency/locker.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
#include "mongo/util/timer.h"
@@ -42,7 +44,7 @@
namespace mongo {
TEST(LockerImpl, LockNoConflict) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
MMAPV1LockerImpl locker;
locker.lockGlobal(MODE_IX);
@@ -60,7 +62,7 @@ TEST(LockerImpl, LockNoConflict) {
}
TEST(LockerImpl, ReLockNoConflict) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
MMAPV1LockerImpl locker;
locker.lockGlobal(MODE_IX);
@@ -78,7 +80,7 @@ TEST(LockerImpl, ReLockNoConflict) {
}
TEST(LockerImpl, ConflictWithTimeout) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
DefaultLockerImpl locker1;
ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IX));
@@ -97,7 +99,7 @@ TEST(LockerImpl, ConflictWithTimeout) {
}
TEST(LockerImpl, ConflictUpgradeWithTimeout) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ const ResourceId resId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
DefaultLockerImpl locker1;
ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IS));
@@ -191,8 +193,8 @@ TEST(LockerImpl, saveAndRestoreDBAndCollection) {
DefaultLockerImpl locker;
- const ResourceId resIdDatabase(RESOURCE_DATABASE, std::string("TestDB"));
- const ResourceId resIdCollection(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ const ResourceId resIdDatabase(RESOURCE_DATABASE, "TestDB"_sd);
+ const ResourceId resIdCollection(RESOURCE_COLLECTION, "TestDB.collection"_sd);
// Lock some stuff.
locker.lockGlobal(MODE_IX);
@@ -215,7 +217,7 @@ TEST(LockerImpl, saveAndRestoreDBAndCollection) {
}
TEST(LockerImpl, DefaultLocker) {
- const ResourceId resId(RESOURCE_DATABASE, std::string("TestDB"));
+ const ResourceId resId(RESOURCE_DATABASE, "TestDB"_sd);
DefaultLockerImpl locker;
ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
@@ -233,7 +235,7 @@ TEST(LockerImpl, DefaultLocker) {
}
TEST(LockerImpl, MMAPV1Locker) {
- const ResourceId resId(RESOURCE_DATABASE, std::string("TestDB"));
+ const ResourceId resId(RESOURCE_DATABASE, "TestDB"_sd);
MMAPV1LockerImpl locker;
ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
@@ -252,8 +254,8 @@ TEST(LockerImpl, MMAPV1Locker) {
}
TEST(LockerImpl, CanceledDeadlockUnblocks) {
- const ResourceId db1(RESOURCE_DATABASE, std::string("db1"));
- const ResourceId db2(RESOURCE_DATABASE, std::string("db2"));
+ const ResourceId db1(RESOURCE_DATABASE, "db1"_sd);
+ const ResourceId db2(RESOURCE_DATABASE, "db2"_sd);
DefaultLockerImpl locker1;
DefaultLockerImpl locker2;
@@ -296,4 +298,92 @@ TEST(LockerImpl, CanceledDeadlockUnblocks) {
ASSERT(locker2.unlockGlobal());
ASSERT(locker3.unlockGlobal());
}
+
+namespace {
+/**
+ * Helper function to determine if 'lockerInfo' contains a lock with ResourceId 'resourceId' and
+ * lock mode 'mode' within 'lockerInfo.locks'.
+ */
+bool lockerInfoContainsLock(const Locker::LockerInfo& lockerInfo,
+ const ResourceId& resourceId,
+ const LockMode& mode) {
+ return (1U == std::count_if(lockerInfo.locks.begin(),
+ lockerInfo.locks.end(),
+ [&resourceId, &mode](const Locker::OneLock& lock) {
+ return lock.resourceId == resourceId && lock.mode == mode;
+ }));
+}
+} // namespace
+
+TEST(LockerImpl, GetLockerInfoShouldReportHeldLocks) {
+ const ResourceId globalId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
+ const ResourceId dbId(RESOURCE_DATABASE, "TestDB"_sd);
+ const ResourceId collectionId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
+
+ // Take an exclusive lock on the collection.
+ DefaultLockerImpl locker;
+ ASSERT_EQ(LOCK_OK, locker.lockGlobal(MODE_IX));
+ ASSERT_EQ(LOCK_OK, locker.lock(dbId, MODE_IX));
+ ASSERT_EQ(LOCK_OK, locker.lock(collectionId, MODE_X));
+
+ // Assert it shows up in the output of getLockerInfo().
+ Locker::LockerInfo lockerInfo;
+ locker.getLockerInfo(&lockerInfo);
+
+ ASSERT(lockerInfoContainsLock(lockerInfo, globalId, MODE_IX));
+ ASSERT(lockerInfoContainsLock(lockerInfo, dbId, MODE_IX));
+ ASSERT(lockerInfoContainsLock(lockerInfo, collectionId, MODE_X));
+ ASSERT_EQ(3U, lockerInfo.locks.size());
+
+ ASSERT(locker.unlock(collectionId));
+ ASSERT(locker.unlock(dbId));
+ ASSERT(locker.unlockGlobal());
+}
+
+TEST(LockerImpl, GetLockerInfoShouldReportPendingLocks) {
+ const ResourceId globalId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
+ const ResourceId dbId(RESOURCE_DATABASE, "TestDB"_sd);
+ const ResourceId collectionId(RESOURCE_COLLECTION, "TestDB.collection"_sd);
+
+ // Take an exclusive lock on the collection.
+ DefaultLockerImpl successfulLocker;
+ ASSERT_EQ(LOCK_OK, successfulLocker.lockGlobal(MODE_IX));
+ ASSERT_EQ(LOCK_OK, successfulLocker.lock(dbId, MODE_IX));
+ ASSERT_EQ(LOCK_OK, successfulLocker.lock(collectionId, MODE_X));
+
+ // Now attempt to get conflicting locks.
+ DefaultLockerImpl conflictingLocker;
+ ASSERT_EQ(LOCK_OK, conflictingLocker.lockGlobal(MODE_IS));
+ ASSERT_EQ(LOCK_OK, conflictingLocker.lock(dbId, MODE_IS));
+ ASSERT_EQ(LOCK_WAITING, conflictingLocker.lockBegin(collectionId, MODE_IS));
+
+ // Assert the held locks show up in the output of getLockerInfo().
+ Locker::LockerInfo lockerInfo;
+ conflictingLocker.getLockerInfo(&lockerInfo);
+ ASSERT(lockerInfoContainsLock(lockerInfo, globalId, MODE_IS));
+ ASSERT(lockerInfoContainsLock(lockerInfo, dbId, MODE_IS));
+ ASSERT(lockerInfoContainsLock(lockerInfo, collectionId, MODE_IS));
+ ASSERT_EQ(3U, lockerInfo.locks.size());
+
+ // Assert it reports that it is waiting for the collection lock.
+ ASSERT_EQ(collectionId, lockerInfo.waitingResource);
+
+ // Make sure it no longer reports waiting once unlocked.
+ ASSERT(successfulLocker.unlock(collectionId));
+ ASSERT(successfulLocker.unlock(dbId));
+ ASSERT(successfulLocker.unlockGlobal());
+
+ const unsigned timeoutMs = 0;
+ const bool checkDeadlock = false;
+ ASSERT_EQ(LOCK_OK,
+ conflictingLocker.lockComplete(collectionId, MODE_IS, timeoutMs, checkDeadlock));
+
+ conflictingLocker.getLockerInfo(&lockerInfo);
+ ASSERT_FALSE(lockerInfo.waitingResource.isValid());
+
+ ASSERT(conflictingLocker.unlock(collectionId));
+ ASSERT(conflictingLocker.unlock(dbId));
+ ASSERT(conflictingLocker.unlockGlobal());
+}
+
} // namespace mongo
diff --git a/src/mongo/db/stats/SConscript b/src/mongo/db/stats/SConscript
index 5d8fc0e31be..6a9459c1b37 100644
--- a/src/mongo/db/stats/SConscript
+++ b/src/mongo/db/stats/SConscript
@@ -66,9 +66,29 @@ env.Library(
)
env.Library(
+ target='fill_locker_info',
+ source=[
+ 'fill_locker_info.cpp',
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
+ '$BUILD_DIR/mongo/db/concurrency/lock_manager'
+ ],
+)
+
+env.CppUnitTest(
+ target='fill_locker_info_test',
+ source=[
+ 'fill_locker_info_test.cpp',
+ ],
+ LIBDEPS=[
+ 'fill_locker_info',
+ ],
+)
+
+env.Library(
target='serveronly',
source=[
- "fill_locker_info.cpp",
"latency_server_status_section.cpp",
"lock_server_status_section.cpp",
"range_deleter_server_status.cpp",
@@ -79,6 +99,7 @@ env.Library(
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/commands/core',
'$BUILD_DIR/mongo/db/range_deleter',
+ 'fill_locker_info',
'top',
],
LIBDEPS_TAGS=[
diff --git a/src/mongo/db/stats/fill_locker_info_test.cpp b/src/mongo/db/stats/fill_locker_info_test.cpp
new file mode 100644
index 00000000000..37ca204600e
--- /dev/null
+++ b/src/mongo/db/stats/fill_locker_info_test.cpp
@@ -0,0 +1,136 @@
+/**
+ * Copyright (C) 2016 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/bsontypes.h"
+#include "mongo/db/concurrency/lock_manager_defs.h"
+#include "mongo/db/stats/fill_locker_info.h"
+#include "mongo/unittest/death_test.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace {
+using LockerInfo = Locker::LockerInfo;
+using OneLock = Locker::OneLock;
+
+const ResourceId kGlobalId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
+
+TEST(FillLockerInfo, DoesReportWaitingForLockIfWaiting) {
+ LockerInfo info;
+ info.waitingResource = kGlobalId;
+ ASSERT_TRUE(info.waitingResource.isValid());
+
+ BSONObjBuilder infoBuilder;
+ fillLockerInfo(info, infoBuilder);
+ const BSONObj infoObj = infoBuilder.done();
+
+ ASSERT(infoObj["waitingForLock"].type() == BSONType::Bool);
+ ASSERT_TRUE(infoObj["waitingForLock"].Bool());
+}
+
+TEST(FillLockerInfo, DoesNotReportWaitingForLockIfNotWaiting) {
+ LockerInfo info;
+ info.waitingResource = ResourceId(); // This means it is not waiting for anything.
+ ASSERT_FALSE(info.waitingResource.isValid());
+
+ BSONObjBuilder infoBuilder;
+ fillLockerInfo(info, infoBuilder);
+ const BSONObj infoObj = infoBuilder.done();
+
+ ASSERT(infoObj["waitingForLock"].type() == BSONType::Bool);
+ ASSERT_FALSE(infoObj["waitingForLock"].Bool());
+}
+
+TEST(FillLockerInfo, DoesReportLockStats) {
+ LockerInfo info;
+ SingleThreadedLockStats stats;
+ stats.recordAcquisition(kGlobalId, MODE_IX);
+ info.stats = stats;
+
+ BSONObjBuilder infoBuilder;
+ fillLockerInfo(info, infoBuilder);
+ const BSONObj infoObj = infoBuilder.done();
+
+ ASSERT_EQ(infoObj["lockStats"].type(), BSONType::Object);
+}
+
+DEATH_TEST(FillLockerInfo, ShouldFailIfLocksAreNotSortedAppropriately, "Invariant failure") {
+ LockerInfo info;
+ // The global lock is supposed to come before the database lock.
+ info.locks = {OneLock{ResourceId(RESOURCE_DATABASE, "TestDB"_sd), MODE_X},
+ OneLock{kGlobalId, MODE_IX}};
+
+ BSONObjBuilder infoBuilder;
+ fillLockerInfo(info, infoBuilder);
+}
+
+TEST(FillLockerInfo, DoesReportLocksHeld) {
+ const ResourceId dbId(RESOURCE_DATABASE, "TestDB"_sd);
+ LockerInfo info;
+ info.locks = {OneLock{kGlobalId, MODE_IX}, OneLock{dbId, MODE_IX}};
+
+ BSONObjBuilder infoBuilder;
+ fillLockerInfo(info, infoBuilder);
+ const BSONObj infoObj = infoBuilder.done();
+
+ ASSERT_EQ(infoObj["locks"].type(), BSONType::Object);
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(kGlobalId.getType())].type(), BSONType::String);
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(kGlobalId.getType())].String(), "w");
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(dbId.getType())].type(), BSONType::String);
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(dbId.getType())].String(), "w");
+}
+
+TEST(FillLockerInfo, ShouldReportMaxTypeHeldForResourceType) {
+ const ResourceId firstDbId(RESOURCE_DATABASE, "FirstDB"_sd);
+ const ResourceId secondDbId(RESOURCE_DATABASE, "SecondDB"_sd);
+ LockerInfo info;
+ info.locks = {
+ OneLock{kGlobalId, MODE_IX}, OneLock{firstDbId, MODE_IX}, OneLock{secondDbId, MODE_X}};
+
+ BSONObjBuilder infoBuilder;
+ fillLockerInfo(info, infoBuilder);
+ BSONObj infoObj = infoBuilder.done();
+
+ ASSERT_EQ(infoObj["locks"].type(), BSONType::Object);
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(firstDbId.getType())].type(), BSONType::String);
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(firstDbId.getType())].String(),
+ "W"); // One is held in IX, one in X, so X should win and be displayed as "W".
+
+ // Ensure it still works if locks are reported in the opposite order.
+ info.locks = {
+ OneLock{kGlobalId, MODE_IX}, OneLock{secondDbId, MODE_X}, OneLock{firstDbId, MODE_IX}};
+
+ ASSERT_EQ(infoObj["locks"].type(), BSONType::Object);
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(firstDbId.getType())].type(), BSONType::String);
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(firstDbId.getType())].String(), "W");
+}
+
+} // namespace
+} // namespace mongo
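
Taken together, these fill_locker_info tests pin down the shape of the locker section that currentOp consumers see. For an operation holding the global lock in MODE_IX and a database lock in MODE_X while waiting on another resource, the relevant fields would look roughly like the following (an illustrative sketch assembled from the assertions above, not verbatim server output):

    {
        "waitingForLock": true,        // waitingResource is valid
        "locks": {
            "Global": "w",             // MODE_IX renders as "w"
            "Database": "W"            // MODE_X renders as "W"; the strongest mode per resource type wins
        },
        "lockStats": { /* asserted above only to be an object */ }
    }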