summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCharlie Swanson <cswanson310@gmail.com>2016-12-07 08:33:46 -0500
committerCharlie Swanson <charlie.swanson@mongodb.com>2016-12-12 18:00:55 -0500
commit9d920663421437e65706b0587077781895984e2d (patch)
tree5d83afaba605e8520e19a08992b7eec53e8296db
parentb25817dde10af84b7a9b0721bae294c043360f64 (diff)
downloadmongo-9d920663421437e65706b0587077781895984e2d.tar.gz
SERVER-27042 Stabilize currentop.js and add unit tests for lock state reporting
-rw-r--r--buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml4
-rw-r--r--buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml3
-rw-r--r--jstests/core/currentop.js114
-rw-r--r--jstests/libs/parallelTester.js4
-rw-r--r--src/mongo/db/SConscript2
-rw-r--r--src/mongo/db/concurrency/lock_state_test.cpp82
-rw-r--r--src/mongo/db/stats/SConscript21
-rw-r--r--src/mongo/db/stats/fill_locker_info_test.cpp136
8 files changed, 278 insertions, 88 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
index 6b51a5f17f5..32ad9399d63 100644
--- a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
@@ -12,7 +12,7 @@ selector:
- jstests/core/check_shard_index.js # checkShardingIndex.
- jstests/core/collection_truncate.js # emptycapped.
- jstests/core/compact_keeps_indexes.js # compact.
- - jstests/core/capped_update.js # uses godinsert and can't run under replication.
+ - jstests/core/currentop.js # uses fsync.
- jstests/core/auth_copydb.js # copyDatabase.
- jstests/core/copydb.js # copyDatabase.
- jstests/core/dbadmin.js # "local" database.
@@ -20,7 +20,7 @@ selector:
- jstests/core/dbhash2.js # dbhash.
- jstests/core/dropdb_race.js # syncdelay.
- jstests/core/evalb.js # profiling.
- - jstests/core/fsync.js # fsync.
+ - jstests/core/fsync.js # uses fsync.
- jstests/core/geo_haystack*.js # geoSearch.
- jstests/core/geo_s2cursorlimitskip.js # profiling.
- jstests/core/geo_update_btree2.js # notablescan.
diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
index 56a5b581ce8..a1758adb65c 100644
--- a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
@@ -14,12 +14,13 @@ selector:
- jstests/core/check_shard_index.js # checkShardingIndex.
- jstests/core/collection_truncate.js # emptycapped.
- jstests/core/compact_keeps_indexes.js # compact.
+ - jstests/core/currentop.js # uses fsync.
- jstests/core/dbadmin.js # "local" database.
- jstests/core/dbhash.js # dbhash.
- jstests/core/dbhash2.js # dbhash.
- jstests/core/dropdb_race.js # syncdelay.
- jstests/core/evalb.js # profiling.
- - jstests/core/fsync.js # fsync.
+ - jstests/core/fsync.js # uses fsync.
- jstests/core/geo_haystack*.js # geoSearch.
- jstests/core/geo_s2cursorlimitskip.js # profiling.
- jstests/core/geo_update_btree2.js # notablescan.
diff --git a/jstests/core/currentop.js b/jstests/core/currentop.js
index ef948d415be..296ad69355c 100644
--- a/jstests/core/currentop.js
+++ b/jstests/core/currentop.js
@@ -1,82 +1,32 @@
-print("BEGIN currentop.js");
-
-// test basic currentop functionality + querying of nested documents
-t = db.jstests_currentop;
-t.drop();
-
-for (i = 0; i < 100; i++) {
- t.save({"num": i});
-}
-
-print("count:" + t.count());
-
-function ops(q) {
- printjson(db.currentOp().inprog);
- return db.currentOp(q).inprog;
-}
-
-print("start shell");
-
-// sleep for a second for each (of 100) documents; can be killed in between documents & test should
-// complete before 100 seconds
-s1 = startParallelShell("db.jstests_currentop.count( { '$where': function() { sleep(1000); } } )");
-
-print("sleep");
-sleep(1000);
-
-print("inprog:");
-printjson(db.currentOp().inprog);
-print();
-sleep(1);
-print("inprog:");
-printjson(db.currentOp().inprog);
-print();
-
-// need to wait for read to start
-print("wait have some ops");
-assert.soon(function() {
- return ops({"locks.Collection": "r", "ns": "test.jstests_currentop"}).length +
- ops({"locks.Collection": "R", "ns": "test.jstests_currentop"}).length >=
- 1;
-}, "have_some_ops");
-print("ok");
-
-s2 = startParallelShell("db.jstests_currentop.update({ '$where': function() { sleep(150); } }," +
- " { '$inc': {num: 1} }, false, true );");
-
-o = [];
-
-function f() {
- o = ops({"ns": "test.jstests_currentop"});
-
- printjson(o);
-
- var writes = ops({"locks.Collection": "w", "ns": "test.jstests_currentop"}).length;
-
- var readops = ops({"locks.Collection": "r", "ns": "test.jstests_currentop"});
- print("readops:");
- printjson(readops);
- var reads = readops.length;
-
- print("total: " + o.length + " w: " + writes + " r:" + reads);
-
- return o.length > writes && o.length > reads;
-}
-
-print("go");
-
-assert.soon(f, "f");
-
-// avoid waiting for the operations to complete (if soon succeeded)
-for (var i in o) {
- db.killOp(o[i].opid);
-}
-
-start = new Date();
-
-// The operations running in the parallel shells may or may not have been killed.
-s1({checkExitSuccess: false});
-s2({checkExitSuccess: false});
-
-// don't want to pass if timeout killed the js function
-assert((new Date()) - start < 30000);
+/**
+ * Tests that long-running operations show up in currentOp and report the locks they are holding.
+ */
+(function() {
+ "use strict";
+ const coll = db.jstests_currentop;
+ coll.drop();
+
+ // We fsync+lock the server to cause all subsequent write operations to block.
+ assert.commandWorked(db.fsyncLock());
+
+ const awaitInsertShell = startParallelShell(function() {
+ assert.writeOK(db.jstests_currentop.insert({}));
+ });
+
+ // Wait until the write appears in the currentOp output reporting that it is waiting for a lock.
+ assert.soon(
+ function() {
+ return db.currentOp({
+ ns: coll.getFullName(),
+ "locks.Global": "w",
+ "waitingForLock": true,
+ }).inprog.length === 1;
+ },
+ function() {
+ return "Failed to find blocked insert in currentOp() output: " + tojson(db.currentOp());
+ });
+
+ // Unlock the server and make sure the write finishes.
+ assert.commandWorked(db.fsyncUnlock());
+ awaitInsertShell();
+}());
diff --git a/jstests/libs/parallelTester.js b/jstests/libs/parallelTester.js
index b043e3de78d..0126bfad9c3 100644
--- a/jstests/libs/parallelTester.js
+++ b/jstests/libs/parallelTester.js
@@ -149,7 +149,6 @@ if (typeof _threadInject != "undefined") {
// this has a chance to see the message
"connections_opened.js", // counts connections, globally
"opcounters_write_cmd.js",
- "currentop.js", // SERVER-8673, plus rwlock yielding issues
"set_param1.js", // changes global state
"geo_update_btree2.js", // SERVER-11132 test disables table scans
"update_setOnInsert.js", // SERVER-9982
@@ -168,8 +167,9 @@ if (typeof _threadInject != "undefined") {
// some tests can't be run in parallel with each other
var serialTestsArr = [
+ // These tests use fsyncLock.
parallelFilesDir + "/fsync.js",
- parallelFilesDir + "/auth1.js",
+ parallelFilesDir + "/currentop.js",
// These tests expect the profiler to be on or off at specific points
// during the test run.
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 628bb239813..a8ce1b54416 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -618,7 +618,6 @@ serverOnlyFiles = [
"repl/storage_interface_impl.cpp",
"repl/sync_source_feedback.cpp",
"service_context_d.cpp",
- "stats/fill_locker_info.cpp",
"stats/lock_server_status_section.cpp",
"stats/range_deleter_server_status.cpp",
"stats/snapshots.cpp",
@@ -689,6 +688,7 @@ serveronlyLibdeps = [
"s/sharding",
"startup_warnings_mongod",
"stats/counters",
+ "stats/fill_locker_info",
"stats/top",
"storage/devnull/storage_devnull",
"storage/ephemeral_for_test/storage_ephemeral_for_test",
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index ec44c9dc096..c3a29275416 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -30,11 +30,13 @@
#include "mongo/platform/basic.h"
+#include <algorithm>
#include <string>
#include <vector>
#include "mongo/config.h"
#include "mongo/db/concurrency/lock_manager_test_help.h"
+#include "mongo/db/concurrency/locker.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
#include "mongo/util/timer.h"
@@ -361,5 +363,85 @@ TEST(Locker, PerformanceLocker) {
}
#endif // MONGO_CONFIG_DEBUG_BUILD
+namespace {
+/**
+ * Helper function to determine if 'lockerInfo' contains a lock with ResourceId 'resourceId' and
+ * lock mode 'mode' within 'lockerInfo.locks'.
+ */
+bool lockerInfoContainsLock(const Locker::LockerInfo& lockerInfo,
+ const ResourceId& resourceId,
+ const LockMode& mode) {
+ return (1U == std::count_if(lockerInfo.locks.begin(),
+ lockerInfo.locks.end(),
+ [&resourceId, &mode](const Locker::OneLock& lock) {
+ return lock.resourceId == resourceId && lock.mode == mode;
+ }));
+}
+} // namespace
+
+TEST(LockerImpl, GetLockerInfoShouldReportHeldLocks) {
+ const ResourceId globalId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
+ const ResourceId dbId(RESOURCE_DATABASE, std::string("TestDB"));
+ const ResourceId collectionId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ // Take an exclusive lock on the collection.
+ DefaultLockerImpl locker;
+ ASSERT_EQ(LOCK_OK, locker.lockGlobal(MODE_IX));
+ ASSERT_EQ(LOCK_OK, locker.lock(dbId, MODE_IX));
+ ASSERT_EQ(LOCK_OK, locker.lock(collectionId, MODE_X));
+
+ // Assert it shows up in the output of getLockerInfo().
+ Locker::LockerInfo lockerInfo;
+ locker.getLockerInfo(&lockerInfo);
+
+ ASSERT(lockerInfoContainsLock(lockerInfo, globalId, MODE_IX));
+ ASSERT(lockerInfoContainsLock(lockerInfo, dbId, MODE_IX));
+ ASSERT(lockerInfoContainsLock(lockerInfo, collectionId, MODE_X));
+ ASSERT_EQ(3U, lockerInfo.locks.size());
+
+ ASSERT(locker.unlockAll());
+}
+
+TEST(LockerImpl, GetLockerInfoShouldReportPendingLocks) {
+ const ResourceId globalId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
+ const ResourceId dbId(RESOURCE_DATABASE, std::string("TestDB"));
+ const ResourceId collectionId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ // Take an exclusive lock on the collection.
+ DefaultLockerImpl successfulLocker;
+ ASSERT_EQ(LOCK_OK, successfulLocker.lockGlobal(MODE_IX));
+ ASSERT_EQ(LOCK_OK, successfulLocker.lock(dbId, MODE_IX));
+ ASSERT_EQ(LOCK_OK, successfulLocker.lock(collectionId, MODE_X));
+
+ // Now attempt to get conflicting locks.
+ DefaultLockerImpl conflictingLocker;
+ ASSERT_EQ(LOCK_OK, conflictingLocker.lockGlobal(MODE_IS));
+ ASSERT_EQ(LOCK_OK, conflictingLocker.lock(dbId, MODE_IS));
+ ASSERT_EQ(LOCK_WAITING, conflictingLocker.lockBegin(collectionId, MODE_IS));
+
+ // Assert the held locks show up in the output of getLockerInfo().
+ Locker::LockerInfo lockerInfo;
+ conflictingLocker.getLockerInfo(&lockerInfo);
+ ASSERT(lockerInfoContainsLock(lockerInfo, globalId, MODE_IS));
+ ASSERT(lockerInfoContainsLock(lockerInfo, dbId, MODE_IS));
+ ASSERT(lockerInfoContainsLock(lockerInfo, collectionId, MODE_IS));
+ ASSERT_EQ(3U, lockerInfo.locks.size());
+
+ // Assert it reports that it is waiting for the collection lock.
+ ASSERT_EQ(collectionId, lockerInfo.waitingResource);
+
+ // Make sure it no longer reports waiting once unlocked.
+ ASSERT(successfulLocker.unlockAll());
+
+ const unsigned timeoutMs = 0;
+ const bool checkDeadlock = false;
+ ASSERT_EQ(LOCK_OK,
+ conflictingLocker.lockComplete(collectionId, MODE_IS, timeoutMs, checkDeadlock));
+
+ conflictingLocker.getLockerInfo(&lockerInfo);
+ ASSERT_FALSE(lockerInfo.waitingResource.isValid());
+
+ ASSERT(conflictingLocker.unlockAll());
+}
} // namespace mongo
diff --git a/src/mongo/db/stats/SConscript b/src/mongo/db/stats/SConscript
index 0dea68e45da..a0ca5534447 100644
--- a/src/mongo/db/stats/SConscript
+++ b/src/mongo/db/stats/SConscript
@@ -54,3 +54,24 @@ env.Library(
'$BUILD_DIR/mongo/util/concurrency/spin_lock',
],
)
+
+env.Library(
+ target='fill_locker_info',
+ source=[
+ 'fill_locker_info.cpp',
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
+ '$BUILD_DIR/mongo/db/concurrency/lock_manager'
+ ],
+)
+
+env.CppUnitTest(
+ target='fill_locker_info_test',
+ source=[
+ 'fill_locker_info_test.cpp',
+ ],
+ LIBDEPS=[
+ 'fill_locker_info',
+ ],
+)
diff --git a/src/mongo/db/stats/fill_locker_info_test.cpp b/src/mongo/db/stats/fill_locker_info_test.cpp
new file mode 100644
index 00000000000..40a58879ddf
--- /dev/null
+++ b/src/mongo/db/stats/fill_locker_info_test.cpp
@@ -0,0 +1,136 @@
+/**
+ * Copyright (C) 2016 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/bsontypes.h"
+#include "mongo/db/concurrency/lock_manager_defs.h"
+#include "mongo/db/stats/fill_locker_info.h"
+#include "mongo/unittest/death_test.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace {
+using LockerInfo = Locker::LockerInfo;
+using OneLock = Locker::OneLock;
+
+const ResourceId kGlobalId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
+
+TEST(FillLockerInfo, DoesReportWaitingForLockIfWaiting) {
+ LockerInfo info;
+ info.waitingResource = kGlobalId;
+ ASSERT_TRUE(info.waitingResource.isValid());
+
+ BSONObjBuilder infoBuilder;
+ fillLockerInfo(info, infoBuilder);
+ const BSONObj infoObj = infoBuilder.done();
+
+ ASSERT(infoObj["waitingForLock"].type() == BSONType::Bool);
+ ASSERT_TRUE(infoObj["waitingForLock"].Bool());
+}
+
+TEST(FillLockerInfo, DoesNotReportWaitingForLockIfNotWaiting) {
+ LockerInfo info;
+ info.waitingResource = ResourceId(); // This means it is not waiting for anything.
+ ASSERT_FALSE(info.waitingResource.isValid());
+
+ BSONObjBuilder infoBuilder;
+ fillLockerInfo(info, infoBuilder);
+ const BSONObj infoObj = infoBuilder.done();
+
+ ASSERT(infoObj["waitingForLock"].type() == BSONType::Bool);
+ ASSERT_FALSE(infoObj["waitingForLock"].Bool());
+}
+
+TEST(FillLockerInfo, DoesReportLockStats) {
+ LockerInfo info;
+ SingleThreadedLockStats stats;
+ stats.recordAcquisition(kGlobalId, MODE_IX);
+ info.stats = stats;
+
+ BSONObjBuilder infoBuilder;
+ fillLockerInfo(info, infoBuilder);
+ const BSONObj infoObj = infoBuilder.done();
+
+ ASSERT_EQ(infoObj["lockStats"].type(), BSONType::Object);
+}
+
+DEATH_TEST(FillLockerInfo, ShouldFailIfLocksAreNotSortedAppropriately, "Invariant failure") {
+ LockerInfo info;
+ // The global lock is supposed to come before the database lock.
+ info.locks = {OneLock{ResourceId(RESOURCE_DATABASE, std::string("TestDB")), MODE_X},
+ OneLock{kGlobalId, MODE_IX}};
+
+ BSONObjBuilder infoBuilder;
+ fillLockerInfo(info, infoBuilder);
+}
+
+TEST(FillLockerInfo, DoesReportLocksHeld) {
+ const ResourceId dbId(RESOURCE_DATABASE, std::string("TestDB"));
+ LockerInfo info;
+ info.locks = {OneLock{kGlobalId, MODE_IX}, OneLock{dbId, MODE_IX}};
+
+ BSONObjBuilder infoBuilder;
+ fillLockerInfo(info, infoBuilder);
+ const BSONObj infoObj = infoBuilder.done();
+
+ ASSERT_EQ(infoObj["locks"].type(), BSONType::Object);
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(kGlobalId.getType())].type(), BSONType::String);
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(kGlobalId.getType())].String(), "w");
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(dbId.getType())].type(), BSONType::String);
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(dbId.getType())].String(), "w");
+}
+
+TEST(FillLockerInfo, ShouldReportMaxTypeHeldForResourceType) {
+ const ResourceId firstDbId(RESOURCE_DATABASE, std::string("FirstDB"));
+ const ResourceId secondDbId(RESOURCE_DATABASE, std::string("SecondDB"));
+ LockerInfo info;
+ info.locks = {
+ OneLock{kGlobalId, MODE_IX}, OneLock{firstDbId, MODE_IX}, OneLock{secondDbId, MODE_X}};
+
+ BSONObjBuilder infoBuilder;
+ fillLockerInfo(info, infoBuilder);
+ BSONObj infoObj = infoBuilder.done();
+
+ ASSERT_EQ(infoObj["locks"].type(), BSONType::Object);
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(firstDbId.getType())].type(), BSONType::String);
+ ASSERT_EQ(infoObj["locks"][resourceTypeName(firstDbId.getType())].String(),
+ "W"); // One is held in IX, one in X, so X should win and be displayed as "W".
+
+ // Ensure it still works if locks are reported in the opposite order.
+    info.locks = {
+        OneLock{kGlobalId, MODE_IX}, OneLock{secondDbId, MODE_X}, OneLock{firstDbId, MODE_IX}};
+    BSONObjBuilder oppositeOrderBuilder;
+    fillLockerInfo(info, oppositeOrderBuilder);
+    infoObj = oppositeOrderBuilder.done();
+    ASSERT_EQ(infoObj["locks"][resourceTypeName(firstDbId.getType())].String(), "W");
+}
+
+} // namespace
+} // namespace mongo