author     clang-format-7.0.1 <adam.martin@10gen.com>  2019-07-26 18:20:35 -0400
committer  ADAM David Alan Martin <adam.martin@10gen.com>  2019-07-27 11:02:23 -0400
commit     134a4083953270e8a11430395357fb70a29047ad (patch)
tree       dd428e1230e31d92b20b393dfdc17ffe7fa79cb6 /jstests/replsets/dbhash_lock_acquisition.js
parent     1e46b5049003f427047e723ea5fab15b5a9253ca (diff)
download   mongo-134a4083953270e8a11430395357fb70a29047ad.tar.gz
SERVER-41772 Apply clang-format 7.0.1 to the codebase
Diffstat (limited to 'jstests/replsets/dbhash_lock_acquisition.js')
-rw-r--r--  jstests/replsets/dbhash_lock_acquisition.js | 169
1 file changed, 84 insertions(+), 85 deletions(-)
diff --git a/jstests/replsets/dbhash_lock_acquisition.js b/jstests/replsets/dbhash_lock_acquisition.js
index 3c66ad1aa48..8fd30e7f7be 100644
--- a/jstests/replsets/dbhash_lock_acquisition.js
+++ b/jstests/replsets/dbhash_lock_acquisition.js
@@ -5,92 +5,91 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- load("jstests/libs/parallelTester.js"); // for ScopedThread
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const db = primary.getDB("test");
-
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(db.getName());
+"use strict";
+
+load("jstests/libs/parallelTester.js"); // for ScopedThread
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const db = primary.getDB("test");
+
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(db.getName());
+
+// We insert a document so the dbHash command has a collection to process.
+assert.commandWorked(sessionDB.mycoll.insert({}, {writeConcern: {w: "majority"}}));
+const clusterTime = session.getOperationTime();
+
+// We then start a transaction so that a catalog operation can queue up behind it.
+session.startTransaction();
+assert.commandWorked(sessionDB.mycoll.insert({}));
+
+const ops = db.currentOp({"lsid.id": session.getSessionId().id}).inprog;
+assert.eq(
+ 1, ops.length, () => "Failed to find session in currentOp() output: " + tojson(db.currentOp()));
+assert.eq(ops[0].locks,
+ {ReplicationStateTransition: "w", Global: "w", Database: "w", Collection: "w"});
+
+const threadCaptruncCmd = new ScopedThread(function(host) {
+ try {
+ const conn = new Mongo(host);
+ const db = conn.getDB("test");
+
+ // We use the captrunc command as a catalog operation that requires a MODE_X lock on the
+ // collection. This ensures the dbHash command doesn't queue up behind it on a
+ // database-level lock. The collection isn't capped, so the command fails with an
+ // IllegalOperation error response.
+ assert.commandFailedWithCode(db.runCommand({captrunc: "mycoll", n: 1}),
+ ErrorCodes.IllegalOperation);
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, error: e.toString(), stack: e.stack};
+ }
+}, db.getMongo().host);
+
+threadCaptruncCmd.start();
+
+assert.soon(() => {
+ const ops = db.currentOp({"command.captrunc": "mycoll", waitingForLock: true}).inprog;
+ return ops.length === 1;
+}, () => "Failed to find create collection in currentOp() output: " + tojson(db.currentOp()));
+
+const threadDBHash = new ScopedThread(function(host, clusterTime) {
+ try {
+ const conn = new Mongo(host);
+ const db = conn.getDB("test");
+ assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: eval(clusterTime),
+ }));
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, error: e.toString(), stack: e.stack};
+ }
+}, db.getMongo().host, tojson(clusterTime));
+
+threadDBHash.start();
+
+assert.soon(() => {
+ const ops = db.currentOp({"command.dbHash": 1, waitingForLock: true}).inprog;
+ if (ops.length === 0) {
+ return false;
+ }
+ assert.eq(ops[0].locks,
+ {ReplicationStateTransition: "w", Global: "r", Database: "r", Collection: "r"});
+ return true;
+}, () => "Failed to find create collection in currentOp() output: " + tojson(db.currentOp()));
- // We insert a document so the dbHash command has a collection to process.
- assert.commandWorked(sessionDB.mycoll.insert({}, {writeConcern: {w: "majority"}}));
- const clusterTime = session.getOperationTime();
+assert.commandWorked(session.commitTransaction_forTesting());
+threadCaptruncCmd.join();
+threadDBHash.join();
- // We then start a transaction so that a catalog operation can queue up behind it.
- session.startTransaction();
- assert.commandWorked(sessionDB.mycoll.insert({}));
+assert.commandWorked(threadCaptruncCmd.returnData());
+assert.commandWorked(threadDBHash.returnData());
- const ops = db.currentOp({"lsid.id": session.getSessionId().id}).inprog;
- assert.eq(1,
- ops.length,
- () => "Failed to find session in currentOp() output: " + tojson(db.currentOp()));
- assert.eq(ops[0].locks,
- {ReplicationStateTransition: "w", Global: "w", Database: "w", Collection: "w"});
-
- const threadCaptruncCmd = new ScopedThread(function(host) {
- try {
- const conn = new Mongo(host);
- const db = conn.getDB("test");
-
- // We use the captrunc command as a catalog operation that requires a MODE_X lock on the
- // collection. This ensures the dbHash command doesn't queue up behind it on a
- // database-level lock. The collection isn't capped, so the command fails with an
- // IllegalOperation error response.
- assert.commandFailedWithCode(db.runCommand({captrunc: "mycoll", n: 1}),
- ErrorCodes.IllegalOperation);
- return {ok: 1};
- } catch (e) {
- return {ok: 0, error: e.toString(), stack: e.stack};
- }
- }, db.getMongo().host);
-
- threadCaptruncCmd.start();
-
- assert.soon(() => {
- const ops = db.currentOp({"command.captrunc": "mycoll", waitingForLock: true}).inprog;
- return ops.length === 1;
- }, () => "Failed to find captrunc command in currentOp() output: " + tojson(db.currentOp()));
-
- const threadDBHash = new ScopedThread(function(host, clusterTime) {
- try {
- const conn = new Mongo(host);
- const db = conn.getDB("test");
- assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: eval(clusterTime),
- }));
- return {ok: 1};
- } catch (e) {
- return {ok: 0, error: e.toString(), stack: e.stack};
- }
- }, db.getMongo().host, tojson(clusterTime));
-
- threadDBHash.start();
-
- assert.soon(() => {
- const ops = db.currentOp({"command.dbHash": 1, waitingForLock: true}).inprog;
- if (ops.length === 0) {
- return false;
- }
- assert.eq(ops[0].locks,
- {ReplicationStateTransition: "w", Global: "r", Database: "r", Collection: "r"});
- return true;
- }, () => "Failed to find dbHash command in currentOp() output: " + tojson(db.currentOp()));
-
- assert.commandWorked(session.commitTransaction_forTesting());
- threadCaptruncCmd.join();
- threadDBHash.join();
-
- assert.commandWorked(threadCaptruncCmd.returnData());
- assert.commandWorked(threadDBHash.returnData());
-
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
})();
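
For readers skimming the test above: its core trick is that an open transaction keeps intent ("w", i.e. MODE_IX) locks on the database and collection until it commits, so a catalog operation that needs MODE_X queues behind it and shows up in currentOp() with waitingForLock: true. A minimal sketch of that inspection pattern, assuming a mongo shell connected to a replica-set primary; the "test.mycoll" namespace is illustrative, not part of the commit, and every call used here also appears in the test itself:

(function() {
"use strict";

const session = db.getMongo().startSession({causalConsistency: false});
const sessionDB = session.getDatabase("test");

// An open transaction holds its intent locks until commit or abort.
session.startTransaction();
assert.commandWorked(sessionDB.mycoll.insert({}));

// The held lock modes appear in the "locks" subdocument of the session's
// currentOp() entry, keyed by resource type.
const ops = db.currentOp({"lsid.id": session.getSessionId().id}).inprog;
assert.eq(1, ops.length);
printjson(ops[0].locks);  // e.g. {ReplicationStateTransition: "w", Global: "w", Database: "w", Collection: "w"}

assert.commandWorked(session.commitTransaction_forTesting());
session.endSession();
})();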
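The other idiom worth noting is how the test hands the Timestamp returned by session.getOperationTime() to a ScopedThread: it serializes the value with tojson() and rebuilds it inside the thread with eval(), presumably because a Timestamp does not survive the thread-argument round trip intact. A sketch of the same handoff under the same assumptions as above; $_internalReadAtClusterTime is the internal, test-only parameter taken directly from the test:

(function() {
"use strict";

load("jstests/libs/parallelTester.js");  // for ScopedThread

const session = db.getMongo().startSession({causalConsistency: false});
assert.commandWorked(session.getDatabase("test").mycoll.insert({}));
const clusterTime = session.getOperationTime();  // a Timestamp

const thread = new ScopedThread(function(host, clusterTimeStr) {
    const conn = new Mongo(host);
    // eval() turns the tojson() string, e.g. 'Timestamp(1564181235, 1)',
    // back into a shell Timestamp object the command can accept.
    return conn.getDB("test").runCommand({
        dbHash: 1,
        $_internalReadAtClusterTime: eval(clusterTimeStr),
    });
}, db.getMongo().host, tojson(clusterTime));

thread.start();
thread.join();
assert.commandWorked(thread.returnData());

session.endSession();
})();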