1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
|
/**
 * Tests that the dbHash command acquires IS mode locks on the global, database, and collection
 * resources when reading a timestamp using the $_internalReadAtClusterTime option.
 *
 * @tags: [uses_transactions, requires_fcv_47]
 */
(function() {
"use strict";

load("jstests/libs/parallelTester.js");  // for Thread

const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();

const primary = rst.getPrimary();
const db = primary.getDB("test");

const session = primary.startSession({causalConsistency: false});
const sessionDB = session.getDatabase(db.getName());

// We insert a document so the dbHash command has a collection to process.
assert.commandWorked(sessionDB.mycoll.insert({}, {writeConcern: {w: "majority"}}));
const clusterTime = session.getOperationTime();

// We then start a transaction in order to have a catalog operation queue up behind it.
session.startTransaction();
assert.commandWorked(sessionDB.mycoll.insert({}));

// The transaction's insert should be holding intent-exclusive (MODE_IX, shown as "w")
// locks on the global, database, and collection resources.
const ops = db.currentOp({"lsid.id": session.getSessionId().id}).inprog;
assert.eq(
    1, ops.length, () => "Failed to find session in currentOp() output: " + tojson(db.currentOp()));
assert.eq(ops[0].locks,
          {ReplicationStateTransition: "w", Global: "w", Database: "w", Collection: "w"});

const threadCaptruncCmd = new Thread(function(host) {
    try {
        const conn = new Mongo(host);
        const db = conn.getDB("test");

        // We use the captrunc command as a catalog operation that requires a MODE_X lock on the
        // collection. This ensures we aren't having the dbHash command queue up behind it on a
        // database-level lock. The collection isn't capped so it'll fail with an
        // IllegalOperation error response.
        assert.commandFailedWithCode(db.runCommand({captrunc: "mycoll", n: 1}),
                                     ErrorCodes.IllegalOperation);
        return {ok: 1};
    } catch (e) {
        return {ok: 0, error: e.toString(), stack: e.stack};
    }
}, db.getMongo().host);

threadCaptruncCmd.start();

// Wait until the captrunc command is blocked behind the transaction's collection lock.
assert.soon(() => {
    const ops = db.currentOp({"command.captrunc": "mycoll", waitingForLock: true}).inprog;
    return ops.length === 1;
}, () => "Failed to find captrunc command in currentOp() output: " + tojson(db.currentOp()));

const threadDBHash = new Thread(function(host, clusterTime) {
    try {
        const conn = new Mongo(host);
        const db = conn.getDB("test");
        // The clusterTime is passed as a tojson() string because Thread arguments must be
        // serializable; eval() reconstructs the Timestamp object on the worker side.
        assert.commandWorked(db.runCommand({
            dbHash: 1,
            $_internalReadAtClusterTime: eval(clusterTime),
        }));
        return {ok: 1};
    } catch (e) {
        return {ok: 0, error: e.toString(), stack: e.stack};
    }
}, db.getMongo().host, tojson(clusterTime));

threadDBHash.start();

// Wait for the dbHash command to block, and verify it is waiting while holding only
// intent-shared (MODE_IS, shown as "r") locks — the property under test.
assert.soon(() => {
    const ops = db.currentOp({"command.dbHash": 1, waitingForLock: true}).inprog;
    if (ops.length === 0) {
        return false;
    }
    assert.eq(ops[0].locks,
              {ReplicationStateTransition: "w", Global: "r", Database: "r", Collection: "r"});
    return true;
}, () => "Failed to find dbHash command in currentOp() output: " + tojson(db.currentOp()));

// Committing the transaction releases its locks, letting both queued threads proceed.
assert.commandWorked(session.commitTransaction_forTesting());
threadCaptruncCmd.join();
threadDBHash.join();

assert.commandWorked(threadCaptruncCmd.returnData());
assert.commandWorked(threadDBHash.returnData());

session.endSession();
rst.stopSet();
})();
|