1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
|
/**
 * Concurrency test: repeatedly runs an indexed count() while a parallel
 * shell performs a multi-document update that moves every document out of
 * the counted range. Tracks how often the observed count decreases vs.
 * stays/rises ("misses"), and asserts that all NRECORDS documents are
 * still present once the update finishes.
 *
 * Requires the mongo shell test environment (MongoRunner, startParallelShell).
 */
(function() {
    "use strict";

    const conn = MongoRunner.runMongod({nojournal: ""});
    assert.neq(null, conn, "mongod was unable to start up");

    // `db` is the mongo shell's pre-declared global connection handle.
    db = conn.getDB("concurrency");
    db.dropDatabase();

    const NRECORDS = 3 * 1024 * 1024;
    print("loading " + NRECORDS + " documents (progress msg every 1024*1024 documents)");

    // Bulk-load documents {x: 0} .. {x: NRECORDS-1}.
    const bulk = db.conc.initializeUnorderedBulkOp();
    for (let i = 0; i < NRECORDS; i++) {
        bulk.insert({x: i});
    }
    assert.commandWorked(bulk.execute());

    print("making an index (this will take a while)");
    // Fix: the original discarded the createIndex result, so a failed index
    // build would go unnoticed and the rest of the test would be meaningless.
    assert.commandWorked(db.conc.createIndex({x: 1}));

    // Baseline count over the indexed range; updated on every poll below.
    let c1 = db.conc.count({x: {$lt: NRECORDS}});

    // Background updater: signals start via concflag, then $inc's x by
    // NRECORDS on every document (multi-update), moving them all out of the
    // {x: {$lt: NRECORDS}} range, and finally clears the in-progress flag.
    const updater = startParallelShell(
        "db = db.getSiblingDB('concurrency');\
        db.concflag.insert({ inprog: true });\
        sleep(20);\
        assert.commandWorked(db.conc.update({}, \
        { $inc: { x: " +
        NRECORDS +
        "}}, false, true)); \
        assert.commandWorked(db.concflag.update({}, { inprog: false }));");

    // Wait for the parallel shell to flag that the update is underway.
    assert.soon(function() {
        const flag = db.concflag.findOne();
        return flag && flag.inprog;
    }, "wait for fork", 30000, 1);

    let querycount = 0;
    let decrements = 0;
    let misses = 0;

    // Poll the count while the update runs; stop once the updater clears
    // its in-progress flag (generous 2-hour timeout, 10ms poll interval).
    assert.soon(function() {
        const c2 = db.conc.count({x: {$lt: NRECORDS}});
        print(c2);
        querycount++;
        if (c2 < c1)
            decrements++;
        else
            misses++;
        c1 = c2;
        return !db.concflag.findOne().inprog;
    }, "update never finished", 2 * 60 * 60 * 1000, 10);

    print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");

    // Every document must still exist after the multi-update completes.
    assert.eq(NRECORDS, db.conc.count(), "AT END 1");

    updater();  // join the parallel shell (asserts its exit code)
    MongoRunner.stopMongod(conn);
})();
|