/**
 * Tests that rollback succeeds even when the WiredTiger cache is filled.
 * See WT-3698.
 *
 * @tags: [requires_replication, requires_wiredtiger]
 */
(function() {
    'use strict';

    load('jstests/replsets/libs/rollback_test.js');

    // Use a constrained cache size for both data-bearing nodes so that it doesn't matter which
    // node RollbackTest selects as the rollback node.
    const nodeOptions = {
        // Don't log slow operations.
        slowms: 30000,
        // Constrain the storage engine cache size to make it easier to fill it up with unflushed
        // modifications.
        // This test uses a smaller cache size than the other wt_cache_full.js tests because it
        // has to work with the hard-coded 300 MB refetch limit in the pre-4.0 rollback
        // implementation.
        wiredTigerCacheSizeGB: 0.5,
    };

    const rst = new ReplSetTest({
        nodes: 3,
        nodeOptions: nodeOptions,
        useBridge: true,
    });
    rst.startSet();
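
    // Make the third node unelectable and disable chaining so that both secondaries always sync
    // directly from the primary, keeping the rollback node's sync source predictable.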
    let config = rst.getReplSetConfig();
    config.members[2].priority = 0;
    config.settings = {chainingAllowed: false};
    rst.initiate(config);
    // Prior to 4.0, rollback imposed a 300 MB limit on the total size of documents to refetch
    // from the sync source. Therefore, we select values for numDocs and minDocSizeMB, while
    // accounting for some small per-document overhead, such that we stay under this 300 MB
    // limit.
    // This test uses single updates, rather than the multiple updates in the other
    // wt_cache_full.js tests, because the refetching logic in the pre-4.0 algorithm depends on
    // which documents were modified, not on the number of modifications to each document.
    // This test has been observed to hang on some non-standard build platforms, so we give
    // ourselves an allowance of 5 documents below the theoretical maximum implied by the
    // rollback size limit. A numDocs of (maxDocs - 5) is still large enough to reproduce the
    // memory pressure issue in 3.6.5, but small enough for this test to perform uniformly
    // across most of the platforms in our continuous integration system.
    const rollbackSizeLimitMB = 300;
    const minDocSizeMB = 10;
    const largeString = 'x'.repeat(minDocSizeMB * 1024 * 1024);
    const numDocs = Math.floor(rollbackSizeLimitMB / minDocSizeMB) - 5;
    // Operations that will be present on both nodes, before the common point.
    const collName = 'test.t';
    let CommonOps = (node) => {
        const coll = node.getCollection(collName);
        jsTestLog('Inserting ' + numDocs + ' documents of ' + minDocSizeMB + ' MB each into ' +
                  collName + '.');
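        // Insert with a majority write concern so that every document replicates to both
        // data-bearing nodes and is part of the shared history before the common point.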
        for (let i = 0; i < numDocs; ++i) {
            assert.commandWorked(coll.save(
                {_id: i, a: 0, x: largeString},
                {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
        }
        assert.eq(numDocs, coll.find().itcount());
    };
    // Operations that will be performed on the rollback node past the common point.
    let RollbackOps = (node) => {
        const coll = node.getCollection(collName);
        jsTestLog('Updating ' + numDocs +
                  ' documents on the primary. These updates will be rolled back.');
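        // Touching every large document fills the constrained WiredTiger cache with dirty,
        // unflushed modifications, which is the cache-pressure condition under test (WT-3698).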
        for (let i = 0; i < numDocs; ++i) {
            assert.commandWorked(coll.update({_id: i}, {$inc: {a: 1}}));
        }
    };
    // Set up Rollback Test.
    const rollbackTest = new RollbackTest(rst.name, rst);
    CommonOps(rollbackTest.getPrimary());
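
    // The current primary becomes the rollback node. The writes applied to it from here on are
    // not majority-committed and will be rolled back later.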
    const rollbackNode = rollbackTest.transitionToRollbackOperations();
    RollbackOps(rollbackNode);
    // Step through the remaining rollback phases: elect the other data-bearing node as the new
    // primary (the rollback node's sync source), reconnect the rollback node so that rollback
    // runs against that sync source, and wait for the set to return to steady state.
    rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
    rollbackTest.transitionToSyncSourceOperationsDuringRollback();
    rollbackTest.transitionToSteadyStateOperations();

    rollbackTest.stop();
})();