diff options
author | Benety Goh <benety@mongodb.com> | 2018-07-19 13:45:31 -0400 |
---|---|---|
committer | Benety Goh <benety@mongodb.com> | 2018-07-20 15:34:59 -0400 |
commit | a467195cae7046e08ee1f09926dc072c16edfd30 (patch) | |
tree | 40a688d1c984c43a3918aee5c20f93d8187f5e85 | |
parent | f6a183706a304e0260678e1e754347fbc0f15793 (diff) | |
download | mongo-a467195cae7046e08ee1f09926dc072c16edfd30.tar.gz |
SERVER-34942 add test to fill wiredtiger cache during initial sync oplog replay
(cherry picked from commit 2c2427c96848e90129ef10ceb36a0454c2736ab1)
-rw-r--r-- | jstests/noPassthrough/initial_sync_wt_cache_full.js | 71 |
1 file changed, 71 insertions, 0 deletions
/**
 * Fills WiredTiger cache during initial sync oplog replay.
 *
 * Buffers a large number of updates on the primary while the syncing node is
 * parked at the initialSyncHangBeforeCopyingDatabases fail point, then releases
 * the fail point so oplog application must chew through the backlog with a
 * deliberately tiny (1 GB) WiredTiger cache.
 *
 * @tags: [requires_wiredtiger]
 */
(function() {
    'use strict';
    load('jstests/libs/check_log.js');

    // Primary: suppress slow-op logging so bulk writes don't spam the log.
    const primaryNodeOptions = {
        slowms: 30000,  // Don't log slow operations on primary.
    };

    // Secondary: non-voting (cannot trigger elections) with a constrained
    // storage engine cache so unflushed modifications fill it up quickly.
    const secondaryNodeOptions = {
        rsConfig: {
            priority: 0,
            votes: 0,
        },
        wiredTigerCacheSizeGB: 1,
    };

    const replTest = new ReplSetTest({nodes: [primaryNodeOptions, secondaryNodeOptions]});
    const nodeList = replTest.startSet();
    replTest.initiate();

    const primary = replTest.getPrimary();
    const testDB = primary.getDB('test');
    const collection = testDB.getCollection('t');

    // Seed a small number of large documents so each update dirties a lot of cache.
    const kNumDocs = 2;
    const kMinDocSizeMB = 10;
    const payload = 'x'.repeat(kMinDocSizeMB * 1024 * 1024);

    for (let docId = 0; docId < kNumDocs; ++docId) {
        assert.writeOK(collection.save(
            {_id: docId, i: 0, x: payload},
            {writeConcern: {w: nodeList.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
    }
    assert.eq(kNumDocs, collection.find().itcount());

    // Restart the secondary from a clean data directory so it performs a full
    // initial sync, hanging just before the database-copy phase.
    const secondary = replTest.restart(1, {
        startClean: true,
        setParameter:
            `failpoint.initialSyncHangBeforeCopyingDatabases=${tojson({mode: 'alwaysOn'})}`,
    });

    const batchOpsLimit =
        assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
            .replBatchLimitOperations;
    jsTestLog(`Oplog application on secondary ${secondary.host} is limited to ${batchOpsLimit}` +
              ` operations per batch.`);

    // While the secondary is hung, pile up updates it will have to replay.
    const kNumUpdates = 1000;
    jsTestLog(`Buffering ${kNumUpdates} updates to ${kNumDocs} documents on secondary.`);
    checkLog.contains(secondary,
                      'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
    for (let docId = 0; docId < kNumDocs; ++docId) {
        for (let updateNum = 0; updateNum < kNumUpdates; ++updateNum) {
            assert.writeOK(collection.update({_id: docId}, {$inc: {i: 1}}));
        }
    }

    // Release the fail point and let initial sync replay the buffered oplog.
    jsTestLog(`Applying updates on secondary ${secondary.host}`);
    assert.commandWorked(secondary.adminCommand(
        {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
    replTest.awaitReplication();

    replTest.stopSet();
})();