diff options
author | Xiangyu Yao <xiangyu.yao@mongodb.com> | 2018-04-11 14:10:14 -0400 |
---|---|---|
committer | Xiangyu Yao <xiangyu.yao@mongodb.com> | 2018-04-29 23:43:27 -0400 |
commit | 8c8f30175f092a1b4743b9f9c1e30b3348219718 (patch) | |
tree | 15b897efce357cbd69d2c68e34ba8bc8417d2635 /jstests/concurrency/fsm_workloads/secondary_reads.js | |
parent | 793e59f11b558db3d833a12ec23bbacc359011a1 (diff) | |
download | mongo-8c8f30175f092a1b4743b9f9c1e30b3348219718.tar.gz |
SERVER-34383 Add secondary reads FSM test
Diffstat (limited to 'jstests/concurrency/fsm_workloads/secondary_reads.js')
-rw-r--r-- | jstests/concurrency/fsm_workloads/secondary_reads.js | 125 |
1 file changed, 125 insertions, 0 deletions
'use strict';

/**
 * secondary_reads.js
 *
 * One thread (tid 0) is dedicated to writing documents with field 'x' in
 * ascending order into the collection.
 *
 * Other threads do one of the following operations each iteration.
 * 1) Retrieve first 50 documents in descending order with local readConcern from a secondary node.
 * 2) Retrieve first 50 documents in descending order with available readConcern from a secondary
 *    node.
 * 3) Retrieve first 50 documents in descending order with majority readConcern from a secondary
 *    node.
 *
 * For each read, we check if there is any 'hole' in the returned batch. There
 * should not be any 'hole' because oplogs are applied sequentially in batches.
 */
var $config = (function() {

    // The workload name doubles as the collection name.
    const uniqueCollectionName = 'secondary_reads';

    // Thread 0 is the single dedicated writer; every other thread reads.
    function isWriterThread() {
        return this.tid === 0;
    }

    // Append the next batch of documents ({_id: i, x: i} with monotonically
    // increasing values) and verify that every insert was acknowledged.
    function insertDocuments(db, collName, writeConcern) {
        const firstId = this.nDocumentsInTotal;
        const lastId = firstId + this.nDocumentsToInsert;
        const bulkOp = db[collName].initializeOrderedBulkOp();
        for (let id = firstId; id < lastId; id++) {
            bulkOp.insert({_id: id, x: id});
        }
        const result = bulkOp.execute(writeConcern);
        assertWhenOwnColl.writeOK(result);
        assertWhenOwnColl.eq(this.nDocumentsToInsert, result.nInserted);
        this.nDocumentsInTotal = lastId;
    }

    // Read the newest nDocumentsToCheck documents from a secondary and assert
    // that the returned 'x' values form a contiguous descending run.
    function readFromSecondaries(db, collName, readConcernLevel) {
        let docs = [];
        for (;;) {
            try {
                docs = db[collName]
                           .find()
                           .readPref('secondary')
                           .readConcern(readConcernLevel)
                           .sort({x: -1})
                           .limit(this.nDocumentsToCheck)
                           .toArray();
                break;
            } catch (e) {
                // A killed query plan is the only error we tolerate; anything
                // else fails the workload. On QueryPlanKilled we simply retry.
                assertAlways.eq(e.code,
                                ErrorCodes.QueryPlanKilled,
                                'unexpected error code: ' + e.code + ': ' + e.message);
            }
        }
        // A gap between adjacent 'x' values would mean the secondary exposed
        // a partially applied oplog batch.
        for (let idx = 1; idx < docs.length; idx++) {
            assertWhenOwnColl.eq(docs[idx - 1].x, docs[idx].x + 1, () => tojson(docs));
        }
    }

    // Pick one of the supported readConcern levels uniformly at random.
    function getReadConcernLevel() {
        const levels = ['local', 'available', 'majority'];
        return levels[Random.randInt(levels.length)];
    }

    // Single-state machine: the writer thread inserts; every other thread
    // reads from a secondary with a randomly chosen readConcern level.
    const states = {
        readFromSecondaries: function readFromSecondaries(db, collName) {
            if (this.isWriterThread()) {
                this.insertDocuments(db, this.collName, {w: 1});
            } else {
                this.readFromSecondaries(db, this.collName, getReadConcernLevel());
            }
        }
    };

    const transitions = {readFromSecondaries: {readFromSecondaries: 1}};

    function setup(db, collName, cluster) {
        this.nDocumentsInTotal = 0;
        // Seed the collection with a write replicated to every node so that
        // oplog application on secondaries is active before any reads start.
        this.insertDocuments(db, this.collName, {w: cluster.getReplSetNumNodes()});
    }

    // This workload is only meaningful against a non-sharded replica set.
    function skip(cluster) {
        if (cluster.isSharded() || cluster.isStandalone()) {
            return {skip: true, msg: 'only runs in a replica set.'};
        }
        return {skip: false};
    }

    function teardown(db, collName, cluster) {
        db[this.collName].drop();
    }

    return {
        threadCount: 50,
        iterations: 40,
        startState: 'readFromSecondaries',
        states: states,
        data: {
            nDocumentsToInsert: 2000,
            nDocumentsToCheck: 50,
            isWriterThread: isWriterThread,
            insertDocuments: insertDocuments,
            readFromSecondaries: readFromSecondaries,
            collName: uniqueCollectionName
        },
        transitions: transitions,
        setup: setup,
        skip: skip,
        teardown: teardown
    };
})();