summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author Xiangyu Yao <xiangyu.yao@mongodb.com> 2018-04-11 14:10:14 -0400
committer Xiangyu Yao <xiangyu.yao@mongodb.com> 2018-04-29 23:43:27 -0400
commit 8c8f30175f092a1b4743b9f9c1e30b3348219718 (patch)
tree 15b897efce357cbd69d2c68e34ba8bc8417d2635
parent 793e59f11b558db3d833a12ec23bbacc359011a1 (diff)
download mongo-8c8f30175f092a1b4743b9f9c1e30b3348219718.tar.gz
SERVER-34383 Add secondary reads FSM test
-rw-r--r--  jstests/concurrency/fsm_libs/cluster.js                                     15
-rw-r--r--  jstests/concurrency/fsm_libs/thread_mgr.js                                   1
-rw-r--r--  jstests/concurrency/fsm_libs/worker_thread.js                                3
-rw-r--r--  jstests/concurrency/fsm_workloads/secondary_reads.js                       125
-rw-r--r--  jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js   80
5 files changed, 222 insertions, 2 deletions
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 48c1bb5baef..5033e867d63 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -195,6 +195,7 @@ var Cluster = function(options) {
var _conns = {mongos: [], mongod: []};
var nextConn = 0;
var replSets = [];
+ var rst;
validateClusterOptions(options);
Object.freeze(options);
@@ -313,8 +314,6 @@ var Cluster = function(options) {
settings: {electionTimeoutMillis: 60 * 60 * 24 * 1000}
};
- var rst;
-
if (!options.useExistingConnectionAsSeed) {
rst = new ReplSetTest(replSetConfig);
rst.startSet();
@@ -424,6 +423,18 @@ var Cluster = function(options) {
return conn.host;
};
+ this.getReplSetName = function getReplSetName() {
+ if (this.isReplication() && !this.isSharded()) {
+ return rst.name;
+ }
+ return undefined;
+ };
+
+ this.getReplSetNumNodes = function getReplSetNumNodes() {
+ assert(this.isReplication() && !this.isSharded(), 'cluster must be a replica set');
+ return options.replication.numNodes;
+ };
+
this.isSharded = function isSharded() {
return Cluster.isSharded(options);
};
diff --git a/jstests/concurrency/fsm_libs/thread_mgr.js b/jstests/concurrency/fsm_libs/thread_mgr.js
index 686721243e8..35391ed0b05 100644
--- a/jstests/concurrency/fsm_libs/thread_mgr.js
+++ b/jstests/concurrency/fsm_libs/thread_mgr.js
@@ -111,6 +111,7 @@ var ThreadManager = function(clusterOptions, executionMode = {composed: false})
tid: tid++,
data: workloadData,
host: cluster.getHost(),
+ replSetName: cluster.getReplSetName(),
latch: latch,
dbName: _context[workload].dbName,
collName: _context[workload].collName,
diff --git a/jstests/concurrency/fsm_libs/worker_thread.js b/jstests/concurrency/fsm_libs/worker_thread.js
index 42a72bfffb9..3b2ec6e8571 100644
--- a/jstests/concurrency/fsm_libs/worker_thread.js
+++ b/jstests/concurrency/fsm_libs/worker_thread.js
@@ -25,6 +25,9 @@ var workerThread = (function() {
var myDB;
var configs = {};
var connectionString = 'mongodb://' + args.host + '/?appname=tid:' + args.tid;
+ if (typeof args.replSetName !== 'undefined') {
+ connectionString += '&replicaSet=' + args.replSetName;
+ }
globalAssertLevel = args.globalAssertLevel;
diff --git a/jstests/concurrency/fsm_workloads/secondary_reads.js b/jstests/concurrency/fsm_workloads/secondary_reads.js
new file mode 100644
index 00000000000..247ec66029a
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/secondary_reads.js
@@ -0,0 +1,125 @@
+'use strict';
+
+/**
+ * secondary_reads.js
+ *
+ * One thread (tid 0) is dedicated to writing documents with field 'x' in
+ * ascending order into the collection.
+ *
+ * Other threads do one of the following operations each iteration.
+ * 1) Retrieve first 50 documents in descending order with local readConcern from a secondary node.
+ * 2) Retrieve first 50 documents in descending order with available readConcern from a secondary
+ * node.
+ * 3) Retrieve first 50 documents in descending order with majority readConcern from a secondary
+ * node.
+ *
+ * For each read, we check if there is any 'hole' in the returned batch. There
+ * should not be any 'hole' because oplogs are applied sequentially in batches.
+ *
+ */
+var $config = (function() {
+
+ // Use the workload name as the collection name.
+ var uniqueCollectionName = 'secondary_reads';
+
+ function isWriterThread() {
+ return this.tid === 0;
+ }
+
+ function insertDocuments(db, collName, writeConcern) {
+ let bulk = db[collName].initializeOrderedBulkOp();
+ for (let i = this.nDocumentsInTotal; i < this.nDocumentsInTotal + this.nDocumentsToInsert;
+ i++) {
+ bulk.insert({_id: i, x: i});
+ }
+ let res = bulk.execute(writeConcern);
+ assertWhenOwnColl.writeOK(res);
+ assertWhenOwnColl.eq(this.nDocumentsToInsert, res.nInserted);
+ this.nDocumentsInTotal += this.nDocumentsToInsert;
+ }
+
+ function readFromSecondaries(db, collName, readConcernLevel) {
+ let arr = [];
+ let success = false;
+ while (!success) {
+ try {
+ arr = db[collName]
+ .find()
+ .readPref('secondary')
+ .readConcern(readConcernLevel)
+ .sort({x: -1})
+ .limit(this.nDocumentsToCheck)
+ .toArray();
+ success = true;
+ } catch (e) {
+ // Retry if the query is interrupted.
+ assertAlways.eq(e.code,
+ ErrorCodes.QueryPlanKilled,
+ 'unexpected error code: ' + e.code + ': ' + e.message);
+ }
+ }
+ // Make sure there is no hole in the result.
+ for (let i = 0; i < arr.length - 1; i++) {
+ assertWhenOwnColl.eq(arr[i].x, arr[i + 1].x + 1, () => tojson(arr));
+ }
+ }
+
+ function getReadConcernLevel() {
+ const readConcernLevels = ['local', 'available', 'majority'];
+ return readConcernLevels[Random.randInt(readConcernLevels.length)];
+ }
+
+ var states = (function() {
+
+ // One thread is dedicated to writing and other threads perform reads on
+ // secondaries with a randomly chosen readConcern level.
+ function readFromSecondaries(db, collName) {
+ if (this.isWriterThread()) {
+ this.insertDocuments(db, this.collName, {w: 1});
+ } else {
+ this.readFromSecondaries(db, this.collName, getReadConcernLevel());
+ }
+ }
+
+ return {readFromSecondaries: readFromSecondaries};
+ })();
+
+ var transitions = {readFromSecondaries: {readFromSecondaries: 1}};
+
+ var setup = function setup(db, collName, cluster) {
+ this.nDocumentsInTotal = 0;
+ // Start write workloads to activate oplog application on secondaries
+ // before any reads.
+ this.insertDocuments(db, this.collName, {w: cluster.getReplSetNumNodes()});
+ };
+
+ var skip = function skip(cluster) {
+ if (cluster.isSharded() || cluster.isStandalone()) {
+ return {skip: true, msg: 'only runs in a replica set.'};
+ }
+ return {skip: false};
+ };
+
+ var teardown = function teardown(db, collName, cluster) {
+ db[this.collName].drop();
+ };
+
+ return {
+ threadCount: 50,
+ iterations: 40,
+ startState: 'readFromSecondaries',
+ states: states,
+ data: {
+ nDocumentsToInsert: 2000,
+ nDocumentsToCheck: 50,
+ isWriterThread: isWriterThread,
+ insertDocuments: insertDocuments,
+ readFromSecondaries: readFromSecondaries,
+ collName: uniqueCollectionName
+ },
+ transitions: transitions,
+ setup: setup,
+ skip: skip,
+ teardown: teardown
+ };
+})();
diff --git a/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js b/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js
new file mode 100644
index 00000000000..7b9f0fdd785
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js
@@ -0,0 +1,80 @@
+'use strict';
+
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/secondary_reads.js'); // for $config
+
+/**
+ * secondary_reads_with_catalog_changes.js
+ *
+ * One thread (tid 0) is dedicated to writing documents with field 'x' in
+ * ascending order into the collection.
+ *
+ * Other threads do one of the following operations each iteration.
+ * 1) Retrieve first 50 documents in descending order with local readConcern from a secondary node.
+ * 2) Retrieve first 50 documents in descending order with available readConcern from a secondary
+ * node.
+ * 3) Retrieve first 50 documents in descending order with majority readConcern from a secondary
+ * node.
+ * 4) Build indexes on field x.
+ * 5) Drop indexes on field x.
+ * 6) Drop collection.
+ *
+ * Note that index/collection drop could interrupt the reads, so we need to retry if the read is
+ * interrupted.
+ *
+ */
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.states.buildIndex = function buildIndex(db, collName) {
+ if (this.isWriterThread(this.tid)) {
+ this.insertDocuments(db, this.collName);
+ } else {
+ assertWhenOwnColl.commandWorked(db[this.collName].createIndex(
+ {x: 1}, {unique: true, background: Random.rand() < 0.5}));
+ }
+ };
+
+ $config.states.dropIndex = function dropIndex(db, collName) {
+ if (this.isWriterThread(this.tid)) {
+ this.insertDocuments(db, this.collName);
+ } else {
+ const res = db[this.collName].dropIndex({x: 1});
+ if (res.ok === 1) {
+ assertWhenOwnColl.commandWorked(res);
+ } else {
+ assertWhenOwnColl.commandFailedWithCode(res, [
+ ErrorCodes.IndexNotFound,
+ ErrorCodes.NamespaceNotFound,
+ ErrorCodes.BackgroundOperationInProgressForNamespace
+ ]);
+ }
+ }
+ };
+
+ $config.states.dropCollection = function dropCollection(db, collName) {
+ if (this.isWriterThread(this.tid)) {
+ this.insertDocuments(db, this.collName);
+ } else {
+ const res = db.runCommand({drop: this.collName});
+ if (res.ok === 1) {
+ assertWhenOwnColl.commandWorked(res);
+ } else {
+ assertWhenOwnColl.commandFailedWithCode(res, [
+ ErrorCodes.NamespaceNotFound,
+ ErrorCodes.BackgroundOperationInProgressForNamespace
+ ]);
+ }
+ this.nDocumentsInTotal = 0;
+ }
+ };
+
+ $config.transitions = {
+ readFromSecondaries:
+ {readFromSecondaries: 0.9, buildIndex: 0.05, dropIndex: 0.03, dropCollection: 0.02},
+ buildIndex: {readFromSecondaries: 1},
+ dropIndex: {readFromSecondaries: 1},
+ dropCollection: {readFromSecondaries: 1}
+ };
+
+ return $config;
+});