author     Nick Zolnierz <nicholas.zolnierz@mongodb.com>    2018-04-27 15:49:07 -0400
committer  Nick Zolnierz <nicholas.zolnierz@mongodb.com>    2018-05-08 11:03:28 -0400
commit     5cd9fda822a6a813b4e0c5eb0003f222d86c1f35 (patch)
tree       e709530f4bf6f58f89dc882d84cac230f3b5943f /jstests/concurrency
parent     d7b69eae6a12570d77edd1b64e388e6627a54291 (diff)
download   mongo-5cd9fda822a6a813b4e0c5eb0003f222d86c1f35.tar.gz
SERVER-34725: Group and count plan stages do not set the WorkingSetID output on PlanStage::DEAD state
(cherry picked from commit 46f72213f60bd74367a11aec7f02b38780ae7c3a)
Diffstat (limited to 'jstests/concurrency')
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_group.js  86
1 file changed, 86 insertions(+), 0 deletions(-)
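The new FSM workload below automates the scenario behind this fix: a group scan over a capped collection can lose its capped position when concurrent inserts overwrite the oldest documents, and the resulting PlanStage::DEAD path then needs a valid WorkingSetID. As a rough, single-client sketch of the command shape and the acceptable outcomes (illustrative only: the collection name is made up, the race itself requires a second client inserting concurrently, and it assumes a server version that still supports the legacy group command, which was removed in MongoDB 4.2):

// Illustrative sketch only: shows the command shape the workload exercises.
// `yield_group_repro` is a made-up collection name.
const coll = db.yield_group_repro;
coll.drop();
assert.commandWorked(
    db.createCollection(coll.getName(), {capped: true, size: 4096, max: 200}));
for (let i = 0; i < 200; i++) {
    assert.writeOK(coll.insert({_id: i}));
}
// With another client inserting concurrently (rolling over the oldest documents),
// the group either succeeds or fails with CappedPositionLost; it must not crash.
const res = db.runCommand(
    {group: {ns: coll.getName(), key: {_id: 1}, $reduce: function() {}, initial: {}}});
assert(res.ok === 1 || res.code === ErrorCodes.CappedPositionLost, tojson(res));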
diff --git a/jstests/concurrency/fsm_workloads/yield_group.js b/jstests/concurrency/fsm_workloads/yield_group.js
new file mode 100644
index 00000000000..6ba3607940f
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/yield_group.js
@@ -0,0 +1,86 @@
+'use strict';
+
+/**
+ * Tests that the group command either succeeds or fails gracefully when interspersed with inserts
+ * on a capped collection. Designed to reproduce SERVER-34725.
+ */
+var $config = (function() {
+
+    var states = {
+        /*
+         * Issue a group command against the capped collection.
+         */
+        group: function group(db, collName) {
+            try {
+                assert.commandWorked(db.runCommand(
+                    {group: {ns: collName, key: {_id: 1}, $reduce: function() {}, initial: {}}}));
+            } catch (ex) {
+                assert.eq(ErrorCodes.CappedPositionLost, ex.code);
+            }
+        },
+
+        /**
+         * Inserts a document into the capped collection.
+         */
+        insert: function insert(db, collName) {
+            assertAlways.writeOK(db[collName].insert({a: 1}));
+        }
+    };
+
+    var transitions = {
+        insert: {insert: 0.5, group: 0.5},
+        group: {insert: 0.5, group: 0.5},
+    };
+
+    function setup(db, collName, cluster) {
+        const nDocs = 200;
+
+        // Create the capped test collection with a maximum document count.
+        db[collName].drop();
+        assert.commandWorked(db.createCollection(collName, {
+            capped: true,
+            size: 4096,
+            max: nDocs,  // Set the maximum number of documents in the capped collection such
+                         // that additional inserts will drop older documents and increase the
+                         // likelihood of losing the capped position.
+        }));
+
+        // Lower the following parameters to increase the probability of yields.
+        cluster.executeOnMongodNodes(function lowerYieldParams(db) {
+            assertAlways.commandWorked(
+                db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 5}));
+            assertAlways.commandWorked(
+                db.adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 1}));
+        });
+
+        // Set up some data to query.
+        var bulk = db[collName].initializeUnorderedBulkOp();
+        for (let i = 0; i < nDocs; i++) {
+            bulk.insert({_id: i});
+        }
+        assertAlways.writeOK(bulk.execute());
+    }
+
+    /*
+     * Reset the yield parameters to their defaults.
+     */
+    function teardown(db, collName, cluster) {
+        cluster.executeOnMongodNodes(function resetYieldParams(db) {
+            assertAlways.commandWorked(
+                db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 128}));
+            assertAlways.commandWorked(
+                db.adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 10}));
+        });
+    }
+
+    return {
+        threadCount: 5,
+        iterations: 50,
+        startState: 'insert',
+        states: states,
+        transitions: transitions,
+        setup: setup,
+        teardown: teardown,
+        data: {}
+    };
+})();
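For readers unfamiliar with the FSM workload format: the framework starts each thread in startState and, after every iteration, picks the next state using the weights in the transitions table, so here each thread alternates randomly between inserting and running group with equal probability. A rough illustration of that weighted selection (a sketch only, not the actual scheduling code in jstests/concurrency/fsm_libs):

// Illustrative only: weighted next-state selection in the spirit of the FSM framework.
function nextState(transitions, currentState) {
    const weights = transitions[currentState];  // e.g. {insert: 0.5, group: 0.5}
    let roll = Math.random();
    for (const state of Object.keys(weights)) {
        roll -= weights[state];
        if (roll <= 0) {
            return state;
        }
    }
    // Guard against floating-point remainder: fall back to the last listed state.
    return Object.keys(weights).pop();
}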