summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNick Zolnierz <nicholas.zolnierz@mongodb.com>2018-04-27 15:49:07 -0400
committerNick Zolnierz <nicholas.zolnierz@mongodb.com>2018-05-01 13:47:50 -0400
commit46f72213f60bd74367a11aec7f02b38780ae7c3a (patch)
tree45018ee4a9531cb973f6fbb96c321ef44774278c
parent0d097ebf4d5c78e77d789dad3f8a45a942d37d47 (diff)
downloadmongo-46f72213f60bd74367a11aec7f02b38780ae7c3a.tar.gz
SERVER-34725: Group and count plan stages do not set the WorkingSetID output on PlanStage::DEAD state
-rw-r--r--jstests/concurrency/fsm_workloads/yield_group.js86
-rw-r--r--src/mongo/db/exec/and_hash.cpp40
-rw-r--r--src/mongo/db/exec/and_sorted.cpp12
-rw-r--r--src/mongo/db/exec/cached_plan.cpp1
-rw-r--r--src/mongo/db/exec/count.cpp14
-rw-r--r--src/mongo/db/exec/delete.cpp11
-rw-r--r--src/mongo/db/exec/fetch.cpp12
-rw-r--r--src/mongo/db/exec/group.cpp14
-rw-r--r--src/mongo/db/exec/limit.cpp14
-rw-r--r--src/mongo/db/exec/merge_sort.cpp19
-rw-r--r--src/mongo/db/exec/or.cpp12
-rw-r--r--src/mongo/db/exec/projection.cpp12
-rw-r--r--src/mongo/db/exec/queued_data_stage.cpp17
-rw-r--r--src/mongo/db/exec/skip.cpp12
-rw-r--r--src/mongo/db/exec/sort.cpp12
-rw-r--r--src/mongo/db/query/plan_executor.cpp1
16 files changed, 149 insertions, 140 deletions
diff --git a/jstests/concurrency/fsm_workloads/yield_group.js b/jstests/concurrency/fsm_workloads/yield_group.js
new file mode 100644
index 00000000000..6ba3607940f
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/yield_group.js
@@ -0,0 +1,86 @@
+'use strict';
+
+/**
+ * Tests that the group command either succeeds or fails gracefully when interspersed with inserts
+ * on a capped collection. Designed to reproduce SERVER-34725.
+ */
+var $config = (function() {
+
+ var states = {
+ /*
+ * Issue a group command against the capped collection.
+ */
+ group: function group(db, collName) {
+ try {
+ assert.commandWorked(db.runCommand(
+ {group: {ns: collName, key: {_id: 1}, $reduce: function() {}, initial: {}}}));
+ } catch (ex) {
+ assert.eq(ErrorCodes.CappedPositionLost, ex.code);
+ }
+ },
+
+ /**
+ * Inserts a document into the capped collection.
+ */
+ insert: function insert(db, collName) {
+ assertAlways.writeOK(db[collName].insert({a: 1}));
+ }
+ };
+
+ var transitions = {
+ insert: {insert: 0.5, group: 0.5},
+ group: {insert: 0.5, group: 0.5},
+ };
+
+ function setup(db, collName, cluster) {
+ const nDocs = 200;
+
+ // Create the test capped collection, with a max number of documents.
+ db[collName].drop();
+ assert.commandWorked(db.createCollection(collName, {
+ capped: true,
+ size: 4096,
+ max: this.nDocs, // Set the maximum number of documents in the capped collection such
+ // that additional inserts will drop older documents and increase the
+ // likelihood of losing the capped position.
+ }));
+
+ // Lower the following parameters to increase the probability of yields.
+ cluster.executeOnMongodNodes(function lowerYieldParams(db) {
+ assertAlways.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 5}));
+ assertAlways.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 1}));
+ });
+
+ // Set up some data to query.
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (let i = 0; i < nDocs; i++) {
+ bulk.insert({_id: i});
+ }
+ assertAlways.writeOK(bulk.execute());
+ }
+
+ /*
+ * Reset parameters.
+ */
+ function teardown(db, collName, cluster) {
+ cluster.executeOnMongodNodes(function resetYieldParams(db) {
+ assertAlways.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 128}));
+ assertAlways.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 10}));
+ });
+ }
+
+ return {
+ threadCount: 5,
+ iterations: 50,
+ startState: 'insert',
+ states: states,
+ transitions: transitions,
+ setup: setup,
+ teardown: teardown,
+ data: {}
+ };
+})();
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 7f8f2cb9bc4..50ccd06f105 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -145,20 +145,10 @@ PlanStage::StageState AndHashStage::doWork(WorkingSetID* out) {
_ws->get(_lookAheadResults[i])->makeObjOwnedIfNeeded();
break; // Stop looking at this child.
} else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
- // Propage error to parent.
+ // The stage which produces a failure is responsible for allocating a working
+ // set member with error details.
+ invariant(WorkingSet::INVALID_ID != _lookAheadResults[i]);
*out = _lookAheadResults[i];
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == *out) {
- mongoutils::str::stream ss;
- ss << "hashed AND stage failed to read in look ahead results "
- << "from child " << i
- << ", childStatus: " << PlanStage::stateStr(childStatus);
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
-
_hashingChildren = false;
_dataMap.clear();
return childStatus;
@@ -298,16 +288,10 @@ PlanStage::StageState AndHashStage::readFirstChild(WorkingSetID* out) {
return PlanStage::NEED_TIME;
} else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "hashed AND stage failed to read in results to from first child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
return childStatus;
} else {
if (PlanStage::NEED_YIELD == childStatus) {
@@ -392,16 +376,10 @@ PlanStage::StageState AndHashStage::hashOtherChildren(WorkingSetID* out) {
return PlanStage::NEED_TIME;
} else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "hashed AND stage failed to read in results from other child " << _currentChild;
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
return childStatus;
} else {
if (PlanStage::NEED_YIELD == childStatus) {
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index ce073bbb534..50a0bee1715 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -219,16 +219,10 @@ PlanStage::StageState AndSortedStage::moveTowardTargetRecordId(WorkingSetID* out
_ws->free(_targetId);
return state;
} else if (PlanStage::FAILURE == state || PlanStage::DEAD == state) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "sorted AND stage failed to read in results from child " << workingChildNumber;
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
_isEOF = true;
_ws->free(_targetId);
return state;
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index b6d185c4032..071f6cf7b25 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -148,6 +148,7 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
return replan(yieldPolicy, shouldCache);
} else if (PlanStage::DEAD == state) {
BSONObj statusObj;
+ invariant(WorkingSet::INVALID_ID != id);
WorkingSetCommon::getStatusMemberObject(*_ws, id, &statusObj);
LOG(1) << "Execution of cached plan failed: PlanStage died"
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index 61e50d7b305..bf97950426e 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -119,17 +119,11 @@ PlanStage::StageState CountStage::doWork(WorkingSetID* out) {
if (PlanStage::IS_EOF == state) {
_commonStats.isEOF = true;
return PlanStage::IS_EOF;
- } else if (PlanStage::DEAD == state) {
- return state;
- } else if (PlanStage::FAILURE == state) {
+ } else if (PlanStage::FAILURE == state || PlanStage::DEAD == state) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it failed, in which
- // case 'id' is valid. If ID is invalid, we create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- const std::string errmsg = "count stage failed to read result from child";
- Status status = Status(ErrorCodes::InternalError, errmsg);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
return state;
} else if (PlanStage::ADVANCED == state) {
// We got a result. If we're still skipping, then decrement the number left to skip.
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 47563136c9c..623535ef946 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -130,15 +130,10 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
case PlanStage::FAILURE:
case PlanStage::DEAD:
+ // The stage which produces a failure is responsible for allocating a working set
+ // member with error details.
+ invariant(WorkingSet::INVALID_ID != id);
*out = id;
-
- // If a stage fails, it may create a status WSM to indicate why it failed, in which
- // case 'id' is valid. If ID is invalid, we create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- const std::string errmsg = "delete stage failed to read in results from child";
- *out = WorkingSetCommon::allocateStatusMember(
- _ws, Status(ErrorCodes::InternalError, errmsg));
- }
return status;
case PlanStage::NEED_TIME:
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index f7d84305966..6895a58ca3b 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -132,16 +132,10 @@ PlanStage::StageState FetchStage::doWork(WorkingSetID* out) {
return returnIfMatches(member, id, out);
} else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "fetch stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
return status;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
diff --git a/src/mongo/db/exec/group.cpp b/src/mongo/db/exec/group.cpp
index ec63e3b3b65..9c91272b1db 100644
--- a/src/mongo/db/exec/group.cpp
+++ b/src/mongo/db/exec/group.cpp
@@ -228,17 +228,11 @@ PlanStage::StageState GroupStage::doWork(WorkingSetID* out) {
} else if (PlanStage::NEED_YIELD == state) {
*out = id;
return state;
- } else if (PlanStage::FAILURE == state) {
+ } else if (PlanStage::FAILURE == state || PlanStage::DEAD == state) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it failed, in which
- // case 'id' is valid. If ID is invalid, we create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- const std::string errmsg = "group stage failed to read in results from child";
- *out = WorkingSetCommon::allocateStatusMember(
- _ws, Status(ErrorCodes::InternalError, errmsg));
- }
- return state;
- } else if (PlanStage::DEAD == state) {
return state;
} else if (PlanStage::ADVANCED == state) {
WorkingSetMember* member = _ws->get(id);
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index 4472f41f2ba..79d37a985f7 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -66,19 +66,11 @@ PlanStage::StageState LimitStage::doWork(WorkingSetID* out) {
if (PlanStage::ADVANCED == status) {
*out = id;
--_numToReturn;
- return PlanStage::ADVANCED;
} else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "limit stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
- return status;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
}
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 7010546d1e4..b8343869a60 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -132,22 +132,15 @@ PlanStage::StageState MergeSortStage::doWork(WorkingSetID* out) {
_noResultToMerge.pop();
return PlanStage::NEED_TIME;
} else if (PlanStage::FAILURE == code || PlanStage::DEAD == code) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
+ *out = id;
+ return code;
+ } else if (PlanStage::NEED_YIELD == code) {
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "merge sort stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
return code;
} else {
- if (PlanStage::NEED_YIELD == code) {
- *out = id;
- }
-
return code;
}
}
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index 44d88f26d12..89b811a2487 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -107,16 +107,10 @@ PlanStage::StageState OrStage::doWork(WorkingSetID* out) {
return PlanStage::NEED_TIME;
}
} else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "OR stage failed to read in results from child " << _currentChild;
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
return childStatus;
} else if (PlanStage::NEED_YIELD == childStatus) {
*out = id;
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 370a8e87892..7bcadd0675b 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -210,16 +210,10 @@ PlanStage::StageState ProjectionStage::doWork(WorkingSetID* out) {
*out = id;
} else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "projection stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
}
diff --git a/src/mongo/db/exec/queued_data_stage.cpp b/src/mongo/db/exec/queued_data_stage.cpp
index 6f3f3b0ed2f..69ff26330f5 100644
--- a/src/mongo/db/exec/queued_data_stage.cpp
+++ b/src/mongo/db/exec/queued_data_stage.cpp
@@ -51,9 +51,20 @@ PlanStage::StageState QueuedDataStage::doWork(WorkingSetID* out) {
StageState state = _results.front();
_results.pop();
- if (PlanStage::ADVANCED == state) {
- *out = _members.front();
- _members.pop();
+ switch (state) {
+ case PlanStage::ADVANCED:
+ *out = _members.front();
+ _members.pop();
+ break;
+ case PlanStage::DEAD:
+ case PlanStage::FAILURE:
+            // On DEAD or FAILURE, this stage is responsible for allocating the WorkingSetMember
+            // with the error details.
+ *out = WorkingSetCommon::allocateStatusMember(
+ _ws, Status(ErrorCodes::InternalError, "Queued data stage failure"));
+ break;
+ default:
+ break;
}
return state;
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index 7a353e5219f..fea524c1898 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -68,16 +68,10 @@ PlanStage::StageState SkipStage::doWork(WorkingSetID* out) {
*out = id;
return PlanStage::ADVANCED;
} else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "skip stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
return status;
} else if (PlanStage::NEED_YIELD == status) {
*out = id;
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index bd2cc27e14f..243e8bf7a05 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -159,16 +159,10 @@ PlanStage::StageState SortStage::doWork(WorkingSetID* out) {
_sorted = true;
return PlanStage::NEED_TIME;
} else if (PlanStage::FAILURE == code || PlanStage::DEAD == code) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "sort stage failed to read in results to sort from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
return code;
} else if (PlanStage::NEED_YIELD == code) {
*out = id;
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 2deec30fd5f..3bc8ab9202d 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -628,6 +628,7 @@ PlanExecutor::ExecState PlanExecutor::getNextImpl(Snapshotted<BSONObj>* objOut,
if (NULL != objOut) {
BSONObj statusObj;
+ invariant(WorkingSet::INVALID_ID != id);
WorkingSetCommon::getStatusMemberObject(*_workingSet, id, &statusObj);
*objOut = Snapshotted<BSONObj>(SnapshotId(), statusObj);
}