author | Pawel Terlecki <pawel.terlecki@mongodb.com> | 2019-01-29 14:25:20 -0500
committer | Pawel Terlecki <pawel.terlecki@mongodb.com> | 2019-01-30 12:59:42 -0500
commit | b7df0530e35a23bc1139f22a84ff4ba8b7688b4a (patch)
tree | b8d3967252d3b207ee1925227cfd31266adf2130 /src/mongo/dbtests
parent | d1771e696b6df883af70eedaaa0733548c573fec (diff)
download | mongo-b7df0530e35a23bc1139f22a84ff4ba8b7688b4a.tar.gz
SERVER-38316 Consolidate PlanExecutor::DEAD and PlanExecutor::FAILURE
Replaced PlanStage::DEAD with PlanStage::FAILURE. PlanExecutor::DEAD
will be taken care of in a subsequent commit.
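To make the new contract concrete, here is a minimal standalone sketch of the drain-loop pattern these tests rely on. StageState and QueuedStage below are simplified stand-ins invented for illustration, not the real PlanStage::StageState or QueuedDataStage types:

#include <cassert>
#include <queue>

// Simplified stand-in for PlanStage::StageState after this commit:
// DEAD is gone, so FAILURE is the single terminal error state.
enum class StageState { ADVANCED, NEED_TIME, IS_EOF, FAILURE };

// Hypothetical analogue of QueuedDataStage: replays pre-queued states.
struct QueuedStage {
    std::queue<StageState> queued;
    StageState work() {
        if (queued.empty()) {
            return StageState::IS_EOF;
        }
        StageState next = queued.front();
        queued.pop();
        return next;
    }
};

int main() {
    QueuedStage stage;
    stage.queued.push(StageState::ADVANCED);
    stage.queued.push(StageState::NEED_TIME);
    stage.queued.push(StageState::ADVANCED);

    int numResults = 0;
    StageState state;
    while ((state = stage.work()) != StageState::IS_EOF) {
        // Before this commit the tests asserted against both DEAD and
        // FAILURE here; now a single check covers the error case.
        assert(state != StageState::FAILURE);
        if (state == StageState::ADVANCED) {
            ++numResults;
        }
    }
    assert(numResults == 2);
    return 0;
}

This is exactly the simplification visible in the diff below: every paired `ASSERT_NE(..., PlanStage::DEAD)` / `ASSERT_NE(..., PlanStage::FAILURE)` collapses to the FAILURE check alone.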
Diffstat (limited to 'src/mongo/dbtests')
-rw-r--r-- | src/mongo/dbtests/query_stage_and.cpp | 25
-rw-r--r-- | src/mongo/dbtests/query_stage_cached_plan.cpp | 1
-rw-r--r-- | src/mongo/dbtests/query_stage_count.cpp | 1
-rw-r--r-- | src/mongo/dbtests/query_stage_distinct.cpp | 1
-rw-r--r-- | src/mongo/dbtests/query_stage_ensure_sorted.cpp | 1
-rw-r--r-- | src/mongo/dbtests/query_stage_ixscan.cpp | 1
-rw-r--r-- | src/mongo/dbtests/query_stage_sort.cpp | 2
-rw-r--r-- | src/mongo/dbtests/query_stage_subplan.cpp | 1
-rw-r--r-- | src/mongo/dbtests/query_stage_trial.cpp | 8
9 files changed, 16 insertions, 25 deletions
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index e3478d4bf37..bb502de5705 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -142,7 +142,6 @@ public:
         // We shouldn't fail or be dead.
         ASSERT(PlanStage::FAILURE != status);
-        ASSERT(PlanStage::DEAD != status);
         if (PlanStage::ADVANCED != status) {
             continue;
@@ -805,9 +804,9 @@ public:
         const BSONObj dataObj = fromjson("{'foo': 'bar'}");
-        // Confirm PlanStage::DEAD when children contain the following WorkingSetMembers:
+        // Confirm PlanStage::FAILURE when children contain the following WorkingSetMembers:
         //     Child1:  Data
-        //     Child2:  NEED_TIME, DEAD
+        //     Child2:  NEED_TIME, FAILURE
         {
             WorkingSet ws;
             const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws);
@@ -824,7 +823,7 @@ public:
             auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws);
             childStage2->pushBack(PlanStage::NEED_TIME);
-            childStage2->pushBack(PlanStage::DEAD);
+            childStage2->pushBack(PlanStage::FAILURE);
             andHashStage->addChild(childStage1.release());
             andHashStage->addChild(childStage2.release());
@@ -835,11 +834,11 @@ public:
                 state = andHashStage->work(&id);
             }
-            ASSERT_EQ(PlanStage::DEAD, state);
+            ASSERT_EQ(PlanStage::FAILURE, state);
         }
-        // Confirm PlanStage::DEAD when children contain the following WorkingSetMembers:
-        //     Child1:  Data, DEAD
+        // Confirm PlanStage::FAILURE when children contain the following WorkingSetMembers:
+        //     Child1:  Data, FAILURE
         //     Child2:  Data
         {
             WorkingSet ws;
@@ -855,7 +854,7 @@ public:
                 ws.transitionToRecordIdAndObj(id);
                 childStage1->pushBack(id);
             }
-            childStage1->pushBack(PlanStage::DEAD);
+            childStage1->pushBack(PlanStage::FAILURE);
             auto childStage2 = make_unique<QueuedDataStage>(&_opCtx, &ws);
             {
@@ -876,12 +875,12 @@ public:
                 state = andHashStage->work(&id);
             }
-            ASSERT_EQ(PlanStage::DEAD, state);
+            ASSERT_EQ(PlanStage::FAILURE, state);
         }
-        // Confirm PlanStage::DEAD when children contain the following WorkingSetMembers:
+        // Confirm PlanStage::FAILURE when children contain the following WorkingSetMembers:
         //     Child1:  Data
-        //     Child2:  Data, DEAD
+        //     Child2:  Data, FAILURE
         {
             WorkingSet ws;
             const auto andHashStage = make_unique<AndHashStage>(&_opCtx, &ws);
@@ -905,7 +904,7 @@ public:
                 ws.transitionToRecordIdAndObj(id);
                 childStage2->pushBack(id);
             }
-            childStage2->pushBack(PlanStage::DEAD);
+            childStage2->pushBack(PlanStage::FAILURE);
             andHashStage->addChild(childStage1.release());
             andHashStage->addChild(childStage2.release());
@@ -916,7 +915,7 @@ public:
                 state = andHashStage->work(&id);
             }
-            ASSERT_EQ(PlanStage::DEAD, state);
+            ASSERT_EQ(PlanStage::FAILURE, state);
         }
     }
 };

diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index bd2dc068672..94e20f10f98 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -132,7 +132,6 @@ public:
             state = cachedPlanStage->work(&id);
             ASSERT_NE(state, PlanStage::FAILURE);
-            ASSERT_NE(state, PlanStage::DEAD);
             if (state == PlanStage::ADVANCED) {
                 WorkingSetMember* member = ws.get(id);

diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 9438576b614..de40aa79b84 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -180,7 +180,6 @@ public:
             // do some work -- assumes that one work unit counts a single doc
             PlanStage::StageState state = count_stage.work(&wsid);
             ASSERT_NOT_EQUALS(state, PlanStage::FAILURE);
-            ASSERT_NOT_EQUALS(state, PlanStage::DEAD);
             // prepare for yield
             count_stage.saveState();

diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index 32fb05dbcde..76eed91d300 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -289,7 +289,6 @@ public:
         while (PlanStage::IS_EOF != (state = distinct.work(&wsid))) {
             ASSERT_NE(PlanStage::FAILURE, state);
-            ASSERT_NE(PlanStage::DEAD, state);
             if (PlanStage::ADVANCED == state) {
                 seen.push_back(getIntFieldDotted(ws, wsid, "b"));
             }

diff --git a/src/mongo/dbtests/query_stage_ensure_sorted.cpp b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
index 3093a053ce2..3351b1f3eab 100644
--- a/src/mongo/dbtests/query_stage_ensure_sorted.cpp
+++ b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
@@ -92,7 +92,6 @@ public:
         BSONArrayBuilder arr(bob.subarrayStart("output"));
         while (state != PlanStage::IS_EOF) {
             state = ess.work(&id);
-            ASSERT_NE(state, PlanStage::DEAD);
             ASSERT_NE(state, PlanStage::FAILURE);
             if (state == PlanStage::ADVANCED) {
                 WorkingSetMember* member = ws.get(id);

diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index c6f87f48169..0f8e3b16391 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -88,7 +88,6 @@ public:
         // There are certain states we shouldn't get.
         ASSERT_NE(PlanStage::IS_EOF, state);
-        ASSERT_NE(PlanStage::DEAD, state);
         ASSERT_NE(PlanStage::FAILURE, state);
     }

diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index fea7abfc6a0..1acecaf4316 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -395,7 +395,6 @@ public:
             PlanStage::StageState status = ss->work(&id);
             if (PlanStage::ADVANCED != status) {
                 ASSERT_NE(status, PlanStage::FAILURE);
-                ASSERT_NE(status, PlanStage::DEAD);
                 continue;
             }
             WorkingSetMember* member = exec->getWorkingSet()->get(id);
@@ -489,7 +488,6 @@ public:
             PlanStage::StageState status = ss->work(&id);
             if (PlanStage::ADVANCED != status) {
                 ASSERT_NE(status, PlanStage::FAILURE);
-                ASSERT_NE(status, PlanStage::DEAD);
                 continue;
             }
             WorkingSetMember* member = exec->getWorkingSet()->get(id);

diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index 4b550ed7b9c..e8a119c3543 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -502,7 +502,6 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanRootedOrNE) {
     while (stageState != PlanStage::IS_EOF) {
         WorkingSetID id = WorkingSet::INVALID_ID;
         stageState = subplan->work(&id);
-        ASSERT_NE(stageState, PlanStage::DEAD);
         ASSERT_NE(stageState, PlanStage::FAILURE);
         if (stageState == PlanStage::ADVANCED) {
             ++numResults;

diff --git a/src/mongo/dbtests/query_stage_trial.cpp b/src/mongo/dbtests/query_stage_trial.cpp
index d83380c9e51..aa5cdf6ff08 100644
--- a/src/mongo/dbtests/query_stage_trial.cpp
+++ b/src/mongo/dbtests/query_stage_trial.cpp
@@ -208,9 +208,9 @@ TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialPlanDies) {
     auto trialPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
     auto backupPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
-    // Seed the trial plan with 2 results followed by a PlanStage::DEAD.
+    // Seed the trial plan with 2 results followed by a PlanStage::FAILURE.
     queueData({BSON("_id" << 0), BSON("_id" << 1)}, trialPlan.get());
-    trialPlan->pushBack(PlanStage::DEAD);
+    trialPlan->pushBack(PlanStage::FAILURE);
     // Seed the backup plan with 20 different results, so that we can validate that we see the
     // correct dataset once the trial phase is complete.
@@ -220,8 +220,8 @@
     }
     queueData(backupResults, backupPlan.get());
-    // We schedule the trial to run for 10 works. Because we will encounter a PlanStage::DEAD before
-    // this point, the trial will complete early and the backup plan will be adopted.
+    // We schedule the trial to run for 10 works. Because we will encounter a PlanStage::FAILURE
+    // before this point, the trial will complete early and the backup plan will be adopted.
     auto trialStage = std::make_unique<TrialStage>(
         opCtx(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
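The last hunk's behavior (a failing trial plan causes adoption of the backup plan) can be sketched in isolation. runWithBackup, QueuedStage, and StageState below are hypothetical simplifications written for this illustration, not the actual TrialStage API:

#include <cassert>
#include <queue>
#include <utility>
#include <vector>

enum class StageState { ADVANCED, IS_EOF, FAILURE };

// Hypothetical queued stage that replays (state, value) pairs.
struct QueuedStage {
    std::queue<std::pair<StageState, int>> queued;
    StageState work(int* out) {
        if (queued.empty()) {
            return StageState::IS_EOF;
        }
        auto [state, value] = queued.front();
        queued.pop();
        if (state == StageState::ADVANCED) {
            *out = value;
        }
        return state;
    }
};

// Sketch of the trial policy: run the trial plan for up to maxWorks calls;
// on FAILURE, discard its output and drain the backup plan instead.
std::vector<int> runWithBackup(QueuedStage& trial, QueuedStage& backup, int maxWorks) {
    std::vector<int> results;
    for (int i = 0; i < maxWorks; ++i) {
        int value = 0;
        StageState state = trial.work(&value);
        if (state == StageState::FAILURE) {
            results.clear();  // trial results are abandoned
            int v = 0;
            while (backup.work(&v) != StageState::IS_EOF) {
                results.push_back(v);
            }
            return results;
        }
        if (state == StageState::IS_EOF) {
            break;
        }
        results.push_back(value);
    }
    return results;
}

int main() {
    QueuedStage trial;
    QueuedStage backup;
    // Trial: 2 results, then FAILURE -- mirroring the test's seed data.
    trial.queued.push({StageState::ADVANCED, 0});
    trial.queued.push({StageState::ADVANCED, 1});
    trial.queued.push({StageState::FAILURE, 0});
    // Backup: 20 distinct results.
    for (int i = 0; i < 20; ++i) {
        backup.queued.push({StageState::ADVANCED, 100 + i});
    }

    std::vector<int> results = runWithBackup(trial, backup, 10);
    // FAILURE arrives before the 10-work budget, so the backup dataset wins.
    assert(results.size() == 20);
    assert(results.front() == 100);
    return 0;
}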