author     Waley Chen <waleycz@gmail.com>  2016-05-18 10:08:23 -0400
committer  Waley Chen <waleycz@gmail.com>  2016-05-18 10:08:23 -0400
commit     7480e5ed2e83b50da03a8e1f1ca1840e280644ef (patch)
tree       ee75f0c2075050b12b100df1a00e22841da54125 /src/mongo/db/exec
parent     2a06469e1c07c3f9d1618495803c5c6260d21e86 (diff)
Revert "SERVER-23243 Replace Listener::getElapsedTimeMillis() in scoped_timer.cpp"
This reverts commit 51ee6f1952addc650b1c719cce51fcf460e6e9e8.
Diffstat (limited to 'src/mongo/db/exec')
-rw-r--r--  src/mongo/db/exec/SConscript                 |   2
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp            |   2
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp             |   2
-rw-r--r--  src/mongo/db/exec/plan_stage.cpp             |   9
-rw-r--r--  src/mongo/db/exec/plan_stage.h               |   5
-rw-r--r--  src/mongo/db/exec/queued_data_stage_test.cpp |  38
-rw-r--r--  src/mongo/db/exec/scoped_timer.cpp           |   9
-rw-r--r--  src/mongo/db/exec/scoped_timer.h             |  12
-rw-r--r--  src/mongo/db/exec/sort_test.cpp              | 244
-rw-r--r--  src/mongo/db/exec/subplan.cpp                |   2

10 files changed, 126 insertions(+), 199 deletions(-)
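
For context on what this revert changes at call sites: before the commit, ScopedTimer took a ClockSource pointer plus the counter to increment (ScopedTimer timer(getClock(), &_commonStats.executionTimeMillis)); after the revert it takes only the counter and reads Listener::getElapsedTimeMillis() internally. The following is a minimal, self-contained sketch of that counter-accumulating RAII pattern, using std::chrono in place of MongoDB's Listener/ClockSource facilities; the names are illustrative only and are not the actual sources.

```cpp
// Illustrative sketch only: approximates the reverted ScopedTimer's behavior
// with std::chrono instead of Listener::getElapsedTimeMillis().
#include <chrono>
#include <iostream>

class ScopedTimerSketch {
public:
    explicit ScopedTimerSketch(long long* counter)
        : _counter(counter), _start(std::chrono::steady_clock::now()) {}

    // On destruction, add the elapsed milliseconds to the counter we were given.
    ~ScopedTimerSketch() {
        using namespace std::chrono;
        *_counter += duration_cast<milliseconds>(steady_clock::now() - _start).count();
    }

    ScopedTimerSketch(const ScopedTimerSketch&) = delete;
    ScopedTimerSketch& operator=(const ScopedTimerSketch&) = delete;

private:
    long long* _counter;                           // counter to increment, not owned
    std::chrono::steady_clock::time_point _start;  // time at construction
};

int main() {
    long long executionTimeMillis = 0;
    {
        // Same call shape as the reverted code: only the counter is passed.
        ScopedTimerSketch timer(&executionTimeMillis);
        // ... work whose duration should be charged to executionTimeMillis ...
    }
    std::cout << "accumulated: " << executionTimeMillis << " ms\n";
    return 0;
}
```
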
diff --git a/src/mongo/db/exec/SConscript b/src/mongo/db/exec/SConscript
index e54f69b82d7..931ea2abbd1 100644
--- a/src/mongo/db/exec/SConscript
+++ b/src/mongo/db/exec/SConscript
@@ -112,7 +112,6 @@ env.CppUnitTest(
"exec",
"$BUILD_DIR/mongo/db/serveronly",
"$BUILD_DIR/mongo/dbtests/mocklib",
- "$BUILD_DIR/mongo/util/clock_source_mock",
"$BUILD_DIR/mongo/util/ntservice_mock",
],
NO_CRUTCH = True,
@@ -128,7 +127,6 @@ env.CppUnitTest(
"$BUILD_DIR/mongo/db/serveronly",
"$BUILD_DIR/mongo/dbtests/mocklib",
"$BUILD_DIR/mongo/db/query/collation/collator_interface_mock",
- "$BUILD_DIR/mongo/util/clock_source_mock",
"$BUILD_DIR/mongo/util/ntservice_mock",
],
NO_CRUTCH = True,
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 3425a2562b5..7604d5ce465 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -74,7 +74,7 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
// Adds the amount of time taken by pickBestPlan() to executionTimeMillis. There's lots of
// execution work that happens here, so this is needed for the time accounting to
// make sense.
- ScopedTimer timer(getClock(), &_commonStats.executionTimeMillis);
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
// If we work this many times during the trial period, then we will replan the
// query from scratch.
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index c57e0a5f02f..95c70b47ce0 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -203,7 +203,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
// Adds the amount of time taken by pickBestPlan() to executionTimeMillis. There's lots of
// execution work that happens here, so this is needed for the time accounting to
// make sense.
- ScopedTimer timer(getClock(), &_commonStats.executionTimeMillis);
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
size_t numWorks = getTrialPeriodWorks(getOpCtx(), _collection);
size_t numResults = getTrialPeriodNumToReturn(*_query);
diff --git a/src/mongo/db/exec/plan_stage.cpp b/src/mongo/db/exec/plan_stage.cpp
index 13062fa919f..c81679bd8cb 100644
--- a/src/mongo/db/exec/plan_stage.cpp
+++ b/src/mongo/db/exec/plan_stage.cpp
@@ -33,14 +33,11 @@
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/scoped_timer.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/db/service_context.h"
namespace mongo {
PlanStage::StageState PlanStage::work(WorkingSetID* out) {
- invariant(_opCtx);
- ScopedTimer timer(getClock(), &_commonStats.executionTimeMillis);
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
++_commonStats.works;
StageState workResult = doWork(out);
@@ -105,8 +102,4 @@ void PlanStage::reattachToOperationContext(OperationContext* opCtx) {
doReattachToOperationContext();
}
-ClockSource* PlanStage::getClock() const {
- return _opCtx->getServiceContext()->getFastClockSource();
-}
-
} // namespace mongo
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index c0afd992e72..3ad007c8430 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -37,10 +37,9 @@
namespace mongo {
-class ClockSource;
class Collection;
-class OperationContext;
class RecordId;
+class OperationContext;
/**
* A PlanStage ("stage") is the basic building block of a "Query Execution Plan." A stage is
@@ -358,8 +357,6 @@ protected:
*/
virtual void doInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {}
- ClockSource* getClock() const;
-
OperationContext* getOpCtx() const {
return _opCtx;
}
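
The plan_stage.{h,cpp} hunks above restore PlanStage::work() to a form that no longer touches the OperationContext: it starts a timer, bumps the works counter, and delegates to the virtual doWork() hook, which is why the test changes below can construct stages with a nullptr OperationContext. A minimal sketch of that wrapper shape, with simplified stand-in names and std::chrono rather than the real PlanStage/ScopedTimer types:

```cpp
// Hedged illustration of the work()/doWork() template-method shape; not mongo's real classes.
#include <chrono>

struct CommonStatsSketch {
    long long executionTimeMillis = 0;  // accumulated time spent in work()
    long long works = 0;                // number of work() calls
};

class PlanStageSketch {
public:
    enum StageState { ADVANCED, NEED_TIME, IS_EOF };

    virtual ~PlanStageSketch() = default;

    // Public driver: time the call, count it, then run the stage-specific logic.
    StageState work() {
        auto start = std::chrono::steady_clock::now();
        ++_commonStats.works;
        StageState result = doWork();
        _commonStats.executionTimeMillis +=
            std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::steady_clock::now() - start).count();
        return result;
    }

protected:
    // Stage-specific hook, overridden by concrete stages.
    virtual StageState doWork() = 0;

    CommonStatsSketch _commonStats;
};

// Trivial concrete stage used only to exercise the sketch.
class EofStageSketch : public PlanStageSketch {
protected:
    StageState doWork() override { return IS_EOF; }
};

int main() {
    EofStageSketch stage;
    while (stage.work() == PlanStageSketch::NEED_TIME) {
    }
    return 0;
}
```
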
diff --git a/src/mongo/db/exec/queued_data_stage_test.cpp b/src/mongo/db/exec/queued_data_stage_test.cpp
index 26f41ca4c8c..e26d303a833 100644
--- a/src/mongo/db/exec/queued_data_stage_test.cpp
+++ b/src/mongo/db/exec/queued_data_stage_test.cpp
@@ -32,12 +32,8 @@
#include "mongo/db/exec/queued_data_stage.h"
#include "mongo/db/exec/working_set.h"
-#include "mongo/db/operation_context_noop.h"
-#include "mongo/db/service_context.h"
-#include "mongo/db/service_context_noop.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
-#include "mongo/util/clock_source_mock.h"
using namespace mongo;
@@ -46,38 +42,12 @@ namespace {
using std::unique_ptr;
using stdx::make_unique;
-class QueuedDataStageTest : public unittest::Test {
-public:
- QueuedDataStageTest() {
- _service = stdx::make_unique<ServiceContextNoop>();
- _client = _service.get()->makeClient("test");
- _opCtxNoop.reset(new OperationContextNoop(_client.get(), 0));
- _opCtx = _opCtxNoop.get();
- }
-
-protected:
- OperationContext* getOpCtx() {
- return _opCtx;
- }
-
-private:
- OperationContext* _opCtx;
- std::unique_ptr<OperationContextNoop> _opCtxNoop;
-
- std::unique_ptr<ServiceContextNoop> _service;
-
- // UniqueClient is declared after ServiceContextNoop because
- // members of a class are destroyed in reverse order of declaration and
- // UniqueClient must be destroyed before the ServiceContextNoop is destroyed.
- ServiceContext::UniqueClient _client;
-};
-
//
// Basic test that we get out valid stats objects.
//
-TEST_F(QueuedDataStageTest, getValidStats) {
+TEST(QueuedDataStageTest, getValidStats) {
WorkingSet ws;
- auto mock = make_unique<QueuedDataStage>(getOpCtx(), &ws);
+ auto mock = make_unique<QueuedDataStage>(nullptr, &ws);
const CommonStats* commonStats = mock->getCommonStats();
ASSERT_EQUALS(commonStats->works, static_cast<size_t>(0));
const SpecificStats* specificStats = mock->getSpecificStats();
@@ -89,10 +59,10 @@ TEST_F(QueuedDataStageTest, getValidStats) {
//
// Test that our stats are updated as we perform operations.
//
-TEST_F(QueuedDataStageTest, validateStats) {
+TEST(QueuedDataStageTest, validateStats) {
WorkingSet ws;
WorkingSetID wsID;
- auto mock = make_unique<QueuedDataStage>(getOpCtx(), &ws);
+ auto mock = make_unique<QueuedDataStage>(nullptr, &ws);
// make sure that we're at all zero
const CommonStats* stats = mock->getCommonStats();
diff --git a/src/mongo/db/exec/scoped_timer.cpp b/src/mongo/db/exec/scoped_timer.cpp
index 8493db68116..e1db4a44ff8 100644
--- a/src/mongo/db/exec/scoped_timer.cpp
+++ b/src/mongo/db/exec/scoped_timer.cpp
@@ -29,15 +29,16 @@
#include "mongo/platform/basic.h"
#include "mongo/db/exec/scoped_timer.h"
-#include "mongo/util/clock_source.h"
+
+#include "mongo/util/net/listen.h"
namespace mongo {
-ScopedTimer::ScopedTimer(ClockSource* cs, long long* counter)
- : _clock(cs), _counter(counter), _start(cs->now()) {}
+ScopedTimer::ScopedTimer(long long* counter)
+ : _counter(counter), _start(Listener::getElapsedTimeMillis()) {}
ScopedTimer::~ScopedTimer() {
- long long elapsed = durationCount<Milliseconds>(_clock->now() - _start);
+ long long elapsed = Listener::getElapsedTimeMillis() - _start;
*_counter += elapsed;
}
diff --git a/src/mongo/db/exec/scoped_timer.h b/src/mongo/db/exec/scoped_timer.h
index da0a484fabf..3e1c29fe719 100644
--- a/src/mongo/db/exec/scoped_timer.h
+++ b/src/mongo/db/exec/scoped_timer.h
@@ -30,12 +30,8 @@
#include "mongo/base/disallow_copying.h"
-#include "mongo/util/time_support.h"
-
namespace mongo {
-class ClockSource;
-
/**
* This class increments a counter by a rough estimate of the time elapsed since its
* construction when it goes out of scope.
@@ -44,17 +40,19 @@ class ScopedTimer {
MONGO_DISALLOW_COPYING(ScopedTimer);
public:
- ScopedTimer(ClockSource* cs, long long* counter);
+ ScopedTimer(long long* counter);
~ScopedTimer();
private:
- ClockSource* const _clock;
+ // Default constructor disallowed.
+ ScopedTimer();
+
// Reference to the counter that we are incrementing with the elapsed time.
long long* _counter;
// Time at which the timer was constructed.
- const Date_t _start;
+ long long _start;
};
} // namespace mongo
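
For contrast with the one-argument constructor restored above, the two-argument form being reverted away injected an abstract clock so tests could substitute a mock (the clock_source_mock dependency dropped from the SConscript hunks earlier). A rough, hedged sketch of that injection pattern with simplified stand-in types; ClockSketch is not MongoDB's actual ClockSource API.

```cpp
// Hedged sketch of the clock-injected variant this commit reverts away from.
#include <chrono>

class ClockSketch {
public:
    virtual ~ClockSketch() = default;
    virtual std::chrono::milliseconds now() const = 0;  // current time, mockable in tests
};

class SystemClockSketch : public ClockSketch {
public:
    std::chrono::milliseconds now() const override {
        return std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::steady_clock::now().time_since_epoch());
    }
};

class InjectedScopedTimerSketch {
public:
    InjectedScopedTimerSketch(const ClockSketch* clock, long long* counter)
        : _clock(clock), _counter(counter), _start(clock->now()) {}

    ~InjectedScopedTimerSketch() {
        *_counter += (_clock->now() - _start).count();  // charge elapsed time to the counter
    }

private:
    const ClockSketch* _clock;         // injected clock, not owned
    long long* _counter;               // counter to increment, not owned
    std::chrono::milliseconds _start;  // time at construction
};

int main() {
    SystemClockSketch clock;
    long long counter = 0;
    {
        InjectedScopedTimerSketch timer(&clock, &counter);  // clock injected explicitly
    }
    return 0;
}
```

Reaching a clock that way required _opCtx->getServiceContext()->getFastClockSource() (the getClock() helper removed above), which is the ServiceContext/Client plumbing the old test fixtures set up and this revert strips back out.
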
diff --git a/src/mongo/db/exec/sort_test.cpp b/src/mongo/db/exec/sort_test.cpp
index f0cc0d01062..6a45fa27022 100644
--- a/src/mongo/db/exec/sort_test.cpp
+++ b/src/mongo/db/exec/sort_test.cpp
@@ -34,145 +34,23 @@
#include "mongo/db/exec/queued_data_stage.h"
#include "mongo/db/json.h"
-#include "mongo/db/operation_context_noop.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
-#include "mongo/db/service_context.h"
-#include "mongo/db/service_context_noop.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
-#include "mongo/util/clock_source_mock.h"
using namespace mongo;
namespace {
-class SortStageTest : public unittest::Test {
-public:
- SortStageTest() {
- _service = stdx::make_unique<ServiceContextNoop>();
- _client = _service.get()->makeClient("test");
- _opCtxNoop.reset(new OperationContextNoop(_client.get(), 0));
- _opCtx = _opCtxNoop.get();
- }
-
- /**
- * Test function to verify sort stage.
- * SortStageParams will be initialized using patternStr, collator, queryStr and limit.
- * inputStr represents the input data set in a BSONObj.
- * {input: [doc1, doc2, doc3, ...]}
- * expectedStr represents the expected sorted data set.
- * {output: [docA, docB, docC, ...]}
- */
- void testWork(const char* patternStr,
- CollatorInterface* collator,
- const char* queryStr,
- int limit,
- const char* inputStr,
- const char* expectedStr) {
- // WorkingSet is not owned by stages
- // so it's fine to declare
- WorkingSet ws;
-
- // QueuedDataStage will be owned by SortStage.
- auto queuedDataStage = stdx::make_unique<QueuedDataStage>(getOpCtx(), &ws);
- BSONObj inputObj = fromjson(inputStr);
- BSONElement inputElt = inputObj.getField("input");
- ASSERT(inputElt.isABSONObj());
- BSONObjIterator inputIt(inputElt.embeddedObject());
- while (inputIt.more()) {
- BSONElement elt = inputIt.next();
- ASSERT(elt.isABSONObj());
- BSONObj obj = elt.embeddedObject().getOwned();
-
- // Insert obj from input array into working set.
- WorkingSetID id = ws.allocate();
- WorkingSetMember* wsm = ws.get(id);
- wsm->obj = Snapshotted<BSONObj>(SnapshotId(), obj);
- wsm->transitionToOwnedObj();
- queuedDataStage->pushBack(id);
- }
-
- // Initialize SortStageParams
- // Setting limit to 0 means no limit
- SortStageParams params;
- params.pattern = fromjson(patternStr);
- params.collator = collator;
- params.limit = limit;
-
- auto sortKeyGen = stdx::make_unique<SortKeyGeneratorStage>(
- getOpCtx(), queuedDataStage.release(), &ws, params.pattern, fromjson(queryStr));
-
- SortStage sort(getOpCtx(), params, &ws, sortKeyGen.release());
-
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = PlanStage::NEED_TIME;
-
- // Keep working sort stage until data is available.
- while (state == PlanStage::NEED_TIME) {
- state = sort.work(&id);
- }
-
- // QueuedDataStage's state should be EOF when sort is ready to advance.
- ASSERT_TRUE(sort.child()->child()->isEOF());
-
- // While there's data to be retrieved, state should be equal to ADVANCED.
- // Insert documents into BSON document in this format:
- // {output: [docA, docB, docC, ...]}
- BSONObjBuilder bob;
- BSONArrayBuilder arr(bob.subarrayStart("output"));
- while (state == PlanStage::ADVANCED) {
- WorkingSetMember* member = ws.get(id);
- const BSONObj& obj = member->obj.value();
- arr.append(obj);
- state = sort.work(&id);
- }
- arr.doneFast();
- BSONObj outputObj = bob.obj();
-
- // Sort stage should be EOF after data is retrieved.
- ASSERT_EQUALS(state, PlanStage::IS_EOF);
- ASSERT_TRUE(sort.isEOF());
-
- // Finally, we get to compare the sorted results against what we expect.
- BSONObj expectedObj = fromjson(expectedStr);
- if (outputObj != expectedObj) {
- mongoutils::str::stream ss;
- // Even though we have the original string representation of the expected output,
- // we invoke BSONObj::toString() to get a format consistent with outputObj.
- ss << "Unexpected sort result with query=" << queryStr << "; pattern=" << patternStr
- << "; limit=" << limit << ":\n"
- << "Expected: " << expectedObj.toString() << "\n"
- << "Actual: " << outputObj.toString() << "\n";
- FAIL(ss);
- }
- }
-
-protected:
- OperationContext* getOpCtx() {
- return _opCtx;
- }
-
-private:
- OperationContext* _opCtx;
- std::unique_ptr<OperationContextNoop> _opCtxNoop;
-
- std::unique_ptr<ServiceContextNoop> _service;
-
- // UniqueClient is declared after ServiceContextNoop because
- // members of a class are destroyed in reverse order of declaration and
- // UniqueClient must be destroyed before the ServiceContextNoop is destroyed.
- ServiceContext::UniqueClient _client;
-};
-
-TEST_F(SortStageTest, SortEmptyWorkingSet) {
+TEST(SortStageTest, SortEmptyWorkingSet) {
WorkingSet ws;
// QueuedDataStage will be owned by SortStage.
- auto queuedDataStage = stdx::make_unique<QueuedDataStage>(getOpCtx(), &ws);
+ auto queuedDataStage = stdx::make_unique<QueuedDataStage>(nullptr, &ws);
auto sortKeyGen = stdx::make_unique<SortKeyGeneratorStage>(
- getOpCtx(), queuedDataStage.release(), &ws, BSONObj(), BSONObj());
+ nullptr, queuedDataStage.release(), &ws, BSONObj(), BSONObj());
SortStageParams params;
- SortStage sort(getOpCtx(), params, &ws, sortKeyGen.release());
+ SortStage sort(nullptr, params, &ws, sortKeyGen.release());
// Check initial EOF state.
ASSERT_FALSE(sort.isEOF());
@@ -193,6 +71,98 @@ TEST_F(SortStageTest, SortEmptyWorkingSet) {
ASSERT_TRUE(sort.isEOF());
}
+/**
+ * Test function to verify sort stage.
+ * SortStageParams will be initialized using patternStr, collator, queryStr and limit.
+ * inputStr represents the input data set in a BSONObj.
+ * {input: [doc1, doc2, doc3, ...]}
+ * expectedStr represents the expected sorted data set.
+ * {output: [docA, docB, docC, ...]}
+ */
+void testWork(const char* patternStr,
+ CollatorInterface* collator,
+ const char* queryStr,
+ int limit,
+ const char* inputStr,
+ const char* expectedStr) {
+ // WorkingSet is not owned by stages
+ // so it's fine to declare
+ WorkingSet ws;
+
+ // QueuedDataStage will be owned by SortStage.
+ auto queuedDataStage = stdx::make_unique<QueuedDataStage>(nullptr, &ws);
+ BSONObj inputObj = fromjson(inputStr);
+ BSONElement inputElt = inputObj.getField("input");
+ ASSERT(inputElt.isABSONObj());
+ BSONObjIterator inputIt(inputElt.embeddedObject());
+ while (inputIt.more()) {
+ BSONElement elt = inputIt.next();
+ ASSERT(elt.isABSONObj());
+ BSONObj obj = elt.embeddedObject().getOwned();
+
+ // Insert obj from input array into working set.
+ WorkingSetID id = ws.allocate();
+ WorkingSetMember* wsm = ws.get(id);
+ wsm->obj = Snapshotted<BSONObj>(SnapshotId(), obj);
+ wsm->transitionToOwnedObj();
+ queuedDataStage->pushBack(id);
+ }
+
+ // Initialize SortStageParams
+ // Setting limit to 0 means no limit
+ SortStageParams params;
+ params.pattern = fromjson(patternStr);
+ params.collator = collator;
+ params.limit = limit;
+
+ auto sortKeyGen = stdx::make_unique<SortKeyGeneratorStage>(
+ nullptr, queuedDataStage.release(), &ws, params.pattern, fromjson(queryStr));
+
+ SortStage sort(nullptr, params, &ws, sortKeyGen.release());
+
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = PlanStage::NEED_TIME;
+
+ // Keep working sort stage until data is available.
+ while (state == PlanStage::NEED_TIME) {
+ state = sort.work(&id);
+ }
+
+ // QueuedDataStage's state should be EOF when sort is ready to advance.
+ ASSERT_TRUE(sort.child()->child()->isEOF());
+
+ // While there's data to be retrieved, state should be equal to ADVANCED.
+ // Insert documents into BSON document in this format:
+ // {output: [docA, docB, docC, ...]}
+ BSONObjBuilder bob;
+ BSONArrayBuilder arr(bob.subarrayStart("output"));
+ while (state == PlanStage::ADVANCED) {
+ WorkingSetMember* member = ws.get(id);
+ const BSONObj& obj = member->obj.value();
+ arr.append(obj);
+ state = sort.work(&id);
+ }
+ arr.doneFast();
+ BSONObj outputObj = bob.obj();
+
+ // Sort stage should be EOF after data is retrieved.
+ ASSERT_EQUALS(state, PlanStage::IS_EOF);
+ ASSERT_TRUE(sort.isEOF());
+
+ // Finally, we get to compare the sorted results against what we expect.
+ BSONObj expectedObj = fromjson(expectedStr);
+ if (outputObj != expectedObj) {
+ mongoutils::str::stream ss;
+ // Even though we have the original string representation of the expected output,
+ // we invoke BSONObj::toString() to get a format consistent with outputObj.
+ ss << "Unexpected sort result with query=" << queryStr << "; pattern=" << patternStr
+ << "; limit=" << limit << ":\n"
+ << "Expected: " << expectedObj.toString() << "\n"
+ << "Actual: " << outputObj.toString() << "\n";
+ FAIL(ss);
+ }
+}
+
//
// Limit values
// The server interprets limit values from the user as follows:
@@ -206,7 +176,7 @@ TEST_F(SortStageTest, SortEmptyWorkingSet) {
// Implementation should keep all items fetched from child.
//
-TEST_F(SortStageTest, SortAscending) {
+TEST(SortStageTest, SortAscending) {
testWork("{a: 1}",
nullptr,
"{}",
@@ -215,7 +185,7 @@ TEST_F(SortStageTest, SortAscending) {
"{output: [{a: 1}, {a: 2}, {a: 3}]}");
}
-TEST_F(SortStageTest, SortDescending) {
+TEST(SortStageTest, SortDescending) {
testWork("{a: -1}",
nullptr,
"{}",
@@ -224,7 +194,7 @@ TEST_F(SortStageTest, SortDescending) {
"{output: [{a: 3}, {a: 2}, {a: 1}]}");
}
-TEST_F(SortStageTest, SortIrrelevantSortKey) {
+TEST(SortStageTest, SortIrrelevantSortKey) {
testWork("{b: 1}",
nullptr,
"{}",
@@ -239,7 +209,7 @@ TEST_F(SortStageTest, SortIrrelevantSortKey) {
// and discard the rest.
//
-TEST_F(SortStageTest, SortAscendingWithLimit) {
+TEST(SortStageTest, SortAscendingWithLimit) {
testWork("{a: 1}",
nullptr,
"{}",
@@ -248,7 +218,7 @@ TEST_F(SortStageTest, SortAscendingWithLimit) {
"{output: [{a: 1}, {a: 2}]}");
}
-TEST_F(SortStageTest, SortDescendingWithLimit) {
+TEST(SortStageTest, SortDescendingWithLimit) {
testWork("{a: -1}",
nullptr,
"{}",
@@ -263,7 +233,7 @@ TEST_F(SortStageTest, SortDescendingWithLimit) {
// and discard the rest.
//
-TEST_F(SortStageTest, SortAscendingWithLimitGreaterThanInputSize) {
+TEST(SortStageTest, SortAscendingWithLimitGreaterThanInputSize) {
testWork("{a: 1}",
nullptr,
"{}",
@@ -272,7 +242,7 @@ TEST_F(SortStageTest, SortAscendingWithLimitGreaterThanInputSize) {
"{output: [{a: 1}, {a: 2}, {a: 3}]}");
}
-TEST_F(SortStageTest, SortDescendingWithLimitGreaterThanInputSize) {
+TEST(SortStageTest, SortDescendingWithLimitGreaterThanInputSize) {
testWork("{a: -1}",
nullptr,
"{}",
@@ -286,16 +256,16 @@ TEST_F(SortStageTest, SortDescendingWithLimitGreaterThanInputSize) {
// Implementation should optimize this into a running maximum.
//
-TEST_F(SortStageTest, SortAscendingWithLimitOfOne) {
+TEST(SortStageTest, SortAscendingWithLimitOfOne) {
testWork("{a: 1}", nullptr, "{}", 1, "{input: [{a: 2}, {a: 1}, {a: 3}]}", "{output: [{a: 1}]}");
}
-TEST_F(SortStageTest, SortDescendingWithLimitOfOne) {
+TEST(SortStageTest, SortDescendingWithLimitOfOne) {
testWork(
"{a: -1}", nullptr, "{}", 1, "{input: [{a: 2}, {a: 1}, {a: 3}]}", "{output: [{a: 3}]}");
}
-TEST_F(SortStageTest, SortAscendingWithCollation) {
+TEST(SortStageTest, SortAscendingWithCollation) {
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
testWork("{a: 1}",
&collator,
@@ -305,7 +275,7 @@ TEST_F(SortStageTest, SortAscendingWithCollation) {
"{output: [{a: 'aa'}, {a: 'ba'}, {a: 'ab'}]}");
}
-TEST_F(SortStageTest, SortDescendingWithCollation) {
+TEST(SortStageTest, SortDescendingWithCollation) {
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
testWork("{a: -1}",
&collator,
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 8be5da178dc..b694a4cc902 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -494,7 +494,7 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
Status SubplanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
// Adds the amount of time taken by pickBestPlan() to executionTimeMillis. There's lots of
// work that happens here, so this is needed for the time accounting to make sense.
- ScopedTimer timer(getClock(), &_commonStats.executionTimeMillis);
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
// Plan each branch of the $or.
Status subplanningStatus = planSubqueries();